diff --git a/accounts/abi/bind/base.go b/accounts/abi/bind/base.go
index 05523dd244..80647ce52f 100644
--- a/accounts/abi/bind/base.go
+++ b/accounts/abi/bind/base.go
@@ -54,6 +54,8 @@ type TransactOpts struct {
 	GasLimit uint64          // Gas limit to set for the transaction execution (0 = estimate)
 
 	Context context.Context // Network context to support cancellation and timeouts (nil = no timeout)
+
+	NoSend bool // Do all transact steps but do not send the transaction
 }
 
 // FilterOpts is the collection of options to fine tune filtering for events
@@ -260,6 +262,9 @@ func (c *BoundContract) transact(opts *TransactOpts, contract *common.Address, i
 	if err != nil {
 		return nil, err
 	}
+	if opts.NoSend {
+		return signedTx, nil
+	}
 	if err := c.transactor.SendTransaction(ensureContext(opts.Context), signedTx); err != nil {
 		return nil, err
 	}
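The new NoSend option makes BoundContract.transact stop right after signing. A minimal usage sketch, not part of the patch; `token` stands for any generated contract binding, `auth` for an existing *bind.TransactOpts, and `to`/`amount` for ordinary transfer arguments:

	// Prepare nonce, gas and signature, but do not broadcast.
	opts := *auth // shallow copy so the original options stay untouched
	opts.NoSend = true
	signedTx, err := token.Transfer(&opts, to, amount)
	if err != nil {
		log.Fatal(err)
	}
	// signedTx is fully signed but unsent; it can be inspected, persisted,
	// or broadcast later, e.g. with ethclient's SendTransaction.
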
diff --git a/accounts/usbwallet/ledger.go b/accounts/usbwallet/ledger.go
index 71f0f9392f..3de3b4091c 100644
--- a/accounts/usbwallet/ledger.go
+++ b/accounts/usbwallet/ledger.go
@@ -52,8 +52,10 @@ const (
 	ledgerOpRetrieveAddress  ledgerOpcode = 0x02 // Returns the public key and Ethereum address for a given BIP 32 path
 	ledgerOpSignTransaction  ledgerOpcode = 0x04 // Signs an Ethereum transaction after having the user validate the parameters
 	ledgerOpGetConfiguration ledgerOpcode = 0x06 // Returns specific wallet application configuration
+	ledgerOpSignTypedMessage ledgerOpcode = 0x0c // Signs an Ethereum message following the EIP 712 specification
 
 	ledgerP1DirectlyFetchAddress    ledgerParam1 = 0x00 // Return address directly from the wallet
+	ledgerP1InitTypedMessageData    ledgerParam1 = 0x00 // First chunk of Typed Message data
 	ledgerP1InitTransactionData     ledgerParam1 = 0x00 // First transaction data block for signing
 	ledgerP1ContTransactionData     ledgerParam1 = 0x80 // Subsequent transaction data block for signing
 	ledgerP2DiscardAddressChainCode ledgerParam2 = 0x00 // Do not return the chain code along with the address
@@ -170,6 +172,24 @@ func (w *ledgerDriver) SignTx(path accounts.DerivationPath, tx *types.Transactio
 	return w.ledgerSign(path, tx, chainID)
 }
 
+// SignTypedMessage implements usbwallet.driver, sending the message to the Ledger and
+// waiting for the user to sign or deny the message.
+//
+// Note: this was introduced in the Ledger 1.5.0 firmware.
+func (w *ledgerDriver) SignTypedMessage(path accounts.DerivationPath, domainHash []byte, messageHash []byte) ([]byte, error) {
+	// If the Ethereum app doesn't run, abort
+	if w.offline() {
+		return nil, accounts.ErrWalletClosed
+	}
+	// Ensure the wallet is capable of signing the given message
+	if w.version[0] < 1 || (w.version[0] == 1 && w.version[1] < 5) {
+		//lint:ignore ST1005 brand name displayed on the console
+		return nil, fmt.Errorf("Ledger version >= 1.5.0 required for EIP-712 signing (found version v%d.%d.%d)", w.version[0], w.version[1], w.version[2])
+	}
+	// All infos gathered and metadata checks out, request signing
+	return w.ledgerSignTypedMessage(path, domainHash, messageHash)
+}
+
 // ledgerVersion retrieves the current version of the Ethereum wallet app running
 // on the Ledger wallet.
 //
@@ -367,6 +387,68 @@ func (w *ledgerDriver) ledgerSign(derivationPath []uint32, tx *types.Transaction
 	return sender, signed, nil
 }
 
+// ledgerSignTypedMessage sends the message to the Ledger wallet, and waits for the user
+// to confirm or deny the request.
+//
+// The signing protocol is defined as follows:
+//
+//	CLA | INS | P1 | P2                          | Lc       | Le
+//	----+-----+----+-----------------------------+----------+---------
+//	 E0 | 0C  | 00 | implementation version : 00 | variable | variable
+//
+// Where the input is:
+//
+//	Description                                      | Length
+//	-------------------------------------------------+----------
+//	Number of BIP 32 derivations to perform (max 10) | 1 byte
+//	First derivation index (big endian)              | 4 bytes
+//	...                                              | 4 bytes
+//	Last derivation index (big endian)               | 4 bytes
+//	domain hash                                      | 32 bytes
+//	message hash                                     | 32 bytes
+//
+// And the output data is:
+//
+//	Description | Length
+//	------------+---------
+//	signature V | 1 byte
+//	signature R | 32 bytes
+//	signature S | 32 bytes
+func (w *ledgerDriver) ledgerSignTypedMessage(derivationPath []uint32, domainHash []byte, messageHash []byte) ([]byte, error) {
+	// Flatten the derivation path into the Ledger request
+	path := make([]byte, 1+4*len(derivationPath))
+	path[0] = byte(len(derivationPath))
+	for i, component := range derivationPath {
+		binary.BigEndian.PutUint32(path[1+4*i:], component)
+	}
+	// Create the 712 message
+	payload := append(path, domainHash...)
+	payload = append(payload, messageHash...)
+
+	// Send the request and wait for the response
+	var (
+		op    = ledgerP1InitTypedMessageData
+		reply []byte
+		err   error
+	)
+
+	// Send the message over, ensuring it's processed correctly
+	reply, err = w.ledgerExchange(ledgerOpSignTypedMessage, op, 0, payload)
+	if err != nil {
+		return nil, err
+	}
+	// Extract the Ethereum signature and do a sanity validation
+	if len(reply) != crypto.SignatureLength {
+		return nil, errors.New("reply lacks signature")
+	}
+	// Reorder the V || R || S reply into the canonical R || S || V form
+	signature := append(reply[1:], reply[0])
+	return signature, nil
+}
+
 // ledgerExchange performs a data exchange with the Ledger wallet, sending it a
 // message and retrieving the response.
 //
diff --git a/accounts/usbwallet/trezor.go b/accounts/usbwallet/trezor.go
index 0546458c47..c2182b88d0 100644
--- a/accounts/usbwallet/trezor.go
+++ b/accounts/usbwallet/trezor.go
@@ -185,6 +185,10 @@ func (w *trezorDriver) SignTx(path accounts.DerivationPath, tx *types.Transactio
 	return w.trezorSign(path, tx, chainID)
 }
 
+func (w *trezorDriver) SignTypedMessage(path accounts.DerivationPath, domainHash []byte, messageHash []byte) ([]byte, error) {
+	return nil, accounts.ErrNotSupported
+}
+
 // trezorDerive sends a derivation request to the Trezor device and returns the
 // Ethereum address located on that path.
 func (w *trezorDriver) trezorDerive(derivationPath []uint32) (common.Address, error) {
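For context on the wallet.go change below: SignData only routes to the new SignTypedMessage driver call when it is handed the 66-byte EIP-712 preimage 0x19 || 0x01 || domainHash || messageHash. A hedged, self-contained sketch of assembling such a payload (the helper name is illustrative, not part of the patch):

	package main

	import (
		"errors"
		"fmt"
	)

	// typedDataPayload builds the 66-byte preimage that wallet.SignData
	// recognizes as EIP-712 typed data: 0x19 || 0x01 || domainHash || messageHash.
	func typedDataPayload(domainHash, messageHash []byte) ([]byte, error) {
		if len(domainHash) != 32 || len(messageHash) != 32 {
			return nil, errors.New("domain and message hashes must be 32 bytes each")
		}
		payload := make([]byte, 0, 66)
		payload = append(payload, 0x19, 0x01) // EIP-191 prefix, version byte 0x01 = structured data
		payload = append(payload, domainHash...)
		payload = append(payload, messageHash...)
		return payload, nil
	}

	func main() {
		d, m := make([]byte, 32), make([]byte, 32)
		p, _ := typedDataPayload(d, m)
		fmt.Println(len(p)) // 66
	}
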
diff --git a/accounts/usbwallet/wallet.go b/accounts/usbwallet/wallet.go
index 9f74e5554f..b6f1814488 100644
--- a/accounts/usbwallet/wallet.go
+++ b/accounts/usbwallet/wallet.go
@@ -67,6 +67,8 @@ type driver interface {
 	// SignTx sends the transaction to the USB device and waits for the user to confirm
 	// or deny the transaction.
 	SignTx(path accounts.DerivationPath, tx *types.Transaction, chainID *big.Int) (common.Address, *types.Transaction, error)
+
+	SignTypedMessage(path accounts.DerivationPath, domainHash []byte, messageHash []byte) ([]byte, error)
 }
 
 // wallet represents the common functionality shared by all USB hardware
@@ -524,7 +526,46 @@ func (w *wallet) signHash(account accounts.Account, hash []byte) ([]byte, error)
 // SignData signs keccak256(data). The mimetype parameter describes the type of data being signed
 func (w *wallet) SignData(account accounts.Account, mimeType string, data []byte) ([]byte, error) {
-	return w.signHash(account, crypto.Keccak256(data))
+
+	// Unless we are doing 712 signing, simply dispatch to signHash
+	if !(mimeType == accounts.MimetypeTypedData && len(data) == 66 && data[0] == 0x19 && data[1] == 0x01) {
+		return w.signHash(account, crypto.Keccak256(data))
+	}
+
+	// Dispatch to 712 signing, since the mimetype is TypedData and the format matches
+	w.stateLock.RLock() // Comms have own mutex, this is for the state fields
+	defer w.stateLock.RUnlock()
+
+	// If the wallet is closed, abort
+	if w.device == nil {
+		return nil, accounts.ErrWalletClosed
+	}
+	// Make sure the requested account is contained within
+	path, ok := w.paths[account.Address]
+	if !ok {
+		return nil, accounts.ErrUnknownAccount
+	}
+	// All infos gathered and metadata checks out, request signing
+	<-w.commsLock
+	defer func() { w.commsLock <- struct{}{} }()
+
+	// Ensure the device isn't screwed with while user confirmation is pending
+	// TODO(karalabe): remove if hotplug lands on Windows
+	w.hub.commsLock.Lock()
+	w.hub.commsPend++
+	w.hub.commsLock.Unlock()
+
+	defer func() {
+		w.hub.commsLock.Lock()
+		w.hub.commsPend--
+		w.hub.commsLock.Unlock()
+	}()
+	// Sign the typed message
+	signature, err := w.driver.SignTypedMessage(path, data[2:34], data[34:66])
+	if err != nil {
+		return nil, err
+	}
+	return signature, nil
 }
 
 // SignDataWithPassphrase implements accounts.Wallet, attempting to sign the given
diff --git a/build/aleth-interpreter.sh b/build/aleth-interpreter.sh
index c8e653817c..1a61d155d1 100755
--- a/build/aleth-interpreter.sh
+++ b/build/aleth-interpreter.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/usr/bin/env bash
 
 set -e
diff --git a/build/ci.go b/build/ci.go
index 39f88db9c3..a3a4c3afaf 100644
--- a/build/ci.go
+++ b/build/ci.go
@@ -75,7 +75,6 @@ var (
 		"COPYING",
 		executablePath("abigen"),
 		executablePath("bootnode"),
-		executablePath("echaindb"),
 		executablePath("echainspec"),
 		executablePath("evm"),
 		executablePath("geth"),
@@ -93,10 +92,6 @@ var (
 			BinaryName:  "bootnode",
 			Description: "Ethereum bootnode.",
 		},
-		{
-			BinaryName:  "echaindb",
-			Description: "Developer utility to inspect and manage the chain database",
-		},
 		{
 			BinaryName:  "echainspec",
 			Description: "Developer utility to manage chain external chain configuration",
diff --git a/build/evmone.sh b/build/evmone.sh
index 760ccb9309..0aea9cddc4 100755
--- a/build/evmone.sh
+++ b/build/evmone.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/usr/bin/env bash
 
 set -e
diff --git a/build/hera.sh b/build/hera.sh
index 7fb63be403..e21e6aca2f 100755
--- a/build/hera.sh
+++ b/build/hera.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/usr/bin/env bash
 
 set -e
diff --git a/build/ssvm.sh b/build/ssvm.sh
index 9872c59903..949f2cc1cd 100755
--- a/build/ssvm.sh
+++ b/build/ssvm.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/usr/bin/env bash
 
 set -e
diff --git a/cmd/clef/README.md b/cmd/clef/README.md
index 380d1b702c..9bbbd1d63c 100644
--- a/cmd/clef/README.md
+++ b/cmd/clef/README.md
@@ -919,4 +919,4 @@ There are a couple of implementations for a UI. We'll try to keep this list up to date.
 | QtSigner| https://github.com/holiman/qtsigner/| Python3/QT-based| :+1:| :+1:| :+1:| :+1:| :+1:| :x: | :+1: (partially)|
 | GtkSigner| https://github.com/holiman/gtksigner| Python3/GTK-based| :+1:| :x:| :x:| :+1:| :+1:| :x: | :x: |
 | Frame | https://github.com/floating/frame/commits/go-signer| Electron-based| :x:| :x:| :x:| :x:| ?| :x: | :x: |
-| Clef UI| https://github.com/kyokan/clef-ui| Golang/QT-based| :+1:| :+1:| :x:| :+1:| :+1:| :x: | :+1: (approve tx only)|
+| Clef UI| https://github.com/ethereum/clef-ui| Golang/QT-based| :+1:| :+1:| :x:| :+1:| :+1:| :x: | :+1: (approve tx only)|
diff --git a/cmd/clef/sign_flow.png b/cmd/clef/sign_flow.png
index 93ef81a32e..e7010ab43f 100644
Binary files a/cmd/clef/sign_flow.png and b/cmd/clef/sign_flow.png differ
diff --git a/cmd/devp2p/dns_cloudflare.go b/cmd/devp2p/dns_cloudflare.go
index a4d10dcfdd..596254df91 100644
--- a/cmd/devp2p/dns_cloudflare.go
+++ b/cmd/devp2p/dns_cloudflare.go
@@ -17,6 +17,7 @@ package main
 
 import (
+	"context"
 	"fmt"
 	"strings"
 
@@ -79,7 +80,7 @@ func (c *cloudflareClient) checkZone(name string) error {
 		c.zoneID = id
 	}
 	log.Info(fmt.Sprintf("Checking Permissions on zone %s", c.zoneID))
-	zone, err := c.ZoneDetails(c.zoneID)
+	zone, err := c.ZoneDetails(context.Background(), c.zoneID)
 	if err != nil {
 		return err
 	}
@@ -112,7 +113,7 @@ func (c *cloudflareClient) uploadRecords(name string, records map[string]string)
 	records = lrecords
 
 	log.Info(fmt.Sprintf("Retrieving existing TXT records on %s", name))
-	entries, err := c.DNSRecords(c.zoneID, cloudflare.DNSRecord{Type: "TXT"})
+	entries, err := c.DNSRecords(context.Background(), c.zoneID, cloudflare.DNSRecord{Type: "TXT"})
 	if err != nil {
 		return err
 	}
@@ -134,14 +135,15 @@ func (c *cloudflareClient) uploadRecords(name string, records map[string]string)
 			if path != name {
 				ttl = treeNodeTTL // Max TTL permitted by Cloudflare
 			}
-			_, err = c.CreateDNSRecord(c.zoneID, cloudflare.DNSRecord{Type: "TXT", Name: path, Content: val, TTL: ttl})
+			record := cloudflare.DNSRecord{Type: "TXT", Name: path, Content: val, TTL: ttl}
+			_, err = c.CreateDNSRecord(context.Background(), c.zoneID, record)
 		} else if old.Content != val {
 			// Entry already exists, only change its content.
 			log.Info(fmt.Sprintf("Updating %s from %q to %q", path, old.Content, val))
 			old.Content = val
-			err = c.UpdateDNSRecord(c.zoneID, old.ID, old)
+			err = c.UpdateDNSRecord(context.Background(), c.zoneID, old.ID, old)
 		} else {
-			log.Info(fmt.Sprintf("Skipping %s = %q", path, val))
+			log.Debug(fmt.Sprintf("Skipping %s = %q", path, val))
 		}
 		if err != nil {
 			return fmt.Errorf("failed to publish %s: %v", path, err)
@@ -155,7 +157,7 @@ func (c *cloudflareClient) uploadRecords(name string, records map[string]string)
 		}
 		// Stale entry, nuke it.
 		log.Info(fmt.Sprintf("Deleting %s = %q", path, entry.Content))
-		if err := c.DeleteDNSRecord(c.zoneID, entry.ID); err != nil {
+		if err := c.DeleteDNSRecord(context.Background(), c.zoneID, entry.ID); err != nil {
 			return fmt.Errorf("failed to delete %s: %v", path, err)
 		}
 	}
diff --git a/cmd/devp2p/dns_route53.go b/cmd/devp2p/dns_route53.go
index c5f99529b8..2b6a30e60f 100644
--- a/cmd/devp2p/dns_route53.go
+++ b/cmd/devp2p/dns_route53.go
@@ -17,16 +17,19 @@ package main
 
 import (
+	"context"
 	"errors"
 	"fmt"
 	"sort"
 	"strconv"
 	"strings"
+	"time"
 
-	"github.com/aws/aws-sdk-go/aws"
-	"github.com/aws/aws-sdk-go/aws/credentials"
-	"github.com/aws/aws-sdk-go/aws/session"
-	"github.com/aws/aws-sdk-go/service/route53"
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/config"
+	"github.com/aws/aws-sdk-go-v2/credentials"
+	"github.com/aws/aws-sdk-go-v2/service/route53"
+	"github.com/aws/aws-sdk-go-v2/service/route53/types"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/p2p/dnsdisc"
 	"gopkg.in/urfave/cli.v1"
@@ -38,6 +41,7 @@ const (
 	// https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/DNSLimitations.html#limits-api-requests-changeresourcerecordsets
 	route53ChangeSizeLimit  = 32000
 	route53ChangeCountLimit = 1000
+	maxRetryLimit           = 60
 )
 
 var (
@@ -55,10 +59,15 @@ var (
 		Name:  "zone-id",
 		Usage: "Route53 Zone ID",
 	}
+	route53RegionFlag = cli.StringFlag{
+		Name:  "aws-region",
+		Usage: "AWS Region",
+		Value: "eu-central-1",
+	}
 )
 
 type route53Client struct {
-	api    *route53.Route53
+	api    *route53.Client
 	zoneID string
 }
@@ -72,15 +81,16 @@ func newRoute53Client(ctx *cli.Context) *route53Client {
 	akey := ctx.String(route53AccessKeyFlag.Name)
 	asec := ctx.String(route53AccessSecretFlag.Name)
 	if akey == "" || asec == "" {
-		exit(fmt.Errorf("need Route53 Access Key ID and secret proceed"))
+		exit(fmt.Errorf("need Route53 Access Key ID and secret to proceed"))
 	}
-	config := &aws.Config{Credentials: credentials.NewStaticCredentials(akey, asec, "")}
-	session, err := session.NewSession(config)
+	creds := aws.NewCredentialsCache(credentials.NewStaticCredentialsProvider(akey, asec, ""))
+	cfg, err := config.LoadDefaultConfig(context.Background(), config.WithCredentialsProvider(creds))
 	if err != nil {
-		exit(fmt.Errorf("can't create AWS session: %v", err))
+		exit(fmt.Errorf("can't initialize AWS configuration: %v", err))
 	}
+	cfg.Region = ctx.String(route53RegionFlag.Name)
 	return &route53Client{
-		api:    route53.New(session),
+		api:    route53.NewFromConfig(cfg),
 		zoneID: ctx.String(route53ZoneIDFlag.Name),
 	}
 }
@@ -105,25 +115,43 @@ func (c *route53Client) deploy(name string, t *dnsdisc.Tree) error {
 		return nil
 	}
 
-	// Submit change batches.
+	// Submit all change batches.
 	batches := splitChanges(changes, route53ChangeSizeLimit, route53ChangeCountLimit)
+	changesToCheck := make([]*route53.ChangeResourceRecordSetsOutput, len(batches))
 	for i, changes := range batches {
 		log.Info(fmt.Sprintf("Submitting %d changes to Route53", len(changes)))
-		batch := new(route53.ChangeBatch)
-		batch.SetChanges(changes)
-		batch.SetComment(fmt.Sprintf("enrtree update %d/%d of %s at seq %d", i+1, len(batches), name, t.Seq()))
+		batch := &types.ChangeBatch{
+			Changes: changes,
+			Comment: aws.String(fmt.Sprintf("enrtree update %d/%d of %s at seq %d", i+1, len(batches), name, t.Seq())),
+		}
 		req := &route53.ChangeResourceRecordSetsInput{HostedZoneId: &c.zoneID, ChangeBatch: batch}
-		resp, err := c.api.ChangeResourceRecordSets(req)
+		changesToCheck[i], err = c.api.ChangeResourceRecordSets(context.TODO(), req)
 		if err != nil {
 			return err
 		}
+	}
 
-		log.Info(fmt.Sprintf("Waiting for change request %s", *resp.ChangeInfo.Id))
-		wreq := &route53.GetChangeInput{Id: resp.ChangeInfo.Id}
-		if err := c.api.WaitUntilResourceRecordSetsChanged(wreq); err != nil {
-			return err
+	// Wait for all change batches to propagate.
+	for _, change := range changesToCheck {
+		log.Info(fmt.Sprintf("Waiting for change request %s", *change.ChangeInfo.Id))
+		wreq := &route53.GetChangeInput{Id: change.ChangeInfo.Id}
+		var count int
+		for {
+			wresp, err := c.api.GetChange(context.TODO(), wreq)
+			if err != nil {
+				return err
+			}
+
+			count++
+
+			if wresp.ChangeInfo.Status == types.ChangeStatusInsync || count >= maxRetryLimit {
+				break
+			}
+
+			time.Sleep(30 * time.Second)
 		}
 	}
+
 	return nil
 }
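One behavioral detail of the wait loop above worth spelling out: propagation failure is not treated as an error. Each batch is polled at 30-second intervals up to maxRetryLimit (60) times, roughly 30 minutes in the worst case, and if the change never reaches INSYNC the loop simply breaks and the deploy moves on to the next batch.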
@@ -140,7 +168,7 @@ func (c *route53Client) findZoneID(name string) (string, error) {
 	log.Info(fmt.Sprintf("Finding Route53 Zone ID for %s", name))
 	var req route53.ListHostedZonesByNameInput
 	for {
-		resp, err := c.api.ListHostedZonesByName(&req)
+		resp, err := c.api.ListHostedZonesByName(context.TODO(), &req)
 		if err != nil {
 			return "", err
 		}
@@ -149,7 +177,7 @@ func (c *route53Client) findZoneID(name string) (string, error) {
 				return *zone.Id, nil
 			}
 		}
-		if !*resp.IsTruncated {
+		if !resp.IsTruncated {
 			break
 		}
 		req.DNSName = resp.NextDNSName
@@ -159,7 +187,7 @@
 }
 
 // computeChanges creates DNS changes for the given record.
-func (c *route53Client) computeChanges(name string, records map[string]string, existing map[string]recordSet) []*route53.Change {
+func (c *route53Client) computeChanges(name string, records map[string]string, existing map[string]recordSet) []types.Change {
 	// Convert all names to lowercase.
 	lrecords := make(map[string]string, len(records))
 	for name, r := range records {
@@ -167,25 +195,30 @@ func (c *route53Client) computeChanges(name string, records map[string]string, e
 	}
 	records = lrecords
 
-	var changes []*route53.Change
-	for path, val := range records {
+	var changes []types.Change
+	for path, newValue := range records {
+		prevRecords, exists := existing[path]
+		prevValue := strings.Join(prevRecords.values, "")
+
+		// prevValue contains quoted strings, encode newValue to compare.
+		newValue = splitTXT(newValue)
+
+		// Assign TTL.
 		ttl := int64(rootTTL)
 		if path != name {
 			ttl = int64(treeNodeTTL)
 		}
 
-		prevRecords, exists := existing[path]
-		prevValue := strings.Join(prevRecords.values, "")
 		if !exists {
 			// Entry is unknown, push a new one
-			log.Info(fmt.Sprintf("Creating %s = %q", path, val))
-			changes = append(changes, newTXTChange("CREATE", path, ttl, splitTXT(val)))
-		} else if prevValue != val || prevRecords.ttl != ttl {
+			log.Info(fmt.Sprintf("Creating %s = %s", path, newValue))
+			changes = append(changes, newTXTChange("CREATE", path, ttl, newValue))
+		} else if prevValue != newValue || prevRecords.ttl != ttl {
 			// Entry already exists, only change its content.
-			log.Info(fmt.Sprintf("Updating %s from %q to %q", path, prevValue, val))
-			changes = append(changes, newTXTChange("UPSERT", path, ttl, splitTXT(val)))
+			log.Info(fmt.Sprintf("Updating %s from %s to %s", path, prevValue, newValue))
+			changes = append(changes, newTXTChange("UPSERT", path, ttl, newValue))
 		} else {
-			log.Info(fmt.Sprintf("Skipping %s = %q", path, val))
+			log.Debug(fmt.Sprintf("Skipping %s = %s", path, newValue))
 		}
 	}
 
@@ -204,21 +237,21 @@ func (c *route53Client) computeChanges(name string, records map[string]string, e
 }
 
 // sortChanges ensures DNS changes are in leaf-added -> root-changed -> leaf-deleted order.
-func sortChanges(changes []*route53.Change) {
+func sortChanges(changes []types.Change) {
 	score := map[string]int{"CREATE": 1, "UPSERT": 2, "DELETE": 3}
 	sort.Slice(changes, func(i, j int) bool {
-		if *changes[i].Action == *changes[j].Action {
+		if changes[i].Action == changes[j].Action {
 			return *changes[i].ResourceRecordSet.Name < *changes[j].ResourceRecordSet.Name
 		}
-		return score[*changes[i].Action] < score[*changes[j].Action]
+		return score[string(changes[i].Action)] < score[string(changes[j].Action)]
 	})
 }
 
 // splitChanges splits up DNS changes such that each change batch
 // is smaller than the given RDATA limit.
-func splitChanges(changes []*route53.Change, sizeLimit, countLimit int) [][]*route53.Change {
+func splitChanges(changes []types.Change, sizeLimit, countLimit int) [][]types.Change {
 	var (
-		batches    [][]*route53.Change
+		batches    [][]types.Change
 		batchSize  int
 		batchCount int
 	)
@@ -241,7 +274,7 @@ func splitChanges(changes []*route53.Change, sizeLimit, countLimit int) [][]*rou
 }
 
 // changeSize returns the RDATA size of a DNS change.
-func changeSize(ch *route53.Change) int {
+func changeSize(ch types.Change) int {
 	size := 0
 	for _, rr := range ch.ResourceRecordSet.ResourceRecords {
 		if rr.Value != nil {
@@ -251,8 +284,8 @@ func changeSize(ch *route53.Change) int {
 	return size
 }
 
-func changeCount(ch *route53.Change) int {
-	if *ch.Action == "UPSERT" {
+func changeCount(ch types.Change) int {
+	if ch.Action == types.ChangeActionUpsert {
 		return 2
 	}
 	return 1
@@ -260,13 +293,17 @@
 }
 
 // collectRecords collects all TXT records below the given name.
 func (c *route53Client) collectRecords(name string) (map[string]recordSet, error) {
-	log.Info(fmt.Sprintf("Retrieving existing TXT records on %s (%s)", name, c.zoneID))
 	var req route53.ListResourceRecordSetsInput
-	req.SetHostedZoneId(c.zoneID)
+	req.HostedZoneId = &c.zoneID
+
 	existing := make(map[string]recordSet)
-	err := c.api.ListResourceRecordSetsPages(&req, func(resp *route53.ListResourceRecordSetsOutput, last bool) bool {
+	for page := 0; ; page++ {
+		log.Info("Loading existing TXT records", "name", name, "zone", c.zoneID, "page", page)
+		resp, err := c.api.ListResourceRecordSets(context.TODO(), &req)
+		if err != nil {
+			return existing, err
+		}
 		for _, set := range resp.ResourceRecordSets {
-			if !isSubdomain(*set.Name, name) || *set.Type != "TXT" {
+			if !isSubdomain(*set.Name, name) || set.Type != types.RRTypeTxt {
 				continue
 			}
 			s := recordSet{ttl: *set.TTL}
@@ -276,28 +313,44 @@ func (c *route53Client) collectRecords(name string) (map[string]recordSet, error
 			name := strings.TrimSuffix(*set.Name, ".")
 			existing[name] = s
 		}
-		return true
-	})
-	return existing, err
+
+		if !resp.IsTruncated {
+			break
+		}
+		// Set the cursor to the next batch. From the AWS docs:
+		//
+		// To display the next page of results, get the values of NextRecordName,
+		// NextRecordType, and NextRecordIdentifier (if any) from the response. Then submit
+		// another ListResourceRecordSets request, and specify those values for
+		// StartRecordName, StartRecordType, and StartRecordIdentifier.
+		req.StartRecordIdentifier = resp.NextRecordIdentifier
+		req.StartRecordName = resp.NextRecordName
+		req.StartRecordType = resp.NextRecordType
+	}
+
+	return existing, nil
 }
 
 // newTXTChange creates a change to a TXT record.
-func newTXTChange(action, name string, ttl int64, values ...string) *route53.Change {
-	var c route53.Change
-	var r route53.ResourceRecordSet
-	var rrs []*route53.ResourceRecord
+func newTXTChange(action, name string, ttl int64, values ...string) types.Change {
+	r := types.ResourceRecordSet{
+		Type: types.RRTypeTxt,
+		Name: &name,
+		TTL:  &ttl,
+	}
+	var rrs []types.ResourceRecord
 	for _, val := range values {
-		rr := new(route53.ResourceRecord)
-		rr.SetValue(val)
+		var rr types.ResourceRecord
+		rr.Value = aws.String(val)
 		rrs = append(rrs, rr)
 	}
-	r.SetType("TXT")
-	r.SetName(name)
-	r.SetTTL(ttl)
-	r.SetResourceRecords(rrs)
-	c.SetAction(action)
-	c.SetResourceRecordSet(&r)
-	return &c
+
+	r.ResourceRecords = rrs
+
+	return types.Change{
+		Action:            types.ChangeAction(action),
+		ResourceRecordSet: &r,
+	}
 }
 
 // isSubdomain returns true if name is a subdomain of domain.
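computeChanges above compares prevValue, which Route53 returns as quoted chunks, against splitTXT(newValue). splitTXT itself is not part of this diff; as a reference, a sketch of what such an encoder presumably does (the exact chunk size and structure are assumptions):

	package main

	import (
		"fmt"
		"strconv"
		"strings"
	)

	// splitTXT encodes a raw value the way Route53 stores TXT data: a sequence
	// of quoted chunks, each kept under the 255-byte TXT string limit.
	func splitTXT(value string) string {
		var result strings.Builder
		for len(value) > 0 {
			rlen := len(value)
			if rlen > 253 {
				rlen = 253
			}
			result.WriteString(strconv.Quote(value[:rlen]))
			value = value[rlen:]
		}
		return result.String()
	}

	func main() {
		fmt.Println(splitTXT("enrtree-branch:")) // "enrtree-branch:"
	}
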
diff --git a/cmd/devp2p/dns_route53_test.go b/cmd/devp2p/dns_route53_test.go
index a2ef3791f6..e6eb516e6b 100644
--- a/cmd/devp2p/dns_route53_test.go
+++ b/cmd/devp2p/dns_route53_test.go
@@ -20,7 +20,7 @@ import (
 	"reflect"
 	"testing"
 
-	"github.com/aws/aws-sdk-go/service/route53"
+	"github.com/aws/aws-sdk-go-v2/service/route53/types"
 )
 
 // This test checks that computeChanges/splitChanges create DNS changes in
@@ -43,93 +43,93 @@ func TestRoute53ChangeSort(t *testing.T) {
 		"MHTDO6TMUBRIA2XWG5LUDACK24.n": "enr:-HW4QLAYqmrwllBEnzWWs7I5Ev2IAs7x_dZlbYdRdMUx5EyKHDXp7AV5CkuPGUPdvbv1_Ms1CPfhcGCvSElSosZmyoqAgmlkgnY0iXNlY3AyNTZrMaECriawHKWdDRk2xeZkrOXBQ0dfMFLHY4eENZwdufn1S1o",
 	}
 
-	wantChanges := []*route53.Change{
+	wantChanges := []types.Change{
 		{
-			Action: sp("CREATE"),
-			ResourceRecordSet: &route53.ResourceRecordSet{
+			Action: "CREATE",
+			ResourceRecordSet: &types.ResourceRecordSet{
 				Name: sp("2xs2367yhaxjfglzhvawlqd4zy.n"),
-				ResourceRecords: []*route53.ResourceRecord{{
+				ResourceRecords: []types.ResourceRecord{{
 					Value: sp(`"enr:-HW4QOFzoVLaFJnNhbgMoDXPnOvcdVuj7pDpqRvh6BRDO68aVi5ZcjB3vzQRZH2IcLBGHzo8uUN3snqmgTiE56CH3AMBgmlkgnY0iXNlY3AyNTZrMaECC2_24YYkYHEgdzxlSNKQEnHhuNAbNlMlWJxrJxbAFvA"`),
 				}},
 				TTL:  ip(treeNodeTTL),
-				Type: sp("TXT"),
+				Type: "TXT",
 			},
 		},
 		{
-			Action: sp("CREATE"),
-			ResourceRecordSet: &route53.ResourceRecordSet{
+			Action: "CREATE",
+			ResourceRecordSet: &types.ResourceRecordSet{
 				Name: sp("c7hrfpf3blgf3yr4dy5kx3smbe.n"),
-				ResourceRecords: []*route53.ResourceRecord{{
+				ResourceRecords: []types.ResourceRecord{{
 					Value: sp(`"enrtree://AM5FCQLWIZX2QFPNJAP7VUERCCRNGRHWZG3YYHIUV7BVDQ5FDPRT2@morenodes.example.org"`),
 				}},
 				TTL:  ip(treeNodeTTL),
-				Type: sp("TXT"),
+				Type: "TXT",
 			},
 		},
 		{
-			Action: sp("CREATE"),
-			ResourceRecordSet: &route53.ResourceRecordSet{
+			Action: "CREATE",
+			ResourceRecordSet: &types.ResourceRecordSet{
 				Name: sp("h4fht4b454p6uxfd7jcyq5pwdy.n"),
-				ResourceRecords: []*route53.ResourceRecord{{
+				ResourceRecords: []types.ResourceRecord{{
 					Value: sp(`"enr:-HW4QAggRauloj2SDLtIHN1XBkvhFZ1vtf1raYQp9TBW2RD5EEawDzbtSmlXUfnaHcvwOizhVYLtr7e6vw7NAf6mTuoCgmlkgnY0iXNlY3AyNTZrMaECjrXI8TLNXU0f8cthpAMxEshUyQlK-AM0PW2wfrnacNI"`),
 				}},
 				TTL:  ip(treeNodeTTL),
-				Type: sp("TXT"),
+				Type: "TXT",
 			},
 		},
 		{
-			Action: sp("CREATE"),
-			ResourceRecordSet: &route53.ResourceRecordSet{
+			Action: "CREATE",
+			ResourceRecordSet: &types.ResourceRecordSet{
 				Name: sp("jwxydbpxywg6fx3gmdibfa6cj4.n"),
-				ResourceRecords: []*route53.ResourceRecord{{
+				ResourceRecords: []types.ResourceRecord{{
 					Value: sp(`"enrtree-branch:2XS2367YHAXJFGLZHVAWLQD4ZY,H4FHT4B454P6UXFD7JCYQ5PWDY,MHTDO6TMUBRIA2XWG5LUDACK24"`),
 				}},
 				TTL:  ip(treeNodeTTL),
-				Type: sp("TXT"),
+				Type: "TXT",
 			},
 		},
 		{
-			Action: sp("CREATE"),
-			ResourceRecordSet: &route53.ResourceRecordSet{
+			Action: "CREATE",
+			ResourceRecordSet: &types.ResourceRecordSet{
 				Name: sp("mhtdo6tmubria2xwg5ludack24.n"),
-				ResourceRecords: []*route53.ResourceRecord{{
+				ResourceRecords: []types.ResourceRecord{{
 					Value: sp(`"enr:-HW4QLAYqmrwllBEnzWWs7I5Ev2IAs7x_dZlbYdRdMUx5EyKHDXp7AV5CkuPGUPdvbv1_Ms1CPfhcGCvSElSosZmyoqAgmlkgnY0iXNlY3AyNTZrMaECriawHKWdDRk2xeZkrOXBQ0dfMFLHY4eENZwdufn1S1o"`),
 				}},
 				TTL:  ip(treeNodeTTL),
-				Type: sp("TXT"),
+				Type: "TXT",
 			},
 		},
 		{
-			Action: sp("UPSERT"),
-			ResourceRecordSet: &route53.ResourceRecordSet{
+			Action: "UPSERT",
+			ResourceRecordSet: &types.ResourceRecordSet{
 				Name: sp("n"),
-				ResourceRecords: []*route53.ResourceRecord{{
+				ResourceRecords: []types.ResourceRecord{{
 					Value: sp(`"enrtree-root:v1 e=JWXYDBPXYWG6FX3GMDIBFA6CJ4 l=C7HRFPF3BLGF3YR4DY5KX3SMBE seq=1 sig=o908WmNp7LibOfPsr4btQwatZJ5URBr2ZAuxvK4UWHlsB9sUOTJQaGAlLPVAhM__XJesCHxLISo94z5Z2a463gA"`),
 				}},
 				TTL:  ip(rootTTL),
-				Type: sp("TXT"),
+				Type: "TXT",
 			},
 		},
 		{
-			Action: sp("DELETE"),
-			ResourceRecordSet: &route53.ResourceRecordSet{
+			Action: "DELETE",
+			ResourceRecordSet: &types.ResourceRecordSet{
 				Name: sp("2kfjogvxdqtxxugbh7gs7naaai.n"),
-				ResourceRecords: []*route53.ResourceRecord{
+				ResourceRecords: []types.ResourceRecord{
 					{Value: sp(`"enr:-HW4QO1ml1DdXLeZLsUxewnthhUy8eROqkDyoMTyavfks9JlYQIlMFEUoM78PovJDPQrAkrb3LRJ-""vtrymDguKCOIAWAgmlkgnY0iXNlY3AyNTZrMaEDffaGfJzgGhUif1JqFruZlYmA31HzathLSWxfbq_QoQ4"`)},
 				},
 				TTL:  ip(3333),
-				Type: sp("TXT"),
+				Type: "TXT",
 			},
 		},
 		{
-			Action: sp("DELETE"),
-			ResourceRecordSet: &route53.ResourceRecordSet{
+			Action: "DELETE",
+			ResourceRecordSet: &types.ResourceRecordSet{
 				Name: sp("fdxn3sn67na5dka4j2gok7bvqi.n"),
-				ResourceRecords: []*route53.ResourceRecord{{
+				ResourceRecords: []types.ResourceRecord{{
 					Value: sp(`"enrtree-branch:"`),
 				}},
 				TTL:  ip(treeNodeTTL),
-				Type: sp("TXT"),
+				Type: "TXT",
 			},
 		},
 	}
@@ -141,7 +141,7 @@ func TestRoute53ChangeSort(t *testing.T) {
 	}
 
 	// Check splitting according to size.
-	wantSplit := [][]*route53.Change{
+	wantSplit := [][]types.Change{
 		wantChanges[:4],
 		wantChanges[4:6],
 		wantChanges[6:],
@@ -152,7 +152,7 @@ func TestRoute53ChangeSort(t *testing.T) {
 	}
 
 	// Check splitting according to count.
-	wantSplit = [][]*route53.Change{
+	wantSplit = [][]types.Change{
 		wantChanges[:5],
 		wantChanges[5:],
 	}
@@ -162,5 +162,29 @@ func TestRoute53ChangeSort(t *testing.T) {
 	}
 }
 
+// This test checks that computeChanges compares the quoted value of the records correctly.
+func TestRoute53NoChange(t *testing.T) {
+	// Existing record set.
+	testTree0 := map[string]recordSet{
+		"n": {ttl: rootTTL, values: []string{
+			`"enrtree-root:v1 e=JWXYDBPXYWG6FX3GMDIBFA6CJ4 l=C7HRFPF3BLGF3YR4DY5KX3SMBE seq=1 sig=o908WmNp7LibOfPsr4btQwatZJ5URBr2ZAuxvK4UWHlsB9sUOTJQaGAlLPVAhM__XJesCHxLISo94z5Z2a463gA"`,
+		}},
+		"2xs2367yhaxjfglzhvawlqd4zy.n": {ttl: treeNodeTTL, values: []string{
+			`"enr:-HW4QOFzoVLaFJnNhbgMoDXPnOvcdVuj7pDpqRvh6BRDO68aVi5ZcjB3vzQRZH2IcLBGHzo8uUN3snqmgTiE56CH3AMBgmlkgnY0iXNlY3AyNTZrMaECC2_24YYkYHEgdzxlSNKQEnHhuNAbNlMlWJxrJxbAFvA"`,
+		}},
+	}
+	// New set.
+	testTree1 := map[string]string{
+		"n":                            "enrtree-root:v1 e=JWXYDBPXYWG6FX3GMDIBFA6CJ4 l=C7HRFPF3BLGF3YR4DY5KX3SMBE seq=1 sig=o908WmNp7LibOfPsr4btQwatZJ5URBr2ZAuxvK4UWHlsB9sUOTJQaGAlLPVAhM__XJesCHxLISo94z5Z2a463gA",
+		"2XS2367YHAXJFGLZHVAWLQD4ZY.n": "enr:-HW4QOFzoVLaFJnNhbgMoDXPnOvcdVuj7pDpqRvh6BRDO68aVi5ZcjB3vzQRZH2IcLBGHzo8uUN3snqmgTiE56CH3AMBgmlkgnY0iXNlY3AyNTZrMaECC2_24YYkYHEgdzxlSNKQEnHhuNAbNlMlWJxrJxbAFvA",
+	}
+
+	var client route53Client
+	changes := client.computeChanges("n", testTree1, testTree0)
+	if len(changes) > 0 {
+		t.Fatalf("wrong changes (got %d, want 0)", len(changes))
+	}
+}
+
 func sp(s string) *string { return &s }
 func ip(i int64) *int64   { return &i }
diff --git a/cmd/devp2p/dnscmd.go b/cmd/devp2p/dnscmd.go
index f56f0f34e4..50ab7bf983 100644
--- a/cmd/devp2p/dnscmd.go
+++ b/cmd/devp2p/dnscmd.go
@@ -77,7 +77,12 @@ var (
 		Usage:     "Deploy DNS TXT records to Amazon Route53",
 		ArgsUsage: "<tree-directory>",
 		Action:    dnsToRoute53,
-		Flags:     []cli.Flag{route53AccessKeyFlag, route53AccessSecretFlag, route53ZoneIDFlag},
+		Flags: []cli.Flag{
+			route53AccessKeyFlag,
+			route53AccessSecretFlag,
+			route53ZoneIDFlag,
+			route53RegionFlag,
+		},
 	}
 )
diff --git a/cmd/devp2p/internal/ethtest/chain_test.go b/cmd/devp2p/internal/ethtest/chain_test.go
index 5e4289d80a..ec98833ab5 100644
--- a/cmd/devp2p/internal/ethtest/chain_test.go
+++ b/cmd/devp2p/internal/ethtest/chain_test.go
@@ -35,7 +35,31 @@ func TestEthProtocolNegotiation(t *testing.T) {
 		expected uint32
 	}{
 		{
-			conn: &Conn{},
+			conn: &Conn{
+				ourHighestProtoVersion: 65,
+			},
+			caps: []p2p.Cap{
+				{Name: "eth", Version: 63},
+				{Name: "eth", Version: 64},
+				{Name: "eth", Version: 65},
+			},
+			expected: uint32(65),
+		},
+		{
+			conn: &Conn{
+				ourHighestProtoVersion: 65,
+			},
+			caps: []p2p.Cap{
+				{Name: "eth", Version: 63},
+				{Name: "eth", Version: 64},
+				{Name: "eth", Version: 65},
+			},
+			expected: uint32(65),
+		},
+		{
+			conn: &Conn{
+				ourHighestProtoVersion: 65,
+			},
 			caps: []p2p.Cap{
 				{Name: "eth", Version: 63},
 				{Name: "eth", Version: 64},
@@ -44,7 +68,20 @@ func TestEthProtocolNegotiation(t *testing.T) {
 			expected: uint32(65),
 		},
 		{
-			conn: &Conn{},
+			conn: &Conn{
+				ourHighestProtoVersion: 64,
+			},
+			caps: []p2p.Cap{
+				{Name: "eth", Version: 63},
+				{Name: "eth", Version: 64},
+				{Name: "eth", Version: 65},
+			},
+			expected: 64,
+		},
+		{
+			conn: &Conn{
+				ourHighestProtoVersion: 65,
+			},
 			caps: []p2p.Cap{
 				{Name: "eth", Version: 0},
 				{Name: "eth", Version: 89},
@@ -53,7 +90,20 @@ func TestEthProtocolNegotiation(t *testing.T) {
 			expected: uint32(65),
 		},
 		{
-			conn: &Conn{},
+			conn: &Conn{
+				ourHighestProtoVersion: 64,
+			},
+			caps: []p2p.Cap{
+				{Name: "eth", Version: 63},
+				{Name: "eth", Version: 64},
+				{Name: "wrongProto", Version: 65},
+			},
+			expected: uint32(64),
+		},
+		{
+			conn: &Conn{
+				ourHighestProtoVersion: 65,
+			},
 			caps: []p2p.Cap{
 				{Name: "eth", Version: 63},
 				{Name: "eth", Version: 64},
@@ -66,7 +116,7 @@ func TestEthProtocolNegotiation(t *testing.T) {
 	for i, tt := range tests {
 		t.Run(strconv.Itoa(i), func(t *testing.T) {
 			tt.conn.negotiateEthProtocol(tt.caps)
-			assert.Equal(t, tt.expected, uint32(tt.conn.ethProtocolVersion))
+			assert.Equal(t, tt.expected, uint32(tt.conn.negotiatedProtoVersion))
 		})
 	}
 }
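Taken together, the new cases pin down the negotiation rule: pick the highest "eth" capability the peer advertises that does not exceed ourHighestProtoVersion, and fall back to 0 (treated as a handshake failure) when no acceptable "eth" capability exists. The implementation lives in negotiateEthProtocol in types.go, changed later in this diff.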
diff --git a/cmd/devp2p/internal/ethtest/eth66_suite.go b/cmd/devp2p/internal/ethtest/eth66_suite.go
index 9ad6e4fa5e..276328f279 100644
--- a/cmd/devp2p/internal/ethtest/eth66_suite.go
+++ b/cmd/devp2p/internal/ethtest/eth66_suite.go
@@ -26,6 +26,16 @@ import (
 	"github.com/ethereum/go-ethereum/p2p"
 )
 
+// Is_66 checks if the node supports the eth66 protocol version,
+// and if not, exits the test suite
+func (s *Suite) Is_66(t *utesting.T) {
+	conn := s.dial66(t)
+	conn.handshake(t)
+	if conn.negotiatedProtoVersion < 66 {
+		t.Fail()
+	}
+}
+
 // TestStatus_66 attempts to connect to the given node and exchange
 // a status message with it on the eth66 protocol, and then check to
 // make sure the chain head is correct.
@@ -205,6 +215,10 @@ func (s *Suite) TestLargeAnnounce_66(t *utesting.T) {
 	}
 }
 
+func (s *Suite) TestOldAnnounce_66(t *utesting.T) {
+	s.oldAnnounce(t, s.setupConnection66(t), s.setupConnection66(t))
+}
+
 // TestMaliciousHandshake_66 tries to send malicious data during the handshake.
 func (s *Suite) TestMaliciousHandshake_66(t *utesting.T) {
 	conn := s.dial66(t)
diff --git a/cmd/devp2p/internal/ethtest/eth66_suiteHelpers.go b/cmd/devp2p/internal/ethtest/eth66_suiteHelpers.go
index 577f9d2ae5..d8faf31f74 100644
--- a/cmd/devp2p/internal/ethtest/eth66_suiteHelpers.go
+++ b/cmd/devp2p/internal/ethtest/eth66_suiteHelpers.go
@@ -46,6 +46,7 @@ func (s *Suite) dial66(t *utesting.T) *Conn {
 		t.Fatalf("could not dial: %v", err)
 	}
 	conn.caps = append(conn.caps, p2p.Cap{Name: "eth", Version: 66})
+	conn.ourHighestProtoVersion = 66
 	return conn
 }
 
@@ -141,8 +142,11 @@ func (c *Conn) readAndServe66(chain *Chain, timeout time.Duration) (uint64, Mess
 		if err != nil {
 			return 0, errorf("could not get headers for inbound header request: %v", err)
 		}
-
-		if err := c.Write(headers); err != nil {
+		resp := &eth.BlockHeadersPacket66{
+			RequestId:          reqID,
+			BlockHeadersPacket: eth.BlockHeadersPacket(headers),
+		}
+		if err := c.write66(resp, BlockHeaders{}.Code()); err != nil {
 			return 0, errorf("could not write to connection: %v", err)
 		}
 	default:
diff --git a/cmd/devp2p/internal/ethtest/suite.go b/cmd/devp2p/internal/ethtest/suite.go
index f1299589c4..f43545ad5a 100644
--- a/cmd/devp2p/internal/ethtest/suite.go
+++ b/cmd/devp2p/internal/ethtest/suite.go
@@ -19,6 +19,7 @@ package ethtest
 import (
 	"fmt"
 	"net"
+	"strings"
 	"time"
 
 	"github.com/davecgh/go-spew/spew"
@@ -65,7 +66,7 @@ func NewSuite(dest *enode.Node, chainfile string, genesisfile string) (*Suite, e
 	}, nil
 }
 
-func (s *Suite) EthTests() []utesting.Test {
+func (s *Suite) AllEthTests() []utesting.Test {
 	return []utesting.Test{
 		// status
 		{Name: "Status", Fn: s.TestStatus},
@@ -84,6 +85,8 @@ func (s *Suite) EthTests() []utesting.Test {
 		{Name: "Broadcast_66", Fn: s.TestBroadcast_66},
 		{Name: "TestLargeAnnounce", Fn: s.TestLargeAnnounce},
 		{Name: "TestLargeAnnounce_66", Fn: s.TestLargeAnnounce_66},
+		{Name: "TestOldAnnounce", Fn: s.TestOldAnnounce},
+		{Name: "TestOldAnnounce_66", Fn: s.TestOldAnnounce_66},
 		// malicious handshakes + status
 		{Name: "TestMaliciousHandshake", Fn: s.TestMaliciousHandshake},
 		{Name: "TestMaliciousStatus", Fn: s.TestMaliciousStatus},
@@ -97,6 +100,38 @@ func (s *Suite) EthTests() []utesting.Test {
 	}
 }
 
+func (s *Suite) EthTests() []utesting.Test {
+	return []utesting.Test{
+		{Name: "Status", Fn: s.TestStatus},
+		{Name: "GetBlockHeaders", Fn: s.TestGetBlockHeaders},
+		{Name: "GetBlockBodies", Fn: s.TestGetBlockBodies},
+		{Name: "Broadcast", Fn: s.TestBroadcast},
+		{Name: "TestLargeAnnounce", Fn: s.TestLargeAnnounce},
+		{Name: "TestMaliciousHandshake", Fn: s.TestMaliciousHandshake},
+		{Name: "TestMaliciousStatus", Fn: s.TestMaliciousStatus},
+		{Name: "TestMaliciousStatus_66", Fn: s.TestMaliciousStatus},
+		{Name: "TestTransactions", Fn: s.TestTransaction},
+		{Name: "TestMaliciousTransactions", Fn: s.TestMaliciousTx},
+	}
+}
+
+func (s *Suite) Eth66Tests() []utesting.Test {
+	return []utesting.Test{
+		// only proceed with eth66 test suite if node supports eth 66 protocol
+		{Name: "Status_66", Fn: s.TestStatus_66},
+		{Name: "GetBlockHeaders_66", Fn: s.TestGetBlockHeaders_66},
+		{Name: "TestSimultaneousRequests_66", Fn: s.TestSimultaneousRequests_66},
+		{Name: "TestSameRequestID_66", Fn: s.TestSameRequestID_66},
+		{Name: "TestZeroRequestID_66", Fn: s.TestZeroRequestID_66},
+		{Name: "GetBlockBodies_66", Fn: s.TestGetBlockBodies_66},
+		{Name: "Broadcast_66", Fn: s.TestBroadcast_66},
+		{Name: "TestLargeAnnounce_66", Fn: s.TestLargeAnnounce_66},
+		{Name: "TestMaliciousHandshake_66", Fn: s.TestMaliciousHandshake_66},
+		{Name: "TestTransactions_66", Fn: s.TestTransaction_66},
+		{Name: "TestMaliciousTransactions_66", Fn: s.TestMaliciousTx_66},
+	}
+}
+
 // TestStatus attempts to connect to the given node and exchange
 // a status message with it, and then check to make sure
 // the chain head is correct.
@@ -125,7 +160,7 @@ func (s *Suite) TestMaliciousStatus(t *utesting.T) {
 	// get protoHandshake
 	conn.handshake(t)
 	status := &Status{
-		ProtocolVersion: uint32(conn.ethProtocolVersion),
+		ProtocolVersion: uint32(conn.negotiatedProtoVersion),
 		NetworkID:       s.chain.chainConfig.GetChainID().Uint64(),
 		TD:              largeNumber(2),
 		Head:            s.chain.blocks[s.chain.Len()-1].Hash(),
@@ -357,6 +392,36 @@ func (s *Suite) TestLargeAnnounce(t *utesting.T) {
 	}
 }
 
+func (s *Suite) TestOldAnnounce(t *utesting.T) {
+	s.oldAnnounce(t, s.setupConnection(t), s.setupConnection(t))
+}
+
+func (s *Suite) oldAnnounce(t *utesting.T, sendConn, receiveConn *Conn) {
+	oldBlockAnnounce := &NewBlock{
+		Block: s.chain.blocks[len(s.chain.blocks)/2],
+		TD:    s.chain.blocks[len(s.chain.blocks)/2].Difficulty(),
+	}
+
+	if err := sendConn.Write(oldBlockAnnounce); err != nil {
+		t.Fatalf("could not write to connection: %v", err)
+	}
+
+	switch msg := receiveConn.ReadAndServe(s.chain, timeout*2).(type) {
+	case *NewBlock:
+		t.Fatalf("unexpected: block propagated: %s", pretty.Sdump(msg))
+	case *NewBlockHashes:
+		t.Fatalf("unexpected: block announced: %s", pretty.Sdump(msg))
+	case *Error:
+		errMsg := *msg
+		// check to make sure error is timeout (propagation didn't come through == test successful)
+		if !strings.Contains(errMsg.String(), "timeout") {
+			t.Fatalf("unexpected error: %v", pretty.Sdump(msg))
+		}
+	default:
+		t.Fatalf("unexpected: %s", pretty.Sdump(msg))
+	}
+}
+
 func (s *Suite) testAnnounce(t *utesting.T, sendConn, receiveConn *Conn, blockAnnouncement *NewBlock) {
 	// Announce the block.
 	if err := sendConn.Write(blockAnnouncement); err != nil {
@@ -421,6 +486,7 @@ func (s *Suite) dial() (*Conn, error) {
 		{Name: "eth", Version: 64},
 		{Name: "eth", Version: 65},
 	}
+	conn.ourHighestProtoVersion = 65
 	return &conn, nil
 }
diff --git a/cmd/devp2p/internal/ethtest/types.go b/cmd/devp2p/internal/ethtest/types.go
index 99b8137742..66d6a08546 100644
--- a/cmd/devp2p/internal/ethtest/types.go
+++ b/cmd/devp2p/internal/ethtest/types.go
@@ -123,9 +123,10 @@ func (nb NewPooledTransactionHashes) Code() int { return 24 }
 // Conn represents an individual connection with a peer
 type Conn struct {
 	*rlpx.Conn
-	ourKey             *ecdsa.PrivateKey
-	ethProtocolVersion uint
-	caps               []p2p.Cap
+	ourKey                 *ecdsa.PrivateKey
+	negotiatedProtoVersion uint
+	ourHighestProtoVersion uint
+	caps                   []p2p.Cap
 }
 
 func (c *Conn) Read() Message {
@@ -236,7 +237,7 @@ func (c *Conn) handshake(t *utesting.T) Message {
 			c.SetSnappy(true)
 		}
 		c.negotiateEthProtocol(msg.Caps)
-		if c.ethProtocolVersion == 0 {
+		if c.negotiatedProtoVersion == 0 {
 			t.Fatalf("unexpected eth protocol version")
 		}
 		return msg
@@ -254,11 +255,11 @@ func (c *Conn) negotiateEthProtocol(caps []p2p.Cap) {
 		if capability.Name != "eth" {
 			continue
 		}
-		if capability.Version > highestEthVersion && capability.Version <= 65 {
+		if capability.Version > highestEthVersion && capability.Version <= c.ourHighestProtoVersion {
 			highestEthVersion = capability.Version
 		}
 	}
-	c.ethProtocolVersion = highestEthVersion
+	c.negotiatedProtoVersion = highestEthVersion
 }
 
 // statusExchange performs a `Status` message exchange with the given
@@ -273,14 +274,15 @@ loop:
 	for {
 		switch msg := c.Read().(type) {
 		case *Status:
-			if msg.Head != chain.blocks[chain.Len()-1].Hash() {
-				t.Fatalf("wrong head block in status: %s", msg.Head.String())
+			if have, want := msg.Head, chain.blocks[chain.Len()-1].Hash(); have != want {
+				t.Fatalf("wrong head block in status, want: %#x (block %d), have %#x",
+					want, chain.blocks[chain.Len()-1].NumberU64(), have)
 			}
-			if msg.TD.Cmp(chain.TD(chain.Len())) != 0 {
-				t.Fatalf("wrong TD in status: %v", msg.TD)
+			if have, want := msg.TD.Cmp(chain.TD(chain.Len())), 0; have != want {
+				t.Fatalf("wrong TD in status: have %v want %v", have, want)
 			}
-			if !reflect.DeepEqual(msg.ForkID, chain.ForkID()) {
-				t.Fatalf("wrong fork ID in status: %v", msg.ForkID)
+			if have, want := msg.ForkID, chain.ForkID(); !reflect.DeepEqual(have, want) {
+				t.Fatalf("wrong fork ID in status: have %v, want %v", have, want)
 			}
 			message = msg
 			break loop
@@ -294,13 +296,13 @@ loop:
 		}
 	}
 	// make sure eth protocol version is set for negotiation
-	if c.ethProtocolVersion == 0 {
+	if c.negotiatedProtoVersion == 0 {
 		t.Fatalf("eth protocol version must be set in Conn")
 	}
 	if status == nil {
 		// write status message to client
 		status = &Status{
-			ProtocolVersion: uint32(c.ethProtocolVersion),
+			ProtocolVersion: uint32(c.negotiatedProtoVersion),
 			NetworkID:       chain.chainConfig.GetChainID().Uint64(),
 			TD:              chain.TD(chain.Len()),
 			Head:            chain.blocks[chain.Len()-1].Hash(),
diff --git a/cmd/devp2p/nodesetcmd.go b/cmd/devp2p/nodesetcmd.go
index dcc9f3c367..16e77bb556 100644
--- a/cmd/devp2p/nodesetcmd.go
+++ b/cmd/devp2p/nodesetcmd.go
@@ -179,7 +179,7 @@ func ethFilter(args []string) (nodeFilter, error) {
 	f := func(n nodeJSON) bool {
 		var eth struct {
 			ForkID forkid.ID
-			_      []rlp.RawValue `rlp:"tail"`
+			Tail   []rlp.RawValue `rlp:"tail"`
 		}
 		if n.N.Load(enr.WithEntry("eth", &eth)) != nil {
 			return false
@@ -192,7 +192,7 @@ func ethFilter(args []string) (nodeFilter, error) {
 func lesFilter(args []string) (nodeFilter, error) {
 	f := func(n nodeJSON) bool {
 		var les struct {
-			_ []rlp.RawValue `rlp:"tail"`
+			Tail []rlp.RawValue `rlp:"tail"`
 		}
 		return n.N.Load(enr.WithEntry("les", &les)) == nil
 	}
@@ -202,7 +202,7 @@ func lesFilter(args []string) (nodeFilter, error) {
 func snapFilter(args []string) (nodeFilter, error) {
 	f := func(n nodeJSON) bool {
 		var snap struct {
-			_ []rlp.RawValue `rlp:"tail"`
+			Tail []rlp.RawValue `rlp:"tail"`
 		}
 		return n.N.Load(enr.WithEntry("snap", &snap)) == nil
 	}
diff --git a/cmd/devp2p/rlpxcmd.go b/cmd/devp2p/rlpxcmd.go
index ac92818aa4..24a16f0b3c 100644
--- a/cmd/devp2p/rlpxcmd.go
+++ b/cmd/devp2p/rlpxcmd.go
@@ -22,6 +22,7 @@ import (
 
 	"github.com/ethereum/go-ethereum/cmd/devp2p/internal/ethtest"
 	"github.com/ethereum/go-ethereum/crypto"
+	"github.com/ethereum/go-ethereum/internal/utesting"
 	"github.com/ethereum/go-ethereum/p2p"
 	"github.com/ethereum/go-ethereum/p2p/rlpx"
 	"github.com/ethereum/go-ethereum/rlp"
@@ -98,5 +99,10 @@ func rlpxEthTest(ctx *cli.Context) error {
 	if err != nil {
 		exit(err)
 	}
-	return runTests(ctx, suite.EthTests())
+	// check if given node supports eth66, and if so, run eth66 protocol tests as well
+	is66Failed, _ := utesting.Run(utesting.Test{Name: "Is_66", Fn: suite.Is_66})
+	if is66Failed {
+		return runTests(ctx, suite.EthTests())
+	}
+	return runTests(ctx, suite.AllEthTests())
 }
diff --git a/cmd/echaindb/cmd/readChainconfig.go b/cmd/echaindb/cmd/readChainconfig.go
deleted file mode 100644
index b098952cd8..0000000000
--- a/cmd/echaindb/cmd/readChainconfig.go
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
-Copyright © 2020 NAME HERE <EMAIL ADDRESS>
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-package cmd
-
-import (
-	"fmt"
-	"log"
-
-	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/core/rawdb"
-	"github.com/spf13/cobra"
-	"github.com/tidwall/pretty"
-)
-
-// readChainconfigCmd represents the readChainconfig command
-var readChainconfigCmd = &cobra.Command{
-	Use:   "read-chainconfig",
-	Short: "Print stored chain config",
-	Long: `Chain configs are stored in the database and can be printed (dumped).
-This command does that.
-
-Use:
-
-    read-chainconfig <0xgenesisHash>
-
-Example:
-
-    read-chainconfig 0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3
-
-`,
-	Run: func(cmd *cobra.Command, args []string) {
-		if len(args) == 0 {
-			log.Fatal("need canonical hash key for config")
-		}
-
-		log.Println("Opening database...")
-		db, err := rawdb.NewLevelDBDatabase(chainDBPath, 256, 16, "")
-		if err != nil {
-			log.Fatal(err)
-		}
-
-		data, err := db.Get(rawdb.ConfigKey(common.HexToHash(args[0])))
-		if err != nil {
-			log.Fatal(err)
-		}
-
-		data = pretty.Pretty(data)
-		fmt.Println(string(data))
-	},
-}
-
-func init() {
-	rootCmd.AddCommand(readChainconfigCmd)
-
-	// Here you will define your flags and configuration settings.
-
-	// Cobra supports Persistent Flags which will work for this command
-	// and all subcommands, e.g.:
-	// readChainconfigCmd.PersistentFlags().String("foo", "", "A help for foo")
-
-	// Cobra supports local flags which will only run when this command
-	// is called directly, e.g.:
-	// readChainconfigCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
-}
diff --git a/cmd/echaindb/cmd/root.go b/cmd/echaindb/cmd/root.go
deleted file mode 100644
index d4c8829938..0000000000
--- a/cmd/echaindb/cmd/root.go
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
-Copyright © 2020 NAME HERE <EMAIL ADDRESS>
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-package cmd
-
-import (
-	"errors"
-	"fmt"
-	"os"
-
-	"github.com/spf13/cobra"
-
-	homedir "github.com/mitchellh/go-homedir"
-	"github.com/spf13/viper"
-)
-
-var cfgFile string
-
-var (
-	chainDBPath string = "./chaindata"
-)
-
-// rootCmd represents the base command when called without any subcommands
-var rootCmd = &cobra.Command{
-	Use:   "echaindb",
-	Short: "A utility for inspecting and managing the chain database.",
-	Long:  ``,
-	PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
-		if chainDBPath == "" {
-			return errors.New("empty chaindb path")
-		}
-		f, err := os.Stat(chainDBPath)
-		if err != nil {
-			return err
-		}
-		if !f.IsDir() {
-			return errors.New("chaindb path is not a directory")
-		}
-
-		return nil
-	},
-}
-
-// Execute adds all child commands to the root command and sets flags appropriately.
-// This is called by main.main(). It only needs to happen once to the rootCmd.
-func Execute() {
-	if err := rootCmd.Execute(); err != nil {
-		fmt.Println(err)
-		os.Exit(1)
-	}
-}
-
-func init() {
-	cobra.OnInitialize(initConfig)
-
-	// Here you will define your flags and configuration settings.
-	// Cobra supports persistent flags, which, if defined here,
-	// will be global for your application.
-
-	rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.echaindb.yaml)")
-	rootCmd.PersistentFlags().StringVar(&chainDBPath, "chaindb", "./chaindata", "path to chaindata directory")
-
-	// Cobra also supports local flags, which will only run
-	// when this action is called directly.
-	rootCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
-}
-
-// initConfig reads in config file and ENV variables if set.
-func initConfig() {
-	if cfgFile != "" {
-		// Use config file from the flag.
-		viper.SetConfigFile(cfgFile)
-	} else {
-		// Find home directory.
-		home, err := homedir.Dir()
-		if err != nil {
-			fmt.Println(err)
-			os.Exit(1)
-		}
-
-		// Search config in home directory with name ".echaindb" (without extension).
-		viper.AddConfigPath(home)
-		viper.SetConfigName(".echaindb")
-	}
-
-	viper.AutomaticEnv() // read in environment variables that match
-
-	// If a config file is found, read it in.
-	if err := viper.ReadInConfig(); err == nil {
-		fmt.Println("Using config file:", viper.ConfigFileUsed())
-	}
-}
diff --git a/cmd/echaindb/cmd/writeChainconfig.go b/cmd/echaindb/cmd/writeChainconfig.go
deleted file mode 100644
index d4163a4297..0000000000
--- a/cmd/echaindb/cmd/writeChainconfig.go
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
-Copyright © 2020 NAME HERE <EMAIL ADDRESS>
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-package cmd
-
-import (
-	"io/ioutil"
-	"log"
-	"os"
-
-	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/core/rawdb"
-	"github.com/ethereum/go-ethereum/params/confp/generic"
-	"github.com/spf13/cobra"
-)
-
-// writeChainconfigCmd represents the writeChainconfig command
-var writeChainconfigCmd = &cobra.Command{
-	Use:   "write-chainconfig",
-	Short: "Write a chain config to the database",
-	Long: `Chain configuration is stored in the chain database.
-This command writes that value.
-
-Value to write is taken from standard input (stdin).
-
-Use:
-
-    echaindb --chaindb write-chainconfig <0xgenesisHash>
-
-Example:
-
-    cat myconfig.json | echaindb --chaindb ./path/to/chaindata write-chainconfig 0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3
-
-    echaindb --chaindb ./path/to/chaindata write-chainconfig 0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3 < myconfig.json
-
-`,
-	Run: func(cmd *cobra.Command, args []string) {
-		if len(args) == 0 {
-			log.Fatal("need canonical hash key for config")
-		}
-
-		bs, err := ioutil.ReadAll(os.Stdin)
-		if err != nil {
-			log.Fatal(err)
-		}
-
-		conf, err := generic.UnmarshalChainConfigurator(bs)
-		if err != nil {
-			log.Fatal(err)
-		}
-
-		log.Println("Opening database...")
-		db, err := rawdb.NewLevelDBDatabase(chainDBPath, 256, 16, "")
-		if err != nil {
-			log.Fatal(err)
-		}
-
-		rawdb.WriteChainConfig(db, common.HexToHash(args[0]), conf)
-	},
-}
-
-func init() {
-	rootCmd.AddCommand(writeChainconfigCmd)
-
-	// Here you will define your flags and configuration settings.
-
-	// Cobra supports Persistent Flags which will work for this command
-	// and all subcommands, e.g.:
-	// writeChainconfigCmd.PersistentFlags().String("foo", "", "A help for foo")
-
-	// Cobra supports local flags which will only run when this command
-	// is called directly, e.g.:
-	// writeChainconfigCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
-}
diff --git a/cmd/echaindb/main.go b/cmd/echaindb/main.go
deleted file mode 100644
index 90e5a93421..0000000000
--- a/cmd/echaindb/main.go
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
-Copyright © 2020 NAME HERE <EMAIL ADDRESS>
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-package main
-
-import "github.com/ethereum/go-ethereum/cmd/echaindb/cmd"
-
-func main() {
-	cmd.Execute()
-}
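With the echaindb tool gone, there is no direct CLI replacement in this change; for reference, a rough sketch of the same chain-config lookup done programmatically (assuming a core-geth build where rawdb.ReadChainConfig exists, and using the readonly flag that NewLevelDBDatabase gains elsewhere in this diff):

	package main

	import (
		"fmt"
		"log"

		"github.com/ethereum/go-ethereum/common"
		"github.com/ethereum/go-ethereum/core/rawdb"
	)

	func main() {
		// Open the chain database read-only.
		db, err := rawdb.NewLevelDBDatabase("./chaindata", 256, 16, "", true)
		if err != nil {
			log.Fatal(err)
		}
		defer db.Close()

		genesis := common.HexToHash("0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3")
		fmt.Printf("%v\n", rawdb.ReadChainConfig(db, genesis))
	}
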
diff --git a/cmd/geth/chaincmd.go b/cmd/geth/chaincmd.go
index 6927ca996a..5612915ea0 100644
--- a/cmd/geth/chaincmd.go
+++ b/cmd/geth/chaincmd.go
@@ -61,7 +61,14 @@ It expects the genesis file as argument.`,
 		Usage:     "Dumps genesis block JSON configuration to stdout",
 		ArgsUsage: "",
 		Flags: []cli.Flag{
-			utils.DataDirFlag,
+			utils.MainnetFlag,
+			utils.RopstenFlag,
+			utils.RinkebyFlag,
+			utils.GoerliFlag,
+			utils.YoloV3Flag,
+			utils.ClassicFlag,
+			utils.MordorFlag,
+			utils.KottiFlag,
 		},
 		Category: "BLOCKCHAIN COMMANDS",
 		Description: `
@@ -193,7 +200,7 @@ func initGenesis(ctx *cli.Context) error {
 	defer stack.Close()
 
 	for _, name := range []string{"chaindata", "lightchaindata"} {
-		chaindb, err := stack.OpenDatabase(name, 0, 0, "")
+		chaindb, err := stack.OpenDatabase(name, 0, 0, "", false)
 		if err != nil {
 			utils.Fatalf("Failed to open database: %v", err)
 		}
@@ -208,6 +215,7 @@ func initGenesis(ctx *cli.Context) error {
 }
 
 func dumpGenesis(ctx *cli.Context) error {
+	// TODO(rjl493456442) support loading from the custom datadir
 	genesis := utils.MakeGenesis(ctx)
 	if genesis == nil {
 		genesis = params.DefaultGenesisBlock()
@@ -230,7 +238,7 @@ func importChain(ctx *cli.Context) error {
 	stack, _ := makeConfigNode(ctx)
 	defer stack.Close()
 
-	chain, db := utils.MakeChain(ctx, stack, false)
+	chain, db := utils.MakeChain(ctx, stack)
 	defer db.Close()
 
 	// Start periodically gathering memory profiles
@@ -305,7 +313,7 @@ func exportChain(ctx *cli.Context) error {
 	stack, _ := makeConfigNode(ctx)
 	defer stack.Close()
 
-	chain, _ := utils.MakeChain(ctx, stack, true)
+	chain, _ := utils.MakeChain(ctx, stack)
 	start := time.Now()
 
 	var err error
@@ -322,6 +330,9 @@ func exportChain(ctx *cli.Context) error {
 		if first < 0 || last < 0 {
 			utils.Fatalf("Export error: block number must be greater than 0\n")
 		}
+		if head := chain.CurrentFastBlock(); uint64(last) > head.NumberU64() {
+			utils.Fatalf("Export error: block number %d larger than head block %d\n", uint64(last), head.NumberU64())
+		}
 		err = utils.ExportAppendChain(chain, fp, uint64(first), uint64(last))
 	}
 
@@ -341,7 +352,7 @@ func importPreimages(ctx *cli.Context) error {
 	stack, _ := makeConfigNode(ctx)
 	defer stack.Close()
 
-	db := utils.MakeChainDatabase(ctx, stack)
+	db := utils.MakeChainDatabase(ctx, stack, false)
 	start := time.Now()
 
 	if err := utils.ImportPreimages(db, ctx.Args().First()); err != nil {
@@ -360,7 +371,7 @@ func exportPreimages(ctx *cli.Context) error {
 	stack, _ := makeConfigNode(ctx)
 	defer stack.Close()
 
-	db := utils.MakeChainDatabase(ctx, stack)
+	db := utils.MakeChainDatabase(ctx, stack, true)
 	start := time.Now()
 
 	if err := utils.ExportPreimages(db, ctx.Args().First()); err != nil {
@@ -374,21 +385,27 @@ func dump(ctx *cli.Context) error {
 	stack, _ := makeConfigNode(ctx)
 	defer stack.Close()
 
-	chain, chainDb := utils.MakeChain(ctx, stack, true)
-	defer chainDb.Close()
+	db := utils.MakeChainDatabase(ctx, stack, true)
 	for _, arg := range ctx.Args() {
-		var block *types.Block
+		var header *types.Header
 		if hashish(arg) {
-			block = chain.GetBlockByHash(common.HexToHash(arg))
+			hash := common.HexToHash(arg)
+			number := rawdb.ReadHeaderNumber(db, hash)
+			if number != nil {
+				header = rawdb.ReadHeader(db, hash, *number)
+			}
 		} else {
-			num, _ := strconv.Atoi(arg)
-			block = chain.GetBlockByNumber(uint64(num))
+			number, _ := strconv.Atoi(arg)
+			hash := rawdb.ReadCanonicalHash(db, uint64(number))
+			if hash != (common.Hash{}) {
+				header = rawdb.ReadHeader(db, hash, uint64(number))
+			}
 		}
-		if block == nil {
+		if header == nil {
 			fmt.Println("{}")
 			utils.Fatalf("block not found")
 		} else {
-			state, err := state.New(block.Root(), state.NewDatabase(chainDb), nil)
+			state, err := state.New(header.Root, state.NewDatabase(db), nil)
 			if err != nil {
 				utils.Fatalf("could not create new state: %v", err)
 			}
diff --git a/cmd/geth/dao_test.go b/cmd/geth/dao_test.go
index 21fa64d1df..67a5f89772 100644
--- a/cmd/geth/dao_test.go
+++ b/cmd/geth/dao_test.go
@@ -107,7 +107,7 @@ func testDAOForkBlockNewChain(t *testing.T, test int, genesis string, expectBloc
 	}
 	// Retrieve the DAO config flag from the database
 	path := filepath.Join(datadir, "geth", "chaindata")
-	db, err := rawdb.NewLevelDBDatabase(path, 0, 0, "")
+	db, err := rawdb.NewLevelDBDatabase(path, 0, 0, "", false)
 	if err != nil {
 		t.Fatalf("test %d: failed to open test database: %v", test, err)
 	}
WARNING: This operation may take a very long time to finish, and may cause database corruption if it is aborted during execution!`, } dbGetCmd = cli.Command{ - Action: dbGet, - Name: "get", - Usage: "Show the value of a database key", - ArgsUsage: "<hex-encoded key>", + Action: utils.MigrateFlags(dbGet), + Name: "get", + Usage: "Show the value of a database key", + ArgsUsage: "<hex-encoded key>", + Flags: []cli.Flag{ + utils.DataDirFlag, + utils.SyncModeFlag, + utils.MainnetFlag, + utils.RopstenFlag, + utils.RinkebyFlag, + utils.GoerliFlag, + utils.YoloV3Flag, + utils.ClassicFlag, + utils.MordorFlag, + utils.KottiFlag, + }, Description: "This command looks up the specified database key from the database.", } dbDeleteCmd = cli.Command{ - Action: dbDelete, + Action: utils.MigrateFlags(dbDelete), Name: "delete", Usage: "Delete a database key (WARNING: may corrupt your database)", ArgsUsage: "<hex-encoded key>", + Flags: []cli.Flag{ + utils.DataDirFlag, + utils.SyncModeFlag, + utils.MainnetFlag, + utils.RopstenFlag, + utils.RinkebyFlag, + utils.GoerliFlag, + utils.YoloV3Flag, + utils.ClassicFlag, + utils.MordorFlag, + utils.KottiFlag, + }, Description: `This command deletes the specified database key from the database. WARNING: This is a low-level operation which may cause database corruption!`, } dbPutCmd = cli.Command{ - Action: dbPut, + Action: utils.MigrateFlags(dbPut), Name: "put", Usage: "Set the value of a database key (WARNING: may corrupt your database)", ArgsUsage: "<hex-encoded key> <hex-encoded value>", + Flags: []cli.Flag{ + utils.DataDirFlag, + utils.SyncModeFlag, + utils.MainnetFlag, + utils.RopstenFlag, + utils.RinkebyFlag, + utils.GoerliFlag, + utils.YoloV3Flag, + utils.ClassicFlag, + utils.MordorFlag, + utils.KottiFlag, + }, Description: `This command sets a given database key to the given value. WARNING: This is a low-level operation which may cause database corruption!`, } + dbGetSlotsCmd = cli.Command{ + Action: utils.MigrateFlags(dbDumpTrie), + Name: "dumptrie", + Usage: "Show the storage key/values of a given storage trie", + ArgsUsage: "<hex-encoded storage trie root> <hex-encoded start (optional)> <int max elements (optional)>", + Flags: []cli.Flag{ + utils.DataDirFlag, + utils.SyncModeFlag, + utils.MainnetFlag, + utils.RopstenFlag, + utils.RinkebyFlag, + utils.GoerliFlag, + utils.YoloV3Flag, + utils.ClassicFlag, + utils.MordorFlag, + utils.KottiFlag, + }, + Description: "This command prints the key/value slots of the given storage trie.", + } ) func removeDB(ctx *cli.Context) error { @@ -192,10 +285,10 @@ func inspect(ctx *cli.Context) error { stack, _ := makeConfigNode(ctx) defer stack.Close() - _, chainDb := utils.MakeChain(ctx, stack, true) - defer chainDb.Close() + db := utils.MakeChainDatabase(ctx, stack, true) + defer db.Close() - return rawdb.InspectDatabase(chainDb, prefix, start) + return rawdb.InspectDatabase(db, prefix, start) } func showLeveldbStats(db ethdb.Stater) { @@ -214,48 +307,32 @@ func dbStats(ctx *cli.Context) error { stack, _ := makeConfigNode(ctx) defer stack.Close() - path := stack.ResolvePath("chaindata") - db, err := leveldb.NewCustom(path, "", func(options *opt.Options) { - options.ReadOnly = true - }) - if err != nil { - return err - } + + db := utils.MakeChainDatabase(ctx, stack, true) + defer db.Close() + showLeveldbStats(db) - err = db.Close() - if err != nil { - log.Info("Close err", "error", err) - } return nil } func dbCompact(ctx *cli.Context) error { stack, _ := makeConfigNode(ctx) defer stack.Close() - path := stack.ResolvePath("chaindata") - cache := ctx.GlobalInt(utils.CacheFlag.Name) * ctx.GlobalInt(utils.CacheDatabaseFlag.Name) / 100 - db, err := 
leveldb.NewCustom(path, "", func(options *opt.Options) { - options.OpenFilesCacheCapacity = utils.MakeDatabaseHandles() - options.BlockCacheCapacity = cache / 2 * opt.MiB - options.WriteBuffer = cache / 4 * opt.MiB // Two of these are used internally - }) - if err != nil { - return err - } + + db := utils.MakeChainDatabase(ctx, stack, false) + defer db.Close() + + log.Info("Stats before compaction") showLeveldbStats(db) + log.Info("Triggering compaction") - err = db.Compact(nil, nil) - if err != nil { + if err := db.Compact(nil, nil); err != nil { log.Info("Compact err", "error", err) + return err } + log.Info("Stats after compaction") showLeveldbStats(db) - log.Info("Closing db") - err = db.Close() - if err != nil { - log.Info("Close err", "error", err) - } - log.Info("Exiting") - return err + return nil } // dbGet shows the value of a given database key @@ -265,14 +342,10 @@ func dbGet(ctx *cli.Context) error { } stack, _ := makeConfigNode(ctx) defer stack.Close() - path := stack.ResolvePath("chaindata") - db, err := leveldb.NewCustom(path, "", func(options *opt.Options) { - options.ReadOnly = true - }) - if err != nil { - return err - } + + db := utils.MakeChainDatabase(ctx, stack, true) defer db.Close() + key, err := hexutil.Decode(ctx.Args().Get(0)) if err != nil { log.Info("Could not decode the key", "error", err) @@ -283,7 +356,7 @@ func dbGet(ctx *cli.Context) error { log.Info("Get operation failed", "error", err) return err } - fmt.Printf("key %#x:\n\t%#x\n", key, data) + fmt.Printf("key %#x: %#x\n", key, data) return nil } @@ -294,13 +367,19 @@ func dbDelete(ctx *cli.Context) error { } stack, _ := makeConfigNode(ctx) defer stack.Close() - db := utils.MakeChainDatabase(ctx, stack) + + db := utils.MakeChainDatabase(ctx, stack, false) defer db.Close() + key, err := hexutil.Decode(ctx.Args().Get(0)) if err != nil { log.Info("Could not decode the key", "error", err) return err } + data, err := db.Get(key) + if err == nil { + fmt.Printf("Previous value: %#x\n", data) + } if err = db.Delete(key); err != nil { log.Info("Delete operation returned an error", "error", err) return err @@ -315,8 +394,10 @@ func dbPut(ctx *cli.Context) error { } stack, _ := makeConfigNode(ctx) defer stack.Close() - db := utils.MakeChainDatabase(ctx, stack) + + db := utils.MakeChainDatabase(ctx, stack, false) defer db.Close() + var ( key []byte value []byte @@ -335,7 +416,57 @@ func dbPut(ctx *cli.Context) error { } data, err = db.Get(key) if err == nil { - fmt.Printf("Previous value:\n%#x\n", data) + fmt.Printf("Previous value: %#x\n", data) } return db.Put(key, value) } + +// dbDumpTrie shows the key-value slots of a given storage trie +func dbDumpTrie(ctx *cli.Context) error { + if ctx.NArg() < 1 { + return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage) + } + stack, _ := makeConfigNode(ctx) + defer stack.Close() + + db := utils.MakeChainDatabase(ctx, stack, true) + defer db.Close() + var ( + root []byte + start []byte + max = int64(-1) + err error + ) + if root, err = hexutil.Decode(ctx.Args().Get(0)); err != nil { + log.Info("Could not decode the root", "error", err) + return err + } + stRoot := common.BytesToHash(root) + if ctx.NArg() >= 2 { + if start, err = hexutil.Decode(ctx.Args().Get(1)); err != nil { + log.Info("Could not decode the seek position", "error", err) + return err + } + } + if ctx.NArg() >= 3 { + if max, err = strconv.ParseInt(ctx.Args().Get(2), 10, 64); err != nil { + log.Info("Could not decode the max count", "error", err) + return err + } + } + theTrie, err := 
trie.New(stRoot, trie.NewDatabase(db)) + if err != nil { + return err + } + var count int64 + it := trie.NewIterator(theTrie.NodeIterator(start)) + for it.Next() { + if max > 0 && count == max { + fmt.Printf("Exiting after %d values\n", count) + break + } + fmt.Printf(" %d. key %#x: %#x\n", count, it.Key, it.Value) + count++ + } + return it.Err +} diff --git a/cmd/geth/main.go b/cmd/geth/main.go index 094e672904..f7aaed5245 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -19,9 +19,7 @@ package main import ( "fmt" - "math" "os" - godebug "runtime/debug" "sort" "strconv" "strings" @@ -41,7 +39,6 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/node" - gopsutil "github.com/shirou/gopsutil/mem" "gopkg.in/urfave/cli.v1" ) @@ -142,13 +139,13 @@ var ( utils.DeveloperFlag, utils.DeveloperPeriodFlag, utils.DeveloperPoWFlag, - utils.ClassicFlag, - utils.MordorFlag, utils.RopstenFlag, utils.RinkebyFlag, - utils.KottiFlag, utils.GoerliFlag, utils.YoloV3Flag, + utils.ClassicFlag, + utils.MordorFlag, + utils.KottiFlag, utils.VMEnableDebugFlag, utils.NetworkIdFlag, utils.EthStatsURLFlag, @@ -160,6 +157,7 @@ var ( utils.GpoMaxGasPriceFlag, utils.EWASMInterpreterFlag, utils.EVMInterpreterFlag, + utils.MinerNotifyFullFlag, utils.ECBP1100Flag, utils.ECBP1100NoDisableFlag, configFileFlag, @@ -335,25 +333,6 @@ func prepare(ctx *cli.Context) { log.Info("Dropping default light client cache", "provided", ctx.GlobalInt(utils.CacheFlag.Name), "updated", 128) ctx.GlobalSet(utils.CacheFlag.Name, strconv.Itoa(128)) } - // Cap the cache allowance and tune the garbage collector - mem, err := gopsutil.VirtualMemory() - if err == nil { - if 32<<(^uintptr(0)>>63) == 32 && mem.Total > 2*1024*1024*1024 { - log.Warn("Lowering memory allowance on 32bit arch", "available", mem.Total/1024/1024, "addressable", 2*1024) - mem.Total = 2 * 1024 * 1024 * 1024 - } - allowance := int(mem.Total / 1024 / 1024 / 3) - if cache := ctx.GlobalInt(utils.CacheFlag.Name); cache > allowance { - log.Warn("Sanitizing cache to Go's GC limits", "provided", cache, "updated", allowance) - ctx.GlobalSet(utils.CacheFlag.Name, strconv.Itoa(allowance)) - } - } - // Ensure Go's GC ignores the database cache for trigger percentage - cache := ctx.GlobalInt(utils.CacheFlag.Name) - gogc := math.Max(20, math.Min(100, 100/(float64(cache)/1024))) - - log.Debug("Sanitizing Go's GC trigger", "percent", int(gogc)) - godebug.SetGCPercent(int(gogc)) // Start metrics export if enabled utils.SetupMetrics(ctx) diff --git a/cmd/geth/snapshot.go b/cmd/geth/snapshot.go index b59c530c27..e8f6a35438 100644 --- a/cmd/geth/snapshot.go +++ b/cmd/geth/snapshot.go @@ -57,6 +57,7 @@ var ( Category: "MISCELLANEOUS COMMANDS", Flags: []cli.Flag{ utils.DataDirFlag, + utils.AncientFlag, utils.RopstenFlag, utils.RinkebyFlag, utils.GoerliFlag, @@ -86,6 +87,7 @@ the trie clean cache with default directory will be deleted. Category: "MISCELLANEOUS COMMANDS", Flags: []cli.Flag{ utils.DataDirFlag, + utils.AncientFlag, utils.RopstenFlag, utils.RinkebyFlag, utils.GoerliFlag, @@ -105,6 +107,7 @@ In other words, this command does the snapshot to trie conversion. Category: "MISCELLANEOUS COMMANDS", Flags: []cli.Flag{ utils.DataDirFlag, + utils.AncientFlag, utils.RopstenFlag, utils.RinkebyFlag, utils.GoerliFlag, @@ -126,6 +129,7 @@ It's also usable without snapshot enabled. 
Category: "MISCELLANEOUS COMMANDS", Flags: []cli.Flag{ utils.DataDirFlag, + utils.AncientFlag, utils.RopstenFlag, utils.RinkebyFlag, utils.GoerliFlag, @@ -148,10 +152,8 @@ func pruneState(ctx *cli.Context) error { stack, config := makeConfigNode(ctx) defer stack.Close() - chain, chaindb := utils.MakeChain(ctx, stack, true) - defer chaindb.Close() - - pruner, err := pruner.NewPruner(chaindb, chain.CurrentBlock().Header(), stack.ResolvePath(""), stack.ResolvePath(config.Eth.TrieCleanCacheJournal), ctx.GlobalUint64(utils.BloomFilterSizeFlag.Name)) + chaindb := utils.MakeChainDatabase(ctx, stack, false) + pruner, err := pruner.NewPruner(chaindb, stack.ResolvePath(""), stack.ResolvePath(config.Eth.TrieCleanCacheJournal), ctx.GlobalUint64(utils.BloomFilterSizeFlag.Name)) if err != nil { log.Error("Failed to open snapshot tree", "error", err) return err @@ -179,10 +181,13 @@ func verifyState(ctx *cli.Context) error { stack, _ := makeConfigNode(ctx) defer stack.Close() - chain, chaindb := utils.MakeChain(ctx, stack, true) - defer chaindb.Close() - - snaptree, err := snapshot.New(chaindb, trie.NewDatabase(chaindb), 256, chain.CurrentBlock().Root(), false, false, false) + chaindb := utils.MakeChainDatabase(ctx, stack, true) + headBlock := rawdb.ReadHeadBlock(chaindb) + if headBlock == nil { + log.Error("Failed to load head block") + return errors.New("no head block") + } + snaptree, err := snapshot.New(chaindb, trie.NewDatabase(chaindb), 256, headBlock.Root(), false, false, false) if err != nil { log.Error("Failed to open snapshot tree", "error", err) return err @@ -191,7 +196,7 @@ func verifyState(ctx *cli.Context) error { log.Error("Too many arguments given") return errors.New("too many arguments") } - var root = chain.CurrentBlock().Root() + var root = headBlock.Root() if ctx.NArg() == 1 { root, err = parseRoot(ctx.Args()[0]) if err != nil { @@ -214,19 +219,16 @@ func traverseState(ctx *cli.Context) error { stack, _ := makeConfigNode(ctx) defer stack.Close() - chain, chaindb := utils.MakeChain(ctx, stack, true) - defer chaindb.Close() - + chaindb := utils.MakeChainDatabase(ctx, stack, true) + headBlock := rawdb.ReadHeadBlock(chaindb) + if headBlock == nil { + log.Error("Failed to load head block") + return errors.New("no head block") + } if ctx.NArg() > 1 { log.Error("Too many arguments given") return errors.New("too many arguments") } - // Use the HEAD root as the default - head := chain.CurrentBlock() - if head == nil { - log.Error("Head block is missing") - return errors.New("head block is missing") - } var ( root common.Hash err error @@ -239,8 +241,8 @@ func traverseState(ctx *cli.Context) error { } log.Info("Start traversing the state", "root", root) } else { - root = head.Root() - log.Info("Start traversing the state", "root", root, "number", head.NumberU64()) + root = headBlock.Root() + log.Info("Start traversing the state", "root", root, "number", headBlock.NumberU64()) } triedb := trie.NewDatabase(chaindb) t, err := trie.NewSecure(root, triedb) @@ -307,19 +309,16 @@ func traverseRawState(ctx *cli.Context) error { stack, _ := makeConfigNode(ctx) defer stack.Close() - chain, chaindb := utils.MakeChain(ctx, stack, true) - defer chaindb.Close() - + chaindb := utils.MakeChainDatabase(ctx, stack, true) + headBlock := rawdb.ReadHeadBlock(chaindb) + if headBlock == nil { + log.Error("Failed to load head block") + return errors.New("no head block") + } if ctx.NArg() > 1 { log.Error("Too many arguments given") return errors.New("too many arguments") } - // Use the HEAD root as the default - head 
:= chain.CurrentBlock() - if head == nil { - log.Error("Head block is missing") - return errors.New("head block is missing") - } var ( root common.Hash err error @@ -332,8 +331,8 @@ func traverseRawState(ctx *cli.Context) error { } log.Info("Start traversing the state", "root", root) } else { - root = head.Root() - log.Info("Start traversing the state", "root", root, "number", head.NumberU64()) + root = headBlock.Root() + log.Info("Start traversing the state", "root", root, "number", headBlock.NumberU64()) } triedb := trie.NewDatabase(chaindb) t, err := trie.NewSecure(root, triedb) diff --git a/cmd/geth/usage.go b/cmd/geth/usage.go index 22010d86dc..55bfd259de 100644 --- a/cmd/geth/usage.go +++ b/cmd/geth/usage.go @@ -188,6 +188,7 @@ var AppHelpFlagGroups = []flags.FlagGroup{ utils.MiningEnabledFlag, utils.MinerThreadsFlag, utils.MinerNotifyFlag, + utils.MinerNotifyFullFlag, utils.MinerGasPriceFlag, utils.MinerGasTargetFlag, utils.MinerGasLimitFlag, diff --git a/cmd/utils/diskusage.go b/cmd/utils/diskusage.go index da696de6bf..c3d51765f8 100644 --- a/cmd/utils/diskusage.go +++ b/cmd/utils/diskusage.go @@ -14,7 +14,7 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . -// +build !windows +// +build !windows,!openbsd package utils diff --git a/cmd/utils/diskusage_openbsd.go b/cmd/utils/diskusage_openbsd.go new file mode 100644 index 0000000000..54f759d291 --- /dev/null +++ b/cmd/utils/diskusage_openbsd.go @@ -0,0 +1,43 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +// +build openbsd + +package utils + +import ( + "fmt" + + "golang.org/x/sys/unix" +) + +func getFreeDiskSpace(path string) (uint64, error) { + var stat unix.Statfs_t + if err := unix.Statfs(path, &stat); err != nil { + return 0, fmt.Errorf("failed to call Statfs: %v", err) + } + + // Available blocks * size per block = available space in bytes + var bavail = stat.F_bavail + // Not sure if the following check is necessary for OpenBSD + if stat.F_bavail < 0 { + // FreeBSD can have a negative number of blocks available + // because of the grace limit. 
+ bavail = 0 + } + //nolint:unconvert + return uint64(bavail) * uint64(stat.F_bsize), nil +} diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 22b361192e..5caa492bdc 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -22,10 +22,12 @@ import ( "fmt" "io" "io/ioutil" + "math" "math/big" "os" "path/filepath" "runtime" + godebug "runtime/debug" "strconv" "strings" "text/tabwriter" @@ -36,7 +38,7 @@ import ( "github.com/ethereum/go-ethereum/accounts/keystore" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/fdlimit" - "github.com/ethereum/go-ethereum/common/math" + "github.com/ethereum/go-ethereum/consensus" "github.com/ethereum/go-ethereum/consensus/clique" "github.com/ethereum/go-ethereum/consensus/ethash" @@ -70,6 +72,7 @@ import ( "github.com/ethereum/go-ethereum/params/types/genesisT" "github.com/ethereum/go-ethereum/params/vars" pcsclite "github.com/gballet/go-libpcsclite" + gopsutil "github.com/shirou/gopsutil/mem" "gopkg.in/urfave/cli.v1" ) @@ -463,6 +466,10 @@ var ( Name: "miner.notify", Usage: "Comma separated HTTP URL list to notify of new work packages", } + MinerNotifyFullFlag = cli.BoolFlag{ + Name: "miner.notify.full", + Usage: "Notify with pending block headers instead of work packages", + } MinerGasTargetFlag = cli.Uint64Flag{ Name: "miner.gastarget", Usage: "Target gas floor for mined blocks", @@ -1472,6 +1479,7 @@ func setMiner(ctx *cli.Context, cfg *miner.Config) { if ctx.GlobalIsSet(MinerNotifyFlag.Name) { cfg.Notify = strings.Split(ctx.GlobalString(MinerNotifyFlag.Name), ",") } + cfg.NotifyFull = ctx.GlobalBool(MinerNotifyFullFlag.Name) if ctx.GlobalIsSet(MinerExtraDataFlag.Name) { cfg.ExtraData = []byte(ctx.GlobalString(MinerExtraDataFlag.Name)) } @@ -1591,6 +1599,26 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) { setWhitelist(ctx, cfg) setLes(ctx, cfg) + // Cap the cache allowance and tune the garbage collector + mem, err := gopsutil.VirtualMemory() + if err == nil { + if 32<<(^uintptr(0)>>63) == 32 && mem.Total > 2*1024*1024*1024 { + log.Warn("Lowering memory allowance on 32bit arch", "available", mem.Total/1024/1024, "addressable", 2*1024) + mem.Total = 2 * 1024 * 1024 * 1024 + } + allowance := int(mem.Total / 1024 / 1024 / 3) + if cache := ctx.GlobalInt(CacheFlag.Name); cache > allowance { + log.Warn("Sanitizing cache to Go's GC limits", "provided", cache, "updated", allowance) + ctx.GlobalSet(CacheFlag.Name, strconv.Itoa(allowance)) + } + } + // Ensure Go's GC ignores the database cache for trigger percentage + cache := ctx.GlobalInt(CacheFlag.Name) + gogc := math.Max(20, math.Min(100, 100/(float64(cache)/1024))) + + log.Debug("Sanitizing Go's GC trigger", "percent", int(gogc)) + godebug.SetGCPercent(int(gogc)) + if ctx.GlobalIsSet(SyncModeFlag.Name) { cfg.SyncMode = *GlobalTextMarshaler(ctx, SyncModeFlag.Name).(*downloader.SyncMode) } @@ -1801,7 +1829,7 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) { if ctx.GlobalIsSet(DataDirFlag.Name) { // Check if we have an already initialized chain and fall back to // that if so. Otherwise we need to generate a new genesis spec. - chaindb := MakeChainDatabase(ctx, stack) + chaindb := MakeChainDatabase(ctx, stack, true) if rawdb.ReadCanonicalHash(chaindb, 0) != (common.Hash{}) { cfg.Genesis = nil // fallback to db content } @@ -1929,7 +1957,7 @@ func SplitTagsFlag(tagsFlag string) map[string]string { }
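(Editor's note: the GC tuning that moved into SetEthConfig above is easiest to sanity-check in isolation. The following is a minimal standalone sketch, not part of the patch, that reproduces the gogc formula and prints the trigger chosen for a few cache sizes.)

package main

import (
	"fmt"
	"math"
)

// gcPercent mirrors the formula above: the GC trigger percentage shrinks as
// the database cache grows, clamped to the [20, 100] range.
func gcPercent(cacheMB int) int {
	return int(math.Max(20, math.Min(100, 100/(float64(cacheMB)/1024))))
}

func main() {
	for _, cache := range []int{512, 1024, 4096, 16384} {
		// Prints 100, 100, 25, 20: a 4 GB cache triggers GC at 25% heap
		// growth, and very large caches bottom out at the 20% floor.
		fmt.Printf("cache=%d MB -> GOGC=%d\n", cache, gcPercent(cache))
	}
}

// MakeChainDatabase opens a LevelDB using the flags passed to the client and will hard crash if it fails. 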
-func MakeChainDatabase(ctx *cli.Context, stack *node.Node) ethdb.Database { +func MakeChainDatabase(ctx *cli.Context, stack *node.Node, readonly bool) ethdb.Database { var ( cache = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheDatabaseFlag.Name) / 100 handles = MakeDatabaseHandles() @@ -1943,9 +1971,9 @@ func MakeChainDatabase(ctx *cli.Context, stack *node.Node) ethdb.Database { name = "lightchaindata" } if ctx.GlobalIsSet(AncientRPCFlag.Name) { - chainDb, err = stack.OpenDatabaseWithFreezerRemote(name, cache, handles, ctx.GlobalString(AncientRPCFlag.Name)) + chainDb, err = stack.OpenDatabaseWithFreezerRemote(name, cache, handles, ctx.GlobalString(AncientRPCFlag.Name), readonly) } else { - chainDb, err = stack.OpenDatabaseWithFreezer(name, cache, handles, ctx.GlobalString(AncientFlag.Name), "") + chainDb, err = stack.OpenDatabaseWithFreezer(name, cache, handles, ctx.GlobalString(AncientFlag.Name), "", readonly) } if err != nil { Fatalf("Could not open database: %v", err) @@ -1988,9 +2016,9 @@ func MakeGenesis(ctx *cli.Context) *genesisT.Genesis { } // MakeChain creates a chain manager from set command line flags. -func MakeChain(ctx *cli.Context, stack *node.Node, readOnly bool) (chain *core.BlockChain, chainDb ethdb.Database) { +func MakeChain(ctx *cli.Context, stack *node.Node) (chain *core.BlockChain, chainDb ethdb.Database) { var err error - chainDb = MakeChainDatabase(ctx, stack) + chainDb = MakeChainDatabase(ctx, stack, false) // TODO(rjl493456442) support read-only database config, _, err := core.SetupGenesisBlock(chainDb, MakeGenesis(ctx)) if err != nil { Fatalf("%v", err) @@ -2045,12 +2073,10 @@ func MakeChain(ctx *cli.Context, stack *node.Node, readOnly bool) (chain *core.B cache.TrieDirtyLimit = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheGCFlag.Name) / 100 } vmcfg := vm.Config{EnablePreimageRecording: ctx.GlobalBool(VMEnableDebugFlag.Name)} - var limit *uint64 - if ctx.GlobalIsSet(TxLookupLimitFlag.Name) && !readOnly { - l := ctx.GlobalUint64(TxLookupLimitFlag.Name) - limit = &l - } - chain, err = core.NewBlockChain(chainDb, cache, config, engine, vmcfg, nil, limit) + + // TODO(rjl493456442) disable snapshot generation/wiping if the chain is read only. + // Disable transaction indexing/unindexing by default. 
+ chain, err = core.NewBlockChain(chainDb, cache, config, engine, vmcfg, nil, nil) if err != nil { Fatalf("Can't create BlockChain: %v", err) } diff --git a/common/prque/lazyqueue.go b/common/prque/lazyqueue.go index c74faab7e6..37c2f3bd42 100644 --- a/common/prque/lazyqueue.go +++ b/common/prque/lazyqueue.go @@ -55,7 +55,7 @@ type ( // NewLazyQueue creates a new lazy queue func NewLazyQueue(setIndex SetIndexCallback, priority PriorityCallback, maxPriority MaxPriorityCallback, clock mclock.Clock, refreshPeriod time.Duration) *LazyQueue { q := &LazyQueue{ - popQueue: newSstack(nil), + popQueue: newSstack(nil, false), setIndex: setIndex, priority: priority, maxPriority: maxPriority, @@ -71,8 +71,8 @@ func NewLazyQueue(setIndex SetIndexCallback, priority PriorityCallback, maxPrior // Reset clears the contents of the queue func (q *LazyQueue) Reset() { - q.queue[0] = newSstack(q.setIndex0) - q.queue[1] = newSstack(q.setIndex1) + q.queue[0] = newSstack(q.setIndex0, false) + q.queue[1] = newSstack(q.setIndex1, false) } // Refresh performs queue re-evaluation if necessary diff --git a/common/prque/prque.go b/common/prque/prque.go index 3cc5a1adaf..54c78b5fc2 100755 --- a/common/prque/prque.go +++ b/common/prque/prque.go @@ -28,7 +28,12 @@ type Prque struct { // New creates a new priority queue. func New(setIndex SetIndexCallback) *Prque { - return &Prque{newSstack(setIndex)} + return &Prque{newSstack(setIndex, false)} +} + +// NewWrapAround creates a new priority queue with wrap-around priority handling. +func NewWrapAround(setIndex SetIndexCallback) *Prque { + return &Prque{newSstack(setIndex, true)} } // Pushes a value with a given priority into the queue, expanding if necessary. diff --git a/common/prque/sstack.go b/common/prque/sstack.go index 8518af54ff..b06a95413d 100755 --- a/common/prque/sstack.go +++ b/common/prque/sstack.go @@ -31,22 +31,24 @@ type SetIndexCallback func(data interface{}, index int) // the stack (heap) functionality and the Len, Less and Swap methods for the // sortability requirements of the heaps. type sstack struct { - setIndex SetIndexCallback - size int - capacity int - offset int + setIndex SetIndexCallback + size int + capacity int + offset int + wrapAround bool blocks [][]*item active []*item } // Creates a new, empty stack. -func newSstack(setIndex SetIndexCallback) *sstack { +func newSstack(setIndex SetIndexCallback, wrapAround bool) *sstack { result := new(sstack) result.setIndex = setIndex result.active = make([]*item, blockSize) result.blocks = [][]*item{result.active} result.capacity = blockSize + result.wrapAround = wrapAround return result } @@ -94,7 +96,11 @@ func (s *sstack) Len() int { // Compares the priority of two elements of the stack (higher is first). // Required by sort.Interface. func (s *sstack) Less(i, j int) bool { - return (s.blocks[i/blockSize][i%blockSize].priority - s.blocks[j/blockSize][j%blockSize].priority) > 0 + a, b := s.blocks[i/blockSize][i%blockSize].priority, s.blocks[j/blockSize][j%blockSize].priority + if s.wrapAround { + return a-b > 0 + } + return a > b } // Swaps two elements in the stack. Required by sort.Interface. @@ -110,5 +116,5 @@ func (s *sstack) Swap(i, j int) { // Resets the stack, effectively clearing its contents. 
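(Editor's note: a small illustration, not part of the patch, of why the wrap-around stack's Less method above compares the signed difference instead of the raw values. When priorities are allowed to wrap past math.MaxInt64, the plain a > b comparison mis-orders values that straddle the overflow boundary, while a-b > 0 still ranks the wrapped value first.)

package main

import (
	"fmt"
	"math"
)

func main() {
	a := int64(math.MinInt64 + 10) // a priority that has wrapped past MaxInt64
	b := int64(math.MaxInt64 - 10) // a priority just below the boundary
	fmt.Println(a > b)   // false: the plain comparison sorts the wrapped value last
	fmt.Println(a-b > 0) // true: the difference wraps back to a small positive number
}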
func (s *sstack) Reset() { - *s = *newSstack(s.setIndex) + *s = *newSstack(s.setIndex, false) } diff --git a/common/prque/sstack_test.go b/common/prque/sstack_test.go index 2ff093579d..bc6298979c 100644 --- a/common/prque/sstack_test.go +++ b/common/prque/sstack_test.go @@ -21,7 +21,7 @@ func TestSstack(t *testing.T) { for i := 0; i < size; i++ { data[i] = &item{rand.Int(), rand.Int63()} } - stack := newSstack(nil) + stack := newSstack(nil, false) for rep := 0; rep < 2; rep++ { // Push all the data into the stack, pop out every second secs := []*item{} @@ -55,7 +55,7 @@ func TestSstackSort(t *testing.T) { data[i] = &item{rand.Int(), int64(i)} } // Push all the data into the stack - stack := newSstack(nil) + stack := newSstack(nil, false) for _, val := range data { stack.Push(val) } @@ -76,7 +76,7 @@ func TestSstackReset(t *testing.T) { for i := 0; i < size; i++ { data[i] = &item{rand.Int(), rand.Int63()} } - stack := newSstack(nil) + stack := newSstack(nil, false) for rep := 0; rep < 2; rep++ { // Push all the data into the stack, pop out every second secs := []*item{} diff --git a/consensus/ethash/algorithm.go b/consensus/ethash/algorithm.go index 53d2641601..af9f4bc1f2 100644 --- a/consensus/ethash/algorithm.go +++ b/consensus/ethash/algorithm.go @@ -199,7 +199,7 @@ func generateCache(dest []uint32, epoch uint64, epochLength uint64, seed []byte) case <-done: return case <-time.After(3 * time.Second): - logger.Info("Generating ethash verification cache", "epochLength", epochLength, "percentage", atomic.LoadUint32(&progress)*100/uint32(rows)/4, "elapsed", common.PrettyDuration(time.Since(start))) + logger.Info("Generating ethash verification cache", "epochLength", epochLength, "percentage", atomic.LoadUint32(&progress)*100/uint32(rows)/(cacheRounds+1), "elapsed", common.PrettyDuration(time.Since(start))) } } }() diff --git a/consensus/ethash/algorithm_test.go b/consensus/ethash/algorithm_test.go index 9019840185..84b55282d3 100644 --- a/consensus/ethash/algorithm_test.go +++ b/consensus/ethash/algorithm_test.go @@ -752,10 +752,14 @@ func TestConcurrentDiskCacheGeneration(t *testing.T) { for i := 0; i < 3; i++ { pend.Add(1) - go func(idx int) { defer pend.Done() - ethash := New(Config{cachedir, 0, 1, false, "", 0, 0, false, ModeNormal, nil, nil}, nil, false) + + config := Config{ + CacheDir: cachedir, + CachesOnDisk: 1, + } + ethash := New(config, nil, false) defer ethash.Close() if err := ethash.verifySeal(nil, block.Header(), false); err != nil { t.Errorf("proc %d: block verification failed: %v", idx, err) diff --git a/consensus/ethash/api.go b/consensus/ethash/api.go index 68b3a84b09..f4d3802e0b 100644 --- a/consensus/ethash/api.go +++ b/consensus/ethash/api.go @@ -89,7 +89,7 @@ func (api *API) SubmitWork(nonce types.BlockNonce, hash, digest common.Hash) boo // // It accepts the miner hash rate and an identifier which must be unique // between nodes. -func (api *API) SubmitHashRate(rate hexutil.Uint64, id common.Hash) bool { +func (api *API) SubmitHashrate(rate hexutil.Uint64, id common.Hash) bool { if api.ethash.remote == nil { return false } diff --git a/consensus/ethash/ethash.go b/consensus/ethash/ethash.go index f5e6778237..3f74b266ab 100644 --- a/consensus/ethash/ethash.go +++ b/consensus/ethash/ethash.go @@ -51,7 +51,7 @@ var ( two256 = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0)) // sharedEthash is a full instance that can be shared between multiple users. 
- sharedEthash = New(Config{"", 3, 0, false, "", 1, 0, false, ModeNormal, nil, nil}, nil, false) + sharedEthash *Ethash // algorithmRevision is the data structure version used for file naming. algorithmRevision = 23 @@ -60,6 +60,15 @@ var ( dumpMagic = []uint32{0xbaddcafe, 0xfee1dead} ) +func init() { + sharedConfig := Config{ + PowMode: ModeNormal, + CachesInMem: 3, + DatasetsInMem: 1, + } + sharedEthash = New(sharedConfig, nil, false) +} + // isLittleEndian returns whether the local system is running in little or big // endian byte order. func isLittleEndian() bool { @@ -533,6 +542,10 @@ type Config struct { DatasetsLockMmap bool PowMode Mode + // When set, notifications sent by the remote sealer will + // be block header JSON objects instead of work package arrays. + NotifyFull bool + Log log.Logger `toml:"-"` // ECIP-1099 ECIP1099Block *uint64 `toml:"-"` @@ -586,6 +599,9 @@ func New(config Config, notify []string, noverify bool) *Ethash { update: make(chan struct{}), hashrate: metrics.NewMeterForced(), } + if config.PowMode == ModeShared { + ethash.shared = sharedEthash + } ethash.remote = startRemoteSealer(ethash, notify, noverify) return ethash } @@ -593,15 +609,7 @@ func New(config Config, notify []string, noverify bool) *Ethash { // NewTester creates a small sized ethash PoW scheme useful only for testing // purposes. func NewTester(notify []string, noverify bool) *Ethash { - ethash := &Ethash{ - config: Config{PowMode: ModeTest, Log: log.Root()}, - caches: newlru("cache", 1, newCache), - datasets: newlru("dataset", 1, newDataset), - update: make(chan struct{}), - hashrate: metrics.NewMeterForced(), - } - ethash.remote = startRemoteSealer(ethash, notify, noverify) - return ethash + return New(Config{PowMode: ModeTest}, notify, noverify) } // NewFaker creates a ethash consensus engine with a fake PoW scheme that accepts @@ -673,7 +681,6 @@ func NewShared() *Ethash { // Close closes the exit channel to notify all backend threads exiting. func (ethash *Ethash) Close() error { - var err error ethash.closeOnce.Do(func() { // Short circuit if the exit channel is not allocated. 
if ethash.remote == nil { @@ -682,7 +689,7 @@ func (ethash *Ethash) Close() error { close(ethash.remote.requestExit) <-ethash.remote.exitCh }) - return err + return nil } // cache tries to retrieve a verification cache for the specified block number diff --git a/consensus/ethash/ethash_test.go b/consensus/ethash/ethash_test.go index fa37a9d9af..cbdb131fba 100644 --- a/consensus/ethash/ethash_test.go +++ b/consensus/ethash/ethash_test.go @@ -113,7 +113,14 @@ func TestCacheFileEvict(t *testing.T) { t.Fatal(err) } defer os.RemoveAll(tmpdir) - e := New(Config{CachesInMem: 3, CachesOnDisk: 10, CacheDir: tmpdir, PowMode: ModeTest}, nil, false) + + config := Config{ + CachesInMem: 3, + CachesOnDisk: 10, + CacheDir: tmpdir, + PowMode: ModeTest, + } + e := New(config, nil, false) defer e.Close() workers := 8 @@ -178,7 +185,7 @@ func TestRemoteSealer(t *testing.T) { } } -func TestHashRate(t *testing.T) { +func TestHashrate(t *testing.T) { var ( hashrate = []hexutil.Uint64{100, 200, 300} expect uint64 @@ -193,7 +200,7 @@ func TestHashRate(t *testing.T) { api := &API{ethash} for i := 0; i < len(hashrate); i += 1 { - if res := api.SubmitHashRate(hashrate[i], ids[i]); !res { + if res := api.SubmitHashrate(hashrate[i], ids[i]); !res { t.Error("remote miner submit hashrate failed") } expect += uint64(hashrate[i]) @@ -213,7 +220,7 @@ func TestClosedRemoteSealer(t *testing.T) { t.Error("expect to return an error to indicate ethash is stopped") } - if res := api.SubmitHashRate(hexutil.Uint64(100), common.HexToHash("a")); res { + if res := api.SubmitHashrate(hexutil.Uint64(100), common.HexToHash("a")); res { t.Error("expect to return false when submit hashrate to a stopped ethash") } } diff --git a/consensus/ethash/sealer.go b/consensus/ethash/sealer.go index 480afe4cf3..f28ed7ffc4 100644 --- a/consensus/ethash/sealer.go +++ b/consensus/ethash/sealer.go @@ -406,7 +406,16 @@ func (s *remoteSealer) makeWork(block *types.Block) { // new work to be processed. func (s *remoteSealer) notifyWork() { work := s.currentWork - blob, _ := json.Marshal(work) + + // Encode the JSON payload of the notification. When NotifyFull is set, + // this is the complete block header, otherwise it is a JSON array. + var blob []byte + if s.ethash.config.NotifyFull { + blob, _ = json.Marshal(s.currentBlock.Header()) + } else { + blob, _ = json.Marshal(work) + } + s.reqWG.Add(len(s.notifyURLs)) for _, url := range s.notifyURLs { go s.sendNotification(s.notifyCtx, url, blob, work) diff --git a/consensus/ethash/sealer_test.go b/consensus/ethash/sealer_test.go index 7f32832d90..4f42b789bd 100644 --- a/consensus/ethash/sealer_test.go +++ b/consensus/ethash/sealer_test.go @@ -23,6 +23,7 @@ import ( "net/http" "net/http/httptest" "sort" + "strconv" "testing" "time" @@ -94,6 +95,50 @@ func TestRemoteNotify(t *testing.T) { } } +// Tests whether remote HTTP servers are correctly notified of new work. (Full pending block body / --miner.notify.full) +func TestRemoteNotifyFull(t *testing.T) { + // Start a simple web server to capture notifications. + sink := make(chan map[string]interface{}) + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + blob, err := ioutil.ReadAll(req.Body) + if err != nil { + t.Errorf("failed to read miner notification: %v", err) + } + var work map[string]interface{} + if err := json.Unmarshal(blob, &work); err != nil { + t.Errorf("failed to unmarshal miner notification: %v", err) + } + sink <- work + })) + defer server.Close() + + // Create the custom ethash engine. 
+ config := Config{ + PowMode: ModeTest, + NotifyFull: true, + Log: testlog.Logger(t, log.LvlWarn), + } + ethash := New(config, []string{server.URL}, false) + defer ethash.Close() + + // Stream a work task and ensure the notification bubbles out. + header := &types.Header{Number: big.NewInt(1), Difficulty: big.NewInt(100)} + block := types.NewBlockWithHeader(header) + + ethash.Seal(nil, block, nil, nil) + select { + case work := <-sink: + if want := "0x" + strconv.FormatUint(header.Number.Uint64(), 16); work["number"] != want { + t.Errorf("pending block number mismatch: have %v, want %v", work["number"], want) + } + if want := "0x" + header.Difficulty.Text(16); work["difficulty"] != want { + t.Errorf("pending block difficulty mismatch: have %s, want %s", work["difficulty"], want) + } + case <-time.After(3 * time.Second): + t.Fatalf("notification timed out") + } +} + // Tests that pushing work packages fast to the miner doesn't cause any data race // issues in the notifications. func TestRemoteMultiNotify(t *testing.T) { @@ -139,6 +184,55 @@ func TestRemoteMultiNotify(t *testing.T) { } } +// Tests that pushing work packages fast to the miner doesn't cause any data race +// issues in the notifications. (Full pending block body / --miner.notify.full) +func TestRemoteMultiNotifyFull(t *testing.T) { + // Start a simple web server to capture notifications. + sink := make(chan map[string]interface{}, 64) + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + blob, err := ioutil.ReadAll(req.Body) + if err != nil { + t.Errorf("failed to read miner notification: %v", err) + } + var work map[string]interface{} + if err := json.Unmarshal(blob, &work); err != nil { + t.Errorf("failed to unmarshal miner notification: %v", err) + } + sink <- work + })) + defer server.Close() + + // Create the custom ethash engine. + config := Config{ + PowMode: ModeTest, + NotifyFull: true, + Log: testlog.Logger(t, log.LvlWarn), + } + ethash := New(config, []string{server.URL}, false) + defer ethash.Close() + + // Provide a results reader. + // Otherwise the unread results will be logged asynchronously + // and this can happen after the test is finished, causing a panic. + results := make(chan *types.Block, cap(sink)) + + // Stream a lot of work tasks and ensure all the notifications bubble out. + for i := 0; i < cap(sink); i++ { + header := &types.Header{Number: big.NewInt(int64(i)), Difficulty: big.NewInt(100)} + block := types.NewBlockWithHeader(header) + ethash.Seal(nil, block, results, nil) + } + + for i := 0; i < cap(sink); i++ { + select { + case <-sink: + <-results + case <-time.After(10 * time.Second): + t.Fatalf("notification %d timed out", i) + } + } +} +
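(Editor's note: a hedged sketch, not part of the patch, of what a --miner.notify.full subscriber receives. With NotifyFull set, notifyWork marshals the pending header itself, so the POST body is a JSON object with hex-encoded fields such as "number" and "difficulty", which is exactly what the two tests above assert, instead of the default work-package array of hex strings.)

package main

import (
	"encoding/json"
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/core/types"
)

func main() {
	// The same header the tests above construct; marshalling it shows the
	// notification shape, e.g. {...,"number":"0x1","difficulty":"0x64",...}.
	header := &types.Header{Number: big.NewInt(1), Difficulty: big.NewInt(100)}
	blob, err := json.Marshal(header)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(blob))
}

// Tests whether stale solutions are correctly processed. 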
func TestStaleSubmission(t *testing.T) { ethash := NewTester(nil, true) diff --git a/core/bench_test.go b/core/bench_test.go index 111fa577a0..5624fb76e9 100644 --- a/core/bench_test.go +++ b/core/bench_test.go @@ -159,7 +159,7 @@ func benchInsertChain(b *testing.B, disk bool, gen func(int, *BlockGen)) { b.Fatalf("cannot create temporary directory: %v", err) } defer os.RemoveAll(dir) - db, err = rawdb.NewLevelDBDatabase(dir, 128, 128, "") + db, err = rawdb.NewLevelDBDatabase(dir, 128, 128, "", false) if err != nil { b.Fatalf("cannot create temporary database: %v", err) } @@ -257,7 +257,7 @@ func benchWriteChain(b *testing.B, full bool, count uint64) { if err != nil { b.Fatalf("cannot create temporary directory: %v", err) } - db, err := rawdb.NewLevelDBDatabase(dir, 128, 1024, "") + db, err := rawdb.NewLevelDBDatabase(dir, 128, 1024, "", false) if err != nil { b.Fatalf("error opening database at %v: %v", dir, err) } @@ -274,7 +274,7 @@ func benchReadChain(b *testing.B, full bool, count uint64) { } defer os.RemoveAll(dir) - db, err := rawdb.NewLevelDBDatabase(dir, 128, 1024, "") + db, err := rawdb.NewLevelDBDatabase(dir, 128, 1024, "", false) if err != nil { b.Fatalf("error opening database at %v: %v", dir, err) } @@ -285,7 +285,7 @@ func benchReadChain(b *testing.B, full bool, count uint64) { b.ResetTimer() for i := 0; i < b.N; i++ { - db, err := rawdb.NewLevelDBDatabase(dir, 128, 1024, "") + db, err := rawdb.NewLevelDBDatabase(dir, 128, 1024, "", false) if err != nil { b.Fatalf("error opening database at %v: %v", dir, err) } diff --git a/core/blockchain_remotefreezer_test.go b/core/blockchain_remotefreezer_test.go index a255e81a0d..58d1ac0d1d 100644 --- a/core/blockchain_remotefreezer_test.go +++ b/core/blockchain_remotefreezer_test.go @@ -80,7 +80,7 @@ func testRPCRemoteFreezer(t *testing.T) (rpcFreezerEndpoint string, server *rpc. t.Log("Using external freezer:", rpcFreezerEndpoint) } - ancientDb, err := rawdb.NewDatabaseWithFreezerRemote(rawdb.NewMemoryDatabase(), rpcFreezerEndpoint) + ancientDb, err := rawdb.NewDatabaseWithFreezerRemote(rawdb.NewMemoryDatabase(), rpcFreezerEndpoint, false) if err != nil { t.Fatalf("failed to create temp freezer db: %v", err) } @@ -424,7 +424,7 @@ func TestTransactionIndices_RemoteFreezer(t *testing.T) { // Init block chain with external ancients, check all needed indices has been indexed. 
limit := []uint64{0, 32, 64, 128} for _, l := range limit { - ancientDb, err := rawdb.NewDatabaseWithFreezerRemote(rawdb.NewMemoryDatabase(), freezerRPCEndpoint) + ancientDb, err := rawdb.NewDatabaseWithFreezerRemote(rawdb.NewMemoryDatabase(), freezerRPCEndpoint, false) if err != nil { t.Fatalf("failed to create temp freezer db: %v", err) } @@ -444,7 +444,7 @@ func TestTransactionIndices_RemoteFreezer(t *testing.T) { } // Reconstruct a block chain which only reserves HEAD-64 tx indices - ancientDb, err = rawdb.NewDatabaseWithFreezerRemote(rawdb.NewMemoryDatabase(), freezerRPCEndpoint) + ancientDb, err = rawdb.NewDatabaseWithFreezerRemote(rawdb.NewMemoryDatabase(), freezerRPCEndpoint, false) if err != nil { t.Fatalf("failed to create temp freezer db: %v", err) } diff --git a/core/blockchain_repair_test.go b/core/blockchain_repair_test.go index f35fb62636..cd13767b99 100644 --- a/core/blockchain_repair_test.go +++ b/core/blockchain_repair_test.go @@ -1763,7 +1763,7 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) { } os.RemoveAll(datadir) - db, err := rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "") + db, err := rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "", false) if err != nil { t.Fatalf("Failed to create persistent database: %v", err) } @@ -1818,7 +1818,7 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) { } // Force run a freeze cycle type freezer interface { - Freeze(threshold uint64) + Freeze(threshold uint64) error Ancients() (uint64, error) } db.(freezer).Freeze(tt.freezeThreshold) @@ -1831,7 +1831,7 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) { db.Close() // Start a new blockchain back up and see where the repair leads us - db, err = rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "") + db, err = rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "", false) if err != nil { t.Fatalf("Failed to reopen persistent database: %v", err) } diff --git a/core/blockchain_sethead_test.go b/core/blockchain_sethead_test.go index 8d2af33822..ee1f951f61 100644 --- a/core/blockchain_sethead_test.go +++ b/core/blockchain_sethead_test.go @@ -1962,7 +1962,7 @@ func testSetHead(t *testing.T, tt *rewindTest, snapshots bool) { } os.RemoveAll(datadir) - db, err := rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "") + db, err := rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "", false) if err != nil { t.Fatalf("Failed to create persistent database: %v", err) } @@ -2024,7 +2024,7 @@ func testSetHead(t *testing.T, tt *rewindTest, snapshots bool) { } // Force run a freeze cycle type freezer interface { - Freeze(threshold uint64) + Freeze(threshold uint64) error Ancients() (uint64, error) } db.(freezer).Freeze(tt.freezeThreshold) diff --git a/core/blockchain_snapshot_test.go b/core/blockchain_snapshot_test.go index 90706216a6..2eda07d4aa 100644 --- a/core/blockchain_snapshot_test.go +++ b/core/blockchain_snapshot_test.go @@ -66,7 +66,7 @@ func (basic *snapshotTestBasic) prepare(t *testing.T) (*BlockChain, []*types.Blo } os.RemoveAll(datadir) - db, err := rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "") + db, err := rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "", false) if err != nil { t.Fatalf("Failed to create persistent database: %v", err) } @@ -262,7 +262,7 @@ func (snaptest *crashSnapshotTest) test(t *testing.T) { db.Close() // Start a new blockchain back up and see where the repair leads us - newdb, err := 
rawdb.NewLevelDBDatabaseWithFreezer(snaptest.datadir, 0, 0, snaptest.datadir, "") + newdb, err := rawdb.NewLevelDBDatabaseWithFreezer(snaptest.datadir, 0, 0, snaptest.datadir, "", false) if err != nil { t.Fatalf("Failed to reopen persistent database: %v", err) } diff --git a/core/blockchain_test.go b/core/blockchain_test.go index 3d2eafdf01..5f7c3a89f0 100644 --- a/core/blockchain_test.go +++ b/core/blockchain_test.go @@ -654,7 +654,7 @@ func TestFastVsFullChains(t *testing.T) { t.Fatalf("failed to create temp freezer dir: %v", err) } defer os.Remove(frdir) - ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "") + ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false) if err != nil { t.Fatalf("failed to create temp freezer db: %v", err) } @@ -728,7 +728,7 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) { t.Fatalf("failed to create temp freezer dir: %v", err) } defer os.Remove(dir) - db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), dir, "") + db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), dir, "", false) if err != nil { t.Fatalf("failed to create temp freezer db: %v", err) } @@ -1603,7 +1603,7 @@ func TestBlockchainRecovery(t *testing.T) { } defer os.Remove(frdir) - ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "") + ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false) if err != nil { t.Fatalf("failed to create temp freezer db: %v", err) } @@ -1660,7 +1660,7 @@ func TestIncompleteAncientReceiptChainInsertion(t *testing.T) { t.Fatalf("failed to create temp freezer dir: %v", err) } defer os.Remove(frdir) - ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "") + ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false) if err != nil { t.Fatalf("failed to create temp freezer db: %v", err) } @@ -1859,7 +1859,7 @@ func testInsertKnownChainData(t *testing.T, typ string) { t.Fatalf("failed to create temp freezer dir: %v", err) } defer os.Remove(dir) - chaindb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), dir, "") + chaindb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), dir, "", false) if err != nil { t.Fatalf("failed to create temp freezer db: %v", err) } @@ -2139,7 +2139,7 @@ func TestTransactionIndices(t *testing.T) { t.Fatalf("failed to create temp freezer dir: %v", err) } defer os.Remove(frdir) - ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "") + ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false) if err != nil { t.Fatalf("failed to create temp freezer db: %v", err) } @@ -2167,7 +2167,7 @@ func TestTransactionIndices(t *testing.T) { // Init block chain with external ancients, check all needed indices has been indexed. 
limit := []uint64{0, 32, 64, 128} for _, l := range limit { - ancientDb, err = rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "") + ancientDb, err = rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false) if err != nil { t.Fatalf("failed to create temp freezer db: %v", err) } @@ -2187,7 +2187,7 @@ func TestTransactionIndices(t *testing.T) { } // Reconstruct a block chain which only reserves HEAD-64 tx indices - ancientDb, err = rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "") + ancientDb, err = rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false) if err != nil { t.Fatalf("failed to create temp freezer db: %v", err) } @@ -2266,7 +2266,7 @@ func TestSkipStaleTxIndicesInFastSync(t *testing.T) { t.Fatalf("failed to create temp freezer dir: %v", err) } defer os.Remove(frdir) - ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "") + ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false) if err != nil { t.Fatalf("failed to create temp freezer db: %v", err) } diff --git a/core/chain_indexer_test.go b/core/chain_indexer_test.go index b76203dc8f..f099609015 100644 --- a/core/chain_indexer_test.go +++ b/core/chain_indexer_test.go @@ -18,6 +18,7 @@ package core import ( "context" + "errors" "fmt" "math/big" "math/rand" @@ -224,7 +225,10 @@ func (b *testChainIndexBackend) Process(ctx context.Context, header *types.Heade //t.processCh <- header.Number.Uint64() select { case <-time.After(10 * time.Second): - b.t.Fatal("Unexpected call to Process") + b.t.Error("Unexpected call to Process") + // Can't use Fatal since this is not the test's goroutine. + // Returning error stops the chainIndexer's updateLoop + return errors.New("Unexpected call to Process") case b.processCh <- header.Number.Uint64(): } return nil diff --git a/core/chain_makers.go b/core/chain_makers.go index 4404d20866..0297462111 100644 --- a/core/chain_makers.go +++ b/core/chain_makers.go @@ -114,6 +114,11 @@ func (b *BlockGen) AddTxWithChain(bc *BlockChain, tx *types.Transaction) { b.receipts = append(b.receipts, receipt) } +// GetBalance returns the balance of the given address at the generated block. +func (b *BlockGen) GetBalance(addr common.Address) *big.Int { + return b.statedb.GetBalance(addr) +} + // AddUncheckedTx forcefully adds a transaction to the block without any // validation. // diff --git a/core/headerchain.go b/core/headerchain.go index cf3e30c037..678c54450b 100644 --- a/core/headerchain.go +++ b/core/headerchain.go @@ -298,12 +298,12 @@ func (hc *HeaderChain) writeHeaders(headers []*types.Header) (result *headerWrit func (hc *HeaderChain) ValidateHeaderChain(chain []*types.Header, checkFreq int) (int, error) { // Do a sanity check that the provided chain is actually ordered and linked + // NOTE(meowsbits): These checks for nil heads are core-geth specific. if len(chain) == 0 { return 0, nil - } else if len(chain) >= 1 { - if chain[0] == nil { - return 0, fmt.Errorf("header was nil") - } + } + if chain[0] == nil { + return 0, fmt.Errorf("header was nil") } for i := 1; i < len(chain); i++ { if chain[i] == nil { @@ -333,7 +333,7 @@ func (hc *HeaderChain) ValidateHeaderChain(chain []*types.Header, checkFreq int) seals := make([]bool, len(chain)) if checkFreq != 0 { // In case of checkFreq == 0 all seals are left false. 
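(Editor's note: a worked example, not part of the patch, of the seal-sampling loop-bound fix just below. The old strict < bound never sampled the trailing partial window of headers; switching to <= adds one more pass whose random index is clamped to the last element.)

package main

import "fmt"

func main() {
	seals, checkFreq := 10, 4
	// Old bound: i < 10/4 ran passes 0 and 1, sampling only indices 0..7.
	// New bound: i <= 10/4 adds pass 2, whose window is clamped to 8..9.
	for i := 0; i <= seals/checkFreq; i++ {
		lo, hi := i*checkFreq, i*checkFreq+checkFreq-1
		if hi >= seals {
			hi = seals - 1
		}
		fmt.Printf("pass %d samples one index within [%d, %d]\n", i, lo, hi)
	}
}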
- for i := 0; i < len(seals)/checkFreq; i++ { + for i := 0; i <= len(seals)/checkFreq; i++ { index := i*checkFreq + hc.rand.Intn(checkFreq) if index >= len(seals) { index = len(seals) - 1 diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go index e6c07e2763..7a8ee4ab4f 100644 --- a/core/rawdb/accessors_chain.go +++ b/core/rawdb/accessors_chain.go @@ -825,3 +825,29 @@ func FindCommonAncestor(db ethdb.Reader, a, b *types.Header) *types.Header { } return a } + +// ReadHeadHeader returns the current canonical head header. +func ReadHeadHeader(db ethdb.Reader) *types.Header { + headHeaderHash := ReadHeadHeaderHash(db) + if headHeaderHash == (common.Hash{}) { + return nil + } + headHeaderNumber := ReadHeaderNumber(db, headHeaderHash) + if headHeaderNumber == nil { + return nil + } + return ReadHeader(db, headHeaderHash, *headHeaderNumber) +} + +// ReadHeadBlock returns the current canonical head block. +func ReadHeadBlock(db ethdb.Reader) *types.Block { + headBlockHash := ReadHeadBlockHash(db) + if headBlockHash == (common.Hash{}) { + return nil + } + headBlockNumber := ReadHeaderNumber(db, headBlockHash) + if headBlockNumber == nil { + return nil + } + return ReadBlock(db, headBlockHash, *headBlockNumber) +} diff --git a/core/rawdb/accessors_chain_test.go b/core/rawdb/accessors_chain_test.go index a5804cd309..ea9dc436cf 100644 --- a/core/rawdb/accessors_chain_test.go +++ b/core/rawdb/accessors_chain_test.go @@ -440,7 +440,7 @@ func TestAncientStorage(t *testing.T) { } defer os.Remove(frdir) - db, err := NewDatabaseWithFreezer(NewMemoryDatabase(), frdir, "") + db, err := NewDatabaseWithFreezer(NewMemoryDatabase(), frdir, "", false) if err != nil { t.Fatalf("failed to create database with ancient backend") } diff --git a/core/rawdb/chain_iterator.go b/core/rawdb/chain_iterator.go index 862a549540..ad222005be 100644 --- a/core/rawdb/chain_iterator.go +++ b/core/rawdb/chain_iterator.go @@ -23,10 +23,10 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/prque" + "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" - "golang.org/x/crypto/sha3" ) // InitDatabaseFromFreezer reinitializes an empty database from a previous batch @@ -135,32 +135,15 @@ func iterateTransactions(db ethdb.Database, from uint64, to uint64, reverse bool close(hashesCh) } }() - - var hasher = sha3.NewLegacyKeccak256() for data := range rlpCh { - it, err := rlp.NewListIterator(data.rlp) - if err != nil { - log.Warn("tx iteration error", "error", err) - return - } - it.Next() - txs := it.Value() - txIt, err := rlp.NewListIterator(txs) - if err != nil { - log.Warn("tx iteration error", "error", err) + var body types.Body + if err := rlp.DecodeBytes(data.rlp, &body); err != nil { + log.Warn("Failed to decode block body", "block", data.number, "error", err) return } var hashes []common.Hash - for txIt.Next() { - if err := txIt.Err(); err != nil { - log.Warn("tx iteration error", "error", err) - return - } - var txHash common.Hash - hasher.Reset() - hasher.Write(txIt.Value()) - hasher.Sum(txHash[:0]) - hashes = append(hashes, txHash) + for _, tx := range body.Transactions { + hashes = append(hashes, tx.Hash()) } result := &blockTxHashes{ hashes: hashes, diff --git a/core/rawdb/chain_iterator_test.go b/core/rawdb/chain_iterator_test.go index 90b2639d38..45cc6323e0 100644 --- a/core/rawdb/chain_iterator_test.go +++ b/core/rawdb/chain_iterator_test.go @@ -33,14 +33,34 
@@ func TestChainIterator(t *testing.T) { var block *types.Block var txs []*types.Transaction - for i := uint64(0); i <= 10; i++ { - if i == 0 { - block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, nil, nil, nil, newHasher()) // Empty genesis block + to := common.BytesToAddress([]byte{0x11}) + block = types.NewBlock(&types.Header{Number: big.NewInt(int64(0))}, nil, nil, nil, newHasher()) // Empty genesis block + WriteBlock(chainDb, block) + WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64()) + for i := uint64(1); i <= 10; i++ { + var tx *types.Transaction + if i%2 == 0 { + tx = types.NewTx(&types.LegacyTx{ + Nonce: i, + GasPrice: big.NewInt(11111), + Gas: 1111, + To: &to, + Value: big.NewInt(111), + Data: []byte{0x11, 0x11, 0x11}, + }) } else { - tx := types.NewTransaction(i, common.BytesToAddress([]byte{0x11}), big.NewInt(111), 1111, big.NewInt(11111), []byte{0x11, 0x11, 0x11}) - txs = append(txs, tx) - block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, []*types.Transaction{tx}, nil, nil, newHasher()) + tx = types.NewTx(&types.AccessListTx{ + ChainID: big.NewInt(1337), + Nonce: i, + GasPrice: big.NewInt(11111), + Gas: 1111, + To: &to, + Value: big.NewInt(111), + Data: []byte{0x11, 0x11, 0x11}, + }) } + txs = append(txs, tx) + block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, []*types.Transaction{tx}, nil, nil, newHasher()) WriteBlock(chainDb, block) WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64()) } @@ -66,7 +86,7 @@ func TestChainIterator(t *testing.T) { numbers = append(numbers, int(h.number)) if len(h.hashes) > 0 { if got, exp := h.hashes[0], txs[h.number-1].Hash(); got != exp { - t.Fatalf("hash wrong, got %x exp %x", got, exp) + t.Fatalf("block %d: hash wrong, got %x exp %x", h.number, got, exp) } } } @@ -88,14 +108,37 @@ func TestIndexTransactions(t *testing.T) { var block *types.Block var txs []*types.Transaction - for i := uint64(0); i <= 10; i++ { - if i == 0 { - block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, nil, nil, nil, newHasher()) // Empty genesis block + to := common.BytesToAddress([]byte{0x11}) + + // Write empty genesis block + block = types.NewBlock(&types.Header{Number: big.NewInt(int64(0))}, nil, nil, nil, newHasher()) + WriteBlock(chainDb, block) + WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64()) + + for i := uint64(1); i <= 10; i++ { + var tx *types.Transaction + if i%2 == 0 { + tx = types.NewTx(&types.LegacyTx{ + Nonce: i, + GasPrice: big.NewInt(11111), + Gas: 1111, + To: &to, + Value: big.NewInt(111), + Data: []byte{0x11, 0x11, 0x11}, + }) } else { - tx := types.NewTransaction(i, common.BytesToAddress([]byte{0x11}), big.NewInt(111), 1111, big.NewInt(11111), []byte{0x11, 0x11, 0x11}) - txs = append(txs, tx) - block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, []*types.Transaction{tx}, nil, nil, newHasher()) + tx = types.NewTx(&types.AccessListTx{ + ChainID: big.NewInt(1337), + Nonce: i, + GasPrice: big.NewInt(11111), + Gas: 1111, + To: &to, + Value: big.NewInt(111), + Data: []byte{0x11, 0x11, 0x11}, + }) } + txs = append(txs, tx) + block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, []*types.Transaction{tx}, nil, nil, newHasher()) WriteBlock(chainDb, block) WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64()) } @@ -108,10 +151,10 @@ func TestIndexTransactions(t *testing.T) { } number := ReadTxLookupEntry(chainDb, txs[i-1].Hash()) if exist && number == nil { - t.Fatalf("Transaction indice missing") + t.Fatalf("Transaction 
index %d missing", i) } if !exist && number != nil { - t.Fatalf("Transaction indice is not deleted") + t.Fatalf("Transaction index %d is not deleted", i) } } number := ReadTxIndexTail(chainDb) diff --git a/core/rawdb/database.go b/core/rawdb/database.go index aa11ff8f05..fb3e80308c 100644 --- a/core/rawdb/database.go +++ b/core/rawdb/database.go @@ -58,7 +58,10 @@ func (frdb *freezerdb) Close() error { // Freeze is a helper method used for external testing to trigger and block until // a freeze cycle completes, without having to sleep for a minute to trigger the // automatic background run. -func (frdb *freezerdb) Freeze(threshold uint64) { +func (frdb *freezerdb) Freeze(threshold uint64) error { + if frdb.AncientStore.(*freezer).readonly { + return errReadOnly + } // Set the freezer threshold to a temporary value defer func(old uint64) { atomic.StoreUint64(&frdb.AncientStore.(*freezer).threshold, old) @@ -69,6 +72,7 @@ func (frdb *freezerdb) Freeze(threshold uint64) { trigger := make(chan struct{}, 1) frdb.AncientStore.(*freezer).trigger <- trigger <-trigger + return nil } // nofreezedb is a database wrapper that disables freezer data retrievals. @@ -122,11 +126,11 @@ func NewDatabase(db ethdb.KeyValueStore) ethdb.Database { // NewDatabaseWithFreezerRemote creates a high level database on top of a given key- // value data store with a freezer moving immutable chain segments into cold // storage. -func NewDatabaseWithFreezerRemote(db ethdb.KeyValueStore, freezerURL string) (ethdb.Database, error) { +func NewDatabaseWithFreezerRemote(db ethdb.KeyValueStore, freezerURL string, readonly bool) (ethdb.Database, error) { // Create the idle freezer instance log.Info("New remote freezer", "freezer", freezerURL) - frdb, err := newFreezerRemoteClient(freezerURL) + frdb, err := newFreezerRemoteClient(freezerURL, readonly) if err != nil { log.Error("NewDatabaseWithFreezerRemote error", "error", err) return nil, err @@ -203,8 +207,9 @@ Please set --ancient.rpc to the correct path, and/or review the remote freezer's } } // Freezer is consistent with the key-value database, permit combining the two - go freezeRemote(db, frdb, frdb.threshold, frdb.quit, frdb.trigger) - + if !frdb.readonly { + go freezeRemote(db, frdb, frdb.threshold, frdb.quit, frdb.trigger) + } return &freezerdb{ KeyValueStore: db, AncientStore: frdb, @@ -214,9 +219,9 @@ Please set --ancient.rpc to the correct path, and/or review the remote freezer's // NewDatabaseWithFreezer creates a high level database on top of a given key- // value data store with a freezer moving immutable chain segments into cold // storage. 
-func NewDatabaseWithFreezer(db ethdb.KeyValueStore, freezerStr string, namespace string) (ethdb.Database, error) { +func NewDatabaseWithFreezer(db ethdb.KeyValueStore, freezerStr string, namespace string, readonly bool) (ethdb.Database, error) { // Create the idle freezer instance - frdb, err := newFreezer(freezerStr, namespace) + frdb, err := newFreezer(freezerStr, namespace, readonly) if err != nil { return nil, err } @@ -272,8 +277,9 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, freezerStr string, namespace } // Freezer is consistent with the key-value database, permit combining the two - go frdb.freeze(db) - + if !frdb.readonly { + go frdb.freeze(db) + } return &freezerdb{ KeyValueStore: db, AncientStore: frdb, @@ -295,8 +301,8 @@ func NewMemoryDatabaseWithCap(size int) ethdb.Database { // NewLevelDBDatabase creates a persistent key-value database without a freezer // moving immutable chain segments into cold storage. -func NewLevelDBDatabase(file string, cache int, handles int, namespace string) (ethdb.Database, error) { - db, err := leveldb.New(file, cache, handles, namespace) +func NewLevelDBDatabase(file string, cache int, handles int, namespace string, readonly bool) (ethdb.Database, error) { + db, err := leveldb.New(file, cache, handles, namespace, readonly) if err != nil { return nil, err } @@ -305,12 +311,12 @@ func NewLevelDBDatabase(file string, cache int, handles int, namespace string) ( // NewLevelDBDatabaseWithFreezer creates a persistent key-value database with a // freezer moving immutable chain segments into cold storage. -func NewLevelDBDatabaseWithFreezerRemote(file string, cache int, handles int, freezerURL string) (ethdb.Database, error) { - kvdb, err := leveldb.New(file, cache, handles, "eth/db/chaindata") +func NewLevelDBDatabaseWithFreezerRemote(file string, cache int, handles int, freezerURL string, readonly bool) (ethdb.Database, error) { + kvdb, err := leveldb.New(file, cache, handles, "eth/db/chaindata", readonly) if err != nil { return nil, err } - frdb, err := NewDatabaseWithFreezerRemote(kvdb, freezerURL) + frdb, err := NewDatabaseWithFreezerRemote(kvdb, freezerURL, readonly) if err != nil { kvdb.Close() return nil, err @@ -320,12 +326,12 @@ func NewLevelDBDatabaseWithFreezerRemote(file string, cache int, handles int, fr // NewLevelDBDatabaseWithFreezer creates a persistent key-value database with a // freezer moving immutable chain segments into cold storage. -func NewLevelDBDatabaseWithFreezer(file string, cache int, handles int, freezer string, namespace string) (ethdb.Database, error) { - kvdb, err := leveldb.New(file, cache, handles, namespace) +func NewLevelDBDatabaseWithFreezer(file string, cache int, handles int, freezer string, namespace string, readonly bool) (ethdb.Database, error) { + kvdb, err := leveldb.New(file, cache, handles, namespace, readonly) if err != nil { return nil, err } - frdb, err := NewDatabaseWithFreezer(kvdb, freezer, namespace) + frdb, err := NewDatabaseWithFreezer(kvdb, freezer, namespace, readonly) if err != nil { kvdb.Close() return nil, err diff --git a/core/rawdb/freezer.go b/core/rawdb/freezer.go index 1da5694264..7988b665cc 100644 --- a/core/rawdb/freezer.go +++ b/core/rawdb/freezer.go @@ -35,6 +35,10 @@ import ( ) var ( + // errReadOnly is returned if the freezer is opened in read only mode. All the + // mutations are disallowed. + errReadOnly = errors.New("read only") + // errUnknownTable is returned if the user attempts to read from a table that is // not tracked by the freezer. 
errUnknownTable = errors.New("unknown table") @@ -73,6 +77,7 @@ type freezer struct { frozen uint64 // Number of blocks already frozen threshold uint64 // Number of recent blocks not to freeze (params.FullImmutabilityThreshold apart from tests) + readonly bool tables map[string]*freezerTable // Data tables for storing everything instanceLock fileutil.Releaser // File-system lock to prevent double opens @@ -84,7 +89,7 @@ type freezer struct { // newFreezer creates a chain freezer that moves ancient chain data into // append-only flat file containers. -func newFreezer(datadir string, namespace string) (*freezer, error) { +func newFreezer(datadir string, namespace string, readonly bool) (*freezer, error) { // Create the initial freezer object var ( readMeter = metrics.NewRegisteredMeter(namespace+"ancient/read", nil) @@ -106,6 +111,7 @@ func newFreezer(datadir string, namespace string) (*freezer, error) { } // Open all the supported data tables freezer := &freezer{ + readonly: readonly, threshold: vars.FullImmutabilityThreshold, tables: make(map[string]*freezerTable), instanceLock: lock, @@ -130,7 +136,7 @@ func newFreezer(datadir string, namespace string) (*freezer, error) { lock.Release() return nil, err } - log.Info("Opened ancient database", "database", datadir) + log.Info("Opened ancient database", "database", datadir, "readonly", readonly) return freezer, nil } @@ -138,7 +144,7 @@ func newFreezer(datadir string, namespace string) (*freezer, error) { func (f *freezer) Close() error { var errs []error f.closeOnce.Do(func() { - f.quit <- struct{}{} + close(f.quit) for _, table := range f.tables { if err := table.Close(); err != nil { errs = append(errs, err) @@ -191,6 +197,9 @@ func (f *freezer) AncientSize(kind string) (uint64, error) { // injection will be rejected. But if two injections with same number happen at // the same time, we can get into the trouble. func (f *freezer) AppendAncient(number uint64, hash, header, body, receipts, td []byte) (err error) { + if f.readonly { + return errReadOnly + } // Ensure the binary blobs we are appending is continuous with freezer. if atomic.LoadUint64(&f.frozen) != number { return errOutOrderInsertion @@ -233,6 +242,9 @@ func (f *freezer) AppendAncient(number uint64, hash, header, body, receipts, td // TruncateAncients discards any recent data above the provided threshold number. 
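One behavioral note on the `Close` change above, before the `TruncateAncients` hunk resumes: `close(f.quit)` broadcasts shutdown to every receiver and, guarded by the surrounding `sync.Once`, cannot be reached twice, whereas the old `f.quit <- struct{}{}` woke exactly one receiver and blocked forever when nobody was listening. A runnable illustration of the difference:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	quit := make(chan struct{})
	var wg sync.WaitGroup

	// Several goroutines block on the same quit channel. A single
	// `quit <- struct{}{}` would wake only one of them (and would block
	// forever if none were listening); closing the channel wakes all.
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			<-quit // returns immediately once quit is closed
			fmt.Println("worker", id, "stopped")
		}(i)
	}
	close(quit)
	wg.Wait()
}
```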
func (f *freezer) TruncateAncients(items uint64) error { + if f.readonly { + return errReadOnly + } if atomic.LoadUint64(&f.frozen) <= items { return nil } diff --git a/core/rawdb/freezer_remote_client.go b/core/rawdb/freezer_remote_client.go index d0ad602b99..64c1cf8125 100644 --- a/core/rawdb/freezer_remote_client.go +++ b/core/rawdb/freezer_remote_client.go @@ -20,6 +20,7 @@ type FreezerRemoteClient struct { threshold uint64 // Number of recent blocks not to freeze (params.FullImmutabilityThreshold apart from tests) trigger chan chan struct{} // Manual blocking freeze trigger, test determinism closeOnce sync.Once + readonly bool } const ( @@ -34,7 +35,7 @@ const ( ) // newFreezerRemoteClient constructs a rpc client to connect to a remote freezer -func newFreezerRemoteClient(endpoint string) (*FreezerRemoteClient, error) { +func newFreezerRemoteClient(endpoint string, readonly bool) (*FreezerRemoteClient, error) { client, err := rpc.Dial(endpoint) if err != nil { return nil, err @@ -44,6 +45,7 @@ func newFreezerRemoteClient(endpoint string) (*FreezerRemoteClient, error) { threshold: vars.FullImmutabilityThreshold, quit: make(chan struct{}), trigger: make(chan chan struct{}), + readonly: readonly, }, nil } diff --git a/core/state/pruner/pruner.go b/core/state/pruner/pruner.go index 530a348540..4d6e415511 100644 --- a/core/state/pruner/pruner.go +++ b/core/state/pruner/pruner.go @@ -85,8 +85,12 @@ type Pruner struct { } // NewPruner creates the pruner instance. -func NewPruner(db ethdb.Database, headHeader *types.Header, datadir, trieCachePath string, bloomSize uint64) (*Pruner, error) { - snaptree, err := snapshot.New(db, trie.NewDatabase(db), 256, headHeader.Root, false, false, false) +func NewPruner(db ethdb.Database, datadir, trieCachePath string, bloomSize uint64) (*Pruner, error) { + headBlock := rawdb.ReadHeadBlock(db) + if headBlock == nil { + return nil, errors.New("failed to load head block") + } + snaptree, err := snapshot.New(db, trie.NewDatabase(db), 256, headBlock.Root(), false, false, false) if err != nil { return nil, err // The relevant snapshot(s) might not exist } @@ -104,12 +108,12 @@ func NewPruner(db ethdb.Database, headHeader *types.Header, datadir, trieCachePa stateBloom: stateBloom, datadir: datadir, trieCachePath: trieCachePath, - headHeader: headHeader, + headHeader: headBlock.Header(), snaptree: snaptree, }, nil } -func prune(maindb ethdb.Database, stateBloom *stateBloom, middleStateRoots map[common.Hash]struct{}, start time.Time) error { +func prune(snaptree *snapshot.Tree, root common.Hash, maindb ethdb.Database, stateBloom *stateBloom, bloomPath string, middleStateRoots map[common.Hash]struct{}, start time.Time) error { // Delete all stale trie nodes in the disk. With the help of state bloom // the trie nodes(and codes) belong to the active state will be filtered // out. A very small part of stale tries will also be filtered because of @@ -182,6 +186,25 @@ func prune(maindb ethdb.Database, stateBloom *stateBloom, middleStateRoots map[c iter.Release() log.Info("Pruned state data", "nodes", count, "size", size, "elapsed", common.PrettyDuration(time.Since(pstart))) + // Pruning is done, now drop the "useless" layers from the snapshot. + // First, flush the target layer into the disk. After that, all + // diff layers below the target will be merged into the disk. + if err := snaptree.Cap(root, 0); err != nil { + return err + } + // Second, flush the snapshot journal into the disk. All diff + // layers above the target are dropped silently. 
Eventually the entire snapshot + // tree is converted into a single disk layer with the pruning target + // as the root. + if _, err := snaptree.Journal(root); err != nil { + return err + } + // Delete the state bloom; its removal marks the entire pruning procedure + // as finished. If any crash or manual exit happens before this, + // `RecoverPruning` will pick it up on the next restart and redo the + // whole procedure. + os.RemoveAll(bloomPath) + // Start compactions, will remove the deleted data from the disk immediately. // Note for small pruning, the compaction is skipped. if count >= rangeCompactionThreshold { @@ -310,29 +333,7 @@ func (p *Pruner) Prune(root common.Hash) error { return err } log.Info("State bloom filter committed", "name", filterName) - - if err := prune(p.db, p.stateBloom, middleRoots, start); err != nil { - return err - } - // Pruning is done, now drop the "useless" layers from the snapshot. - // Firstly, flushing the target layer into the disk. After that all - // diff layers below the target will all be merged into the disk. - if err := p.snaptree.Cap(root, 0); err != nil { - return err - } - // Secondly, flushing the snapshot journal into the disk. All diff - // layers upon the target layer are dropped silently. Eventually the - // entire snapshot tree is converted into a single disk layer with - // the pruning target as the root. - if _, err := p.snaptree.Journal(root); err != nil { - return err - } - // Delete the state bloom, it marks the entire pruning procedure is - // finished. If any crashes or manual exit happens before this, - // `RecoverPruning` will pick it up in the next restarts to redo all - // the things. - os.RemoveAll(filterName) - return nil + return prune(p.snaptree, root, p.db, p.stateBloom, filterName, middleRoots, start) } // RecoverPruning will resume the pruning procedure during the system restart. @@ -350,9 +351,9 @@ func RecoverPruning(datadir string, db ethdb.Database, trieCachePath string) err if stateBloomPath == "" { return nil // nothing to recover } - headHeader, err := getHeadHeader(db) - if err != nil { - return err + headBlock := rawdb.ReadHeadBlock(db) + if headBlock == nil { + return errors.New("failed to load head block") } // Initialize the snapshot tree in recovery mode to handle this special case: // - Users run the `prune-state` command multiple times // - Neither the starting nor recovery procedure is finished // - The state HEAD is rewound already because of multiple incomplete `prune-state` // In this case, even the state HEAD is not exactly matched with snapshot, it // still feasible to recover the pruning correctly. - snaptree, err := snapshot.New(db, trie.NewDatabase(db), 256, headHeader.Root, false, false, true) + snaptree, err := snapshot.New(db, trie.NewDatabase(db), 256, headBlock.Root(), false, false, true) if err != nil { return err // The relevant snapshot(s) might not exist } @@ -382,7 +383,7 @@ func RecoverPruning(datadir string, db ethdb.Database, trieCachePath string) err // otherwise the dangling state will be left. 
var ( found bool - layers = snaptree.Snapshots(headHeader.Root, 128, true) + layers = snaptree.Snapshots(headBlock.Root(), 128, true) middleRoots = make(map[common.Hash]struct{}) ) for _, layer := range layers { @@ -396,28 +397,7 @@ func RecoverPruning(datadir string, db ethdb.Database, trieCachePath string) err log.Error("Pruning target state is not existent") return errors.New("non-existent target state") } - if err := prune(db, stateBloom, middleRoots, time.Now()); err != nil { - return err - } - // Pruning is done, now drop the "useless" layers from the snapshot. - // Firstly, flushing the target layer into the disk. After that all - // diff layers below the target will all be merged into the disk. - if err := snaptree.Cap(stateBloomRoot, 0); err != nil { - return err - } - // Secondly, flushing the snapshot journal into the disk. All diff - // layers upon are dropped silently. Eventually the entire snapshot - // tree is converted into a single disk layer with the pruning target - // as the root. - if _, err := snaptree.Journal(stateBloomRoot); err != nil { - return err - } - // Delete the state bloom, it marks the entire pruning procedure is - // finished. If any crashes or manual exit happens before this, - // `RecoverPruning` will pick it up in the next restarts to redo all - // the things. - os.RemoveAll(stateBloomPath) - return nil + return prune(snaptree, stateBloomRoot, db, stateBloom, stateBloomPath, middleRoots, time.Now()) } // extractGenesis loads the genesis state and commits all the state entries @@ -506,22 +486,6 @@ func findBloomFilter(datadir string) (string, common.Hash, error) { return stateBloomPath, stateBloomRoot, nil } -func getHeadHeader(db ethdb.Database) (*types.Header, error) { - headHeaderHash := rawdb.ReadHeadBlockHash(db) - if headHeaderHash == (common.Hash{}) { - return nil, errors.New("empty head block hash") - } - headHeaderNumber := rawdb.ReadHeaderNumber(db, headHeaderHash) - if headHeaderNumber == nil { - return nil, errors.New("empty head block number") - } - headHeader := rawdb.ReadHeader(db, headHeaderHash, *headHeaderNumber) - if headHeader == nil { - return nil, errors.New("empty head header") - } - return headHeader, nil -} - const warningLog = ` WARNING! 
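To recap the pruner refactor above: `Prune` and `RecoverPruning` used to duplicate the post-prune snapshot cleanup, and both now funnel through `prune()`. The ordering of that shared tail is the important part; here is a hedged restatement (`finishPruning` is a made-up name, the real code inlines these steps at the end of `prune()`):

```go
package pruner

import (
	"os"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/state/snapshot"
)

// finishPruning restates the tail of prune() after the refactor above.
// Ordering is what matters: the bloom filter file must outlive the
// snapshot flattening, since its presence on disk is the marker that
// lets RecoverPruning resume after a crash.
func finishPruning(snaptree *snapshot.Tree, root common.Hash, bloomPath string) error {
	// 1. Flatten every diff layer below the target into the disk layer.
	if err := snaptree.Cap(root, 0); err != nil {
		return err
	}
	// 2. Journal the tree; diff layers above the target are dropped and
	//    the snapshot collapses to a single disk layer rooted at root.
	if _, err := snaptree.Journal(root); err != nil {
		return err
	}
	// 3. Only now delete the state bloom: this is the commit point of
	//    the whole pruning procedure.
	return os.RemoveAll(bloomPath)
}
```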
diff --git a/core/state/snapshot/difflayer.go b/core/state/snapshot/difflayer.go index 9c86a679d1..ee88938b77 100644 --- a/core/state/snapshot/difflayer.go +++ b/core/state/snapshot/difflayer.go @@ -296,13 +296,17 @@ func (dl *diffLayer) AccountRLP(hash common.Hash) ([]byte, error) { if !hit { hit = dl.diffed.Contains(destructBloomHasher(hash)) } + var origin *diskLayer + if !hit { + origin = dl.origin // extract origin while holding the lock + } dl.lock.RUnlock() // If the bloom filter misses, don't even bother with traversing the memory // diff layers, reach straight into the bottom persistent disk layer - if !hit { + if origin != nil { snapshotBloomAccountMissMeter.Mark(1) - return dl.origin.AccountRLP(hash) + return origin.AccountRLP(hash) } // The bloom filter hit, start poking in the internal maps return dl.accountRLP(hash, 0) @@ -358,13 +362,17 @@ func (dl *diffLayer) Storage(accountHash, storageHash common.Hash) ([]byte, erro if !hit { hit = dl.diffed.Contains(destructBloomHasher(accountHash)) } + var origin *diskLayer + if !hit { + origin = dl.origin // extract origin while holding the lock + } dl.lock.RUnlock() // If the bloom filter misses, don't even bother with traversing the memory // diff layers, reach straight into the bottom persistent disk layer - if !hit { + if origin != nil { snapshotBloomStorageMissMeter.Mark(1) - return dl.origin.Storage(accountHash, storageHash) + return origin.Storage(accountHash, storageHash) } // The bloom filter hit, start poking in the internal maps return dl.storage(accountHash, storageHash, 0) diff --git a/core/state/snapshot/disklayer_test.go b/core/state/snapshot/disklayer_test.go index 6beb944e07..ccde2fc094 100644 --- a/core/state/snapshot/disklayer_test.go +++ b/core/state/snapshot/disklayer_test.go @@ -524,7 +524,7 @@ func TestDiskSeek(t *testing.T) { t.Fatal(err) } else { defer os.RemoveAll(dir) - diskdb, err := leveldb.New(dir, 256, 0, "") + diskdb, err := leveldb.New(dir, 256, 0, "", false) if err != nil { t.Fatal(err) } diff --git a/core/state/snapshot/snapshot.go b/core/state/snapshot/snapshot.go index aa5f5900b0..eccf377264 100644 --- a/core/state/snapshot/snapshot.go +++ b/core/state/snapshot/snapshot.go @@ -283,11 +283,11 @@ func (t *Tree) Update(blockRoot common.Hash, parentRoot common.Hash, destructs m return errSnapshotCycle } // Generate a new snapshot on top of the parent - parent := t.Snapshot(parentRoot).(snapshot) + parent := t.Snapshot(parentRoot) if parent == nil { return fmt.Errorf("parent [%#x] snapshot missing", parentRoot) } - snap := parent.Update(blockRoot, destructs, accounts, storage) + snap := parent.(snapshot).Update(blockRoot, destructs, accounts, storage) // Save the new snapshot for later t.lock.Lock() @@ -484,8 +484,17 @@ func diffToDisk(bottom *diffLayer) *diskLayer { if key := it.Key(); len(key) == 65 { // TODO(karalabe): Yuck, we should move this into the iterator batch.Delete(key) base.cache.Del(key[1:]) - snapshotFlushStorageItemMeter.Mark(1) + + // Ensure we don't delete too much data blindly (contract can be + // huge). It's ok to flush, the root will go missing in case of a + // crash and we'll detect and regenerate the snapshot. 
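The guard that follows is the codebase's standard flush-on-`IdealBatchSize` idiom. In isolation it looks like the toy below, with `ethdb.IdealBatchSize`'s value (100KiB in geth) copied locally so the sketch runs on its own:

```go
package main

import "fmt"

// idealBatchSize mirrors ethdb.IdealBatchSize (100 * 1024 in geth),
// copied locally so this toy compiles on its own.
const idealBatchSize = 100 * 1024

// batch is a stand-in for an ethdb write batch.
type batch struct{ size int }

func (b *batch) Put(key, val []byte) { b.size += len(key) + len(val) }
func (b *batch) ValueSize() int      { return b.size }
func (b *batch) Write() error        { return nil } // pretend to hit disk
func (b *batch) Reset()              { b.size = 0 }

func main() {
	b, flushes := new(batch), 0
	for i := 0; i < 100000; i++ {
		b.Put([]byte("key"), make([]byte, 32))
		// The same shape as the added guard: flush periodically instead
		// of accumulating one unbounded batch for a huge contract.
		if b.ValueSize() > idealBatchSize {
			if err := b.Write(); err != nil {
				panic(err)
			}
			b.Reset()
			flushes++
		}
	}
	fmt.Println("flushes:", flushes)
}
```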
+ if batch.ValueSize() > ethdb.IdealBatchSize { + if err := batch.Write(); err != nil { + log.Crit("Failed to write storage deletions", "err", err) + } + batch.Reset() + } } } it.Release() @@ -503,6 +512,16 @@ func diffToDisk(bottom *diffLayer) *diskLayer { snapshotFlushAccountItemMeter.Mark(1) snapshotFlushAccountSizeMeter.Mark(int64(len(data))) + + // Ensure we don't write too much data blindly. It's ok to flush, the + // root will go missing in case of a crash and we'll detect and regen + // the snapshot. + if batch.ValueSize() > ethdb.IdealBatchSize { + if err := batch.Write(); err != nil { + log.Crit("Failed to write account data", "err", err) + } + batch.Reset() + } } // Push all the storage slots into the database for accountHash, storage := range bottom.storageData { diff --git a/core/state_processor_test.go b/core/state_processor_test.go index 631f19ab06..3714cd6f3c 100644 --- a/core/state_processor_test.go +++ b/core/state_processor_test.go @@ -87,7 +87,7 @@ func TestStateProcessorErrors(t *testing.T) { txs: []*types.Transaction{ makeTx(0, common.Address{}, big.NewInt(0), vars.TxGas, big.NewInt(0xffffff), nil), }, - want: "could not apply tx 0 [0xaa3f7d86802b1f364576d9071bf231e31d61b392d306831ac9cf706ff5371ce0]: insufficient funds for gas * price + value: address 0x71562b71999873DB5b286dF957af199Ec94617F7 have 0 want 352321515000", + want: "could not apply tx 0 [0xaa3f7d86802b1f364576d9071bf231e31d61b392d306831ac9cf706ff5371ce0]: insufficient funds for gas * price + value", }, { txs: []*types.Transaction{ diff --git a/core/state_transition.go b/core/state_transition.go index 8b136ad589..841a7f2de4 100644 --- a/core/state_transition.go +++ b/core/state_transition.go @@ -262,7 +262,6 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) { if st.evm.ChainConfig().IsEnabled(st.evm.ChainConfig().GetEIP2929Transition, st.evm.Context.BlockNumber) { st.state.PrepareAccessList(msg.From(), msg.To(), st.evm.ActivePrecompiles(), msg.AccessList()) } - var ( ret []byte vmerr error // vm errors do not effect consensus and are therefore not assigned to err diff --git a/core/tx_pool.go b/core/tx_pool.go index 8b4010d344..640a89ee4e 100644 --- a/core/tx_pool.go +++ b/core/tx_pool.go @@ -965,7 +965,7 @@ func (pool *TxPool) RemoveTx(hash common.Hash) *types.Transaction { return tx } -// requestPromoteExecutables requests a pool reset to the new head block. +// requestReset requests a pool reset to the new head block. // The returned channel is closed when the reset has occurred. func (pool *TxPool) requestReset(oldHead *types.Header, newHead *types.Header) chan struct{} { select { diff --git a/core/types/hashing.go b/core/types/hashing.go index 71efb25a9a..3227cf8a72 100644 --- a/core/types/hashing.go +++ b/core/types/hashing.go @@ -36,6 +36,7 @@ var encodeBufferPool = sync.Pool{ New: func() interface{} { return new(bytes.Buffer) }, } +// rlpHash encodes x and hashes the encoded bytes. func rlpHash(x interface{}) (h common.Hash) { sha := hasherPool.Get().(crypto.KeccakState) defer hasherPool.Put(sha) @@ -45,8 +46,8 @@ func rlpHash(x interface{}) (h common.Hash) { return h } -// prefixedRlpHash writes the prefix into the hasher before rlp-encoding the -// given interface. It's used for typed transactions. +// prefixedRlpHash writes the prefix into the hasher before rlp-encoding x. +// It's used for typed transactions. 
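The function body continues below. Conceptually it computes keccak256(prefix || rlp(x)); for an EIP-2930 transaction the prefix is the type byte `0x01`. An equivalent restatement using only exported APIs (illustrative, the real helper reuses a pooled hasher instead of allocating):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/rlp"
)

// typedHash reproduces what prefixedRlpHash computes for a typed
// transaction: keccak256(type byte || rlp(payload)). The payload here is
// an arbitrary RLP-encodable stand-in, not a real transaction body.
func typedHash(txType byte, payload interface{}) ([]byte, error) {
	enc, err := rlp.EncodeToBytes(payload)
	if err != nil {
		return nil, err
	}
	return crypto.Keccak256(append([]byte{txType}, enc...)), nil
}

func main() {
	h, _ := typedHash(0x01, []uint{1, 2, 3}) // 0x01 = access list tx type
	fmt.Printf("%x\n", h)
}
```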
func prefixedRlpHash(prefix byte, x interface{}) (h common.Hash) { sha := hasherPool.Get().(crypto.KeccakState) defer hasherPool.Put(sha) diff --git a/core/types/hashing_test.go b/core/types/hashing_test.go index a948b10ef6..6d1ebf897c 100644 --- a/core/types/hashing_test.go +++ b/core/types/hashing_test.go @@ -1,3 +1,19 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + package types_test import ( diff --git a/core/types/receipt.go b/core/types/receipt.go index 686e54f62a..7c0f8d1efb 100644 --- a/core/types/receipt.go +++ b/core/types/receipt.go @@ -38,6 +38,7 @@ var ( receiptStatusSuccessfulRLP = []byte{0x01} ) +// This error is returned when a typed receipt is decoded, but the string is empty. var errEmptyTypedReceipt = errors.New("empty typed receipt bytes") const ( diff --git a/core/types/transaction.go b/core/types/transaction.go index 49127630ae..a35e07a5a3 100644 --- a/core/types/transaction.go +++ b/core/types/transaction.go @@ -293,7 +293,7 @@ func (tx *Transaction) RawSignatureValues() (v, r, s *big.Int) { // GasPriceCmp compares the gas prices of two transactions. func (tx *Transaction) GasPriceCmp(other *Transaction) int { - return tx.inner.gasPrice().Cmp(other.GasPrice()) + return tx.inner.gasPrice().Cmp(other.inner.gasPrice()) } // GasPriceIntCmp compares the gas price of the transaction against the given price. diff --git a/core/types/transaction_signing.go b/core/types/transaction_signing.go index be8408adaa..92ba21296f 100644 --- a/core/types/transaction_signing.go +++ b/core/types/transaction_signing.go @@ -207,11 +207,7 @@ func (s eip2930Signer) Sender(tx *Transaction) (common.Address, error) { func (s eip2930Signer) SignatureValues(tx *Transaction, sig []byte) (R, S, V *big.Int, err error) { switch txdata := tx.inner.(type) { case *LegacyTx: - R, S, V = decodeSignature(sig) - if s.chainId.Sign() != 0 { - V = big.NewInt(int64(sig[64] + 35)) - V.Add(V, s.chainIdMul) - } + return s.EIP155Signer.SignatureValues(tx, sig) case *AccessListTx: // Check that chain ID of tx matches the signer. We also accept ID zero here, // because it indicates that the chain ID was not specified in the tx. diff --git a/core/vm/access_list_tracer.go b/core/vm/access_list_tracer.go new file mode 100644 index 0000000000..7388ade041 --- /dev/null +++ b/core/vm/access_list_tracer.go @@ -0,0 +1,181 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +package vm + +import ( + "math/big" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" +) + +// accessList is an accumulator for the set of accounts and storage slots an EVM +// contract execution touches. +type accessList map[common.Address]accessListSlots + +// accessListSlots is an accumulator for the set of storage slots within a single +// contract that an EVM contract execution touches. +type accessListSlots map[common.Hash]struct{} + +// newAccessList creates a new accessList. +func newAccessList() accessList { + return make(map[common.Address]accessListSlots) +} + +// addAddress adds an address to the accesslist. +func (al accessList) addAddress(address common.Address) { + // Set address if not previously present + if _, present := al[address]; !present { + al[address] = make(map[common.Hash]struct{}) + } +} + +// addSlot adds a storage slot to the accesslist. +func (al accessList) addSlot(address common.Address, slot common.Hash) { + // Set address if not previously present + al.addAddress(address) + + // Set the slot on the surely existent storage set + al[address][slot] = struct{}{} +} + +// equal checks if the content of the current access list is the same as the +// content of the other one. +func (al accessList) equal(other accessList) bool { + // Cross reference the accounts first + if len(al) != len(other) { + return false + } + for addr := range al { + if _, ok := other[addr]; !ok { + return false + } + } + for addr := range other { + if _, ok := al[addr]; !ok { + return false + } + } + // Accounts match, cross reference the storage slots too + for addr, slots := range al { + otherslots := other[addr] + + if len(slots) != len(otherslots) { + return false + } + for hash := range slots { + if _, ok := otherslots[hash]; !ok { + return false + } + } + for hash := range otherslots { + if _, ok := slots[hash]; !ok { + return false + } + } + } + return true +} + +// accessList converts the accesslist to a types.AccessList. +func (al accessList) accessList() types.AccessList { + acl := make(types.AccessList, 0, len(al)) + for addr, slots := range al { + tuple := types.AccessTuple{Address: addr} + for slot := range slots { + tuple.StorageKeys = append(tuple.StorageKeys, slot) + } + acl = append(acl, tuple) + } + return acl +} + +// AccessListTracer is a tracer that accumulates touched accounts and storage +// slots into an internal set. +type AccessListTracer struct { + excl map[common.Address]struct{} // Set of accounts to exclude from the list + list accessList // Set of accounts and storage slots touched +} + +// NewAccessListTracer creates a new tracer that can generate AccessLists. +// An optional AccessList can be specified to occupy slots and addresses in +// the resulting accesslist. 
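The constructor itself follows. For orientation, a caller is expected to drive the tracer in a fixed-point loop, rerunning the transaction with the previous round's list until the recorded set stops changing, which is how `eth_createAccessList` uses it; the `execute` callback below is an assumed stand-in for the caller's EVM invocation, not actual RPC code:

```go
package sketch

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/core/vm"
)

// buildAccessList sketches the fixed-point iteration around the tracer:
// rerun the transaction with the previous round's list until the set of
// touched accounts and slots stops changing. execute stands in for the
// caller's EVM run with vm.Config{Debug: true, Tracer: tracer}.
func buildAccessList(from, to common.Address, precompiles []common.Address,
	execute func(tracer *vm.AccessListTracer)) types.AccessList {

	prev := vm.NewAccessListTracer(nil, from, to, precompiles)
	for {
		tracer := vm.NewAccessListTracer(prev.AccessList(), from, to, precompiles)
		execute(tracer)
		if tracer.Equal(prev) {
			return tracer.AccessList()
		}
		prev = tracer
	}
}
```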
+func NewAccessListTracer(acl types.AccessList, from, to common.Address, precompiles []common.Address) *AccessListTracer { + excl := map[common.Address]struct{}{ + from: {}, to: {}, + } + for _, addr := range precompiles { + excl[addr] = struct{}{} + } + list := newAccessList() + for _, al := range acl { + if _, ok := excl[al.Address]; !ok { + list.addAddress(al.Address) + } + for _, slot := range al.StorageKeys { + list.addSlot(al.Address, slot) + } + } + return &AccessListTracer{ + excl: excl, + list: list, + } +} + +func (a *AccessListTracer) CapturePreEVM(env *EVM, inputs map[string]interface{}) { +} + +func (a *AccessListTracer) CaptureStart(env *EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) { +} + +// CaptureState captures all opcodes that touch storage or addresses and adds them to the accesslist. +func (a *AccessListTracer) CaptureState(env *EVM, pc uint64, op OpCode, gas, cost uint64, scope *ScopeContext, rData []byte, depth int, err error) { + stack := scope.Stack + if (op == SLOAD || op == SSTORE) && stack.len() >= 1 { + slot := common.Hash(stack.data[stack.len()-1].Bytes32()) + a.list.addSlot(scope.Contract.Address(), slot) + } + if (op == EXTCODECOPY || op == EXTCODEHASH || op == EXTCODESIZE || op == BALANCE || op == SELFDESTRUCT) && stack.len() >= 1 { + addr := common.Address(stack.data[stack.len()-1].Bytes20()) + if _, ok := a.excl[addr]; !ok { + a.list.addAddress(addr) + } + } + if (op == DELEGATECALL || op == CALL || op == STATICCALL || op == CALLCODE) && stack.len() >= 5 { + addr := common.Address(stack.data[stack.len()-2].Bytes20()) + if _, ok := a.excl[addr]; !ok { + a.list.addAddress(addr) + } + } +} + +func (*AccessListTracer) CaptureFault(env *EVM, pc uint64, op OpCode, gas, cost uint64, scope *ScopeContext, depth int, err error) { +} + +func (*AccessListTracer) CaptureEnd(env *EVM, output []byte, gasUsed uint64, t time.Duration, err error) { +} + +// AccessList returns the current accesslist maintained by the tracer. +func (a *AccessListTracer) AccessList() types.AccessList { + return a.list.accessList() +} + +// Equal returns if the content of two access list traces are equal. 
+func (a *AccessListTracer) Equal(other *AccessListTracer) bool { + return a.list.equal(other.list) +} diff --git a/core/vm/eips.go b/core/vm/eips.go index 4990951cb7..ef1d12c9cc 100644 --- a/core/vm/eips.go +++ b/core/vm/eips.go @@ -80,9 +80,9 @@ func enable1884(jt *JumpTable) { enableSelfBalance(jt) } -func opSelfBalance(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - balance, _ := uint256.FromBig(interpreter.evm.StateDB.GetBalance(callContext.contract.Address())) - callContext.stack.push(balance) +func opSelfBalance(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + balance, _ := uint256.FromBig(interpreter.evm.StateDB.GetBalance(scope.Contract.Address())) + scope.Stack.push(balance) return nil, nil } @@ -99,9 +99,9 @@ func enable1344(jt *JumpTable) { } // opChainID implements CHAINID opcode -func opChainID(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { +func opChainID(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { chainId, _ := uint256.FromBig(interpreter.evm.chainConfig.GetChainID()) - callContext.stack.push(chainId) + scope.Stack.push(chainId) return nil, nil } diff --git a/core/vm/evm.go b/core/vm/evm.go index 6be1e9490d..506016039a 100644 --- a/core/vm/evm.go +++ b/core/vm/evm.go @@ -221,7 +221,7 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas if !isPrecompile && evm.ChainConfig().IsEnabled(evm.chainConfig.GetEIP161abcTransition, evm.Context.BlockNumber) && value.Sign() == 0 { // Calling a non existing account, don't do anything, but ping the tracer if evm.vmConfig.Debug && evm.depth == 0 { - evm.vmConfig.Tracer.CaptureStart(caller.Address(), addr, false, input, gas, value) + evm.vmConfig.Tracer.CaptureStart(evm, caller.Address(), addr, false, input, gas, value) evm.vmConfig.Tracer.CaptureEnd(evm, ret, 0, 0, nil) } return nil, gas, nil @@ -232,7 +232,7 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas // Capture the tracer start/end events in debug mode if evm.vmConfig.Debug && evm.depth == 0 { - evm.vmConfig.Tracer.CaptureStart(caller.Address(), addr, false, input, gas, value) + evm.vmConfig.Tracer.CaptureStart(evm, caller.Address(), addr, false, input, gas, value) defer func(startGas uint64, startTime time.Time) { // Lazy evaluation of the parameters evm.vmConfig.Tracer.CaptureEnd(evm, ret, startGas-gas, time.Since(startTime), err) }(gas, time.Now()) @@ -456,7 +456,7 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64, } if evm.vmConfig.Debug && evm.depth == 0 { - evm.vmConfig.Tracer.CaptureStart(caller.Address(), address, true, codeAndHash.code, gas, value) + evm.vmConfig.Tracer.CaptureStart(evm, caller.Address(), address, true, codeAndHash.code, gas, value) } start := time.Now() diff --git a/core/vm/instructions.go b/core/vm/instructions.go index b2717fd022..f83400cdc6 100644 --- a/core/vm/instructions.go +++ b/core/vm/instructions.go @@ -24,68 +24,68 @@ import ( "golang.org/x/crypto/sha3" ) -func opAdd(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - x, y := callContext.stack.pop(), callContext.stack.peek() +func opAdd(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + x, y := scope.Stack.pop(), scope.Stack.peek() y.Add(&x, y) return nil, nil } -func opSub(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - x, y := callContext.stack.pop(), 
callContext.stack.peek() +func opSub(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + x, y := scope.Stack.pop(), scope.Stack.peek() y.Sub(&x, y) return nil, nil } -func opMul(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - x, y := callContext.stack.pop(), callContext.stack.peek() +func opMul(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + x, y := scope.Stack.pop(), scope.Stack.peek() y.Mul(&x, y) return nil, nil } -func opDiv(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - x, y := callContext.stack.pop(), callContext.stack.peek() +func opDiv(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + x, y := scope.Stack.pop(), scope.Stack.peek() y.Div(&x, y) return nil, nil } -func opSdiv(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - x, y := callContext.stack.pop(), callContext.stack.peek() +func opSdiv(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + x, y := scope.Stack.pop(), scope.Stack.peek() y.SDiv(&x, y) return nil, nil } -func opMod(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - x, y := callContext.stack.pop(), callContext.stack.peek() +func opMod(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + x, y := scope.Stack.pop(), scope.Stack.peek() y.Mod(&x, y) return nil, nil } -func opSmod(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - x, y := callContext.stack.pop(), callContext.stack.peek() +func opSmod(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + x, y := scope.Stack.pop(), scope.Stack.peek() y.SMod(&x, y) return nil, nil } -func opExp(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - base, exponent := callContext.stack.pop(), callContext.stack.peek() +func opExp(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + base, exponent := scope.Stack.pop(), scope.Stack.peek() exponent.Exp(&base, exponent) return nil, nil } -func opSignExtend(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - back, num := callContext.stack.pop(), callContext.stack.peek() +func opSignExtend(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + back, num := scope.Stack.pop(), scope.Stack.peek() num.ExtendSign(num, &back) return nil, nil } -func opNot(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - x := callContext.stack.peek() +func opNot(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + x := scope.Stack.peek() x.Not(x) return nil, nil } -func opLt(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - x, y := callContext.stack.pop(), callContext.stack.peek() +func opLt(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + x, y := scope.Stack.pop(), scope.Stack.peek() if x.Lt(y) { y.SetOne() } else { @@ -94,8 +94,8 @@ func opLt(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte return nil, nil } -func opGt(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - x, y := callContext.stack.pop(), callContext.stack.peek() +func opGt(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + x, y := scope.Stack.pop(), scope.Stack.peek() if 
x.Gt(y) { y.SetOne() } else { @@ -104,8 +104,8 @@ func opGt(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte return nil, nil } -func opSlt(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - x, y := callContext.stack.pop(), callContext.stack.peek() +func opSlt(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + x, y := scope.Stack.pop(), scope.Stack.peek() if x.Slt(y) { y.SetOne() } else { @@ -114,8 +114,8 @@ func opSlt(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byt return nil, nil } -func opSgt(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - x, y := callContext.stack.pop(), callContext.stack.peek() +func opSgt(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + x, y := scope.Stack.pop(), scope.Stack.peek() if x.Sgt(y) { y.SetOne() } else { @@ -124,8 +124,8 @@ func opSgt(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byt return nil, nil } -func opEq(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - x, y := callContext.stack.pop(), callContext.stack.peek() +func opEq(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + x, y := scope.Stack.pop(), scope.Stack.peek() if x.Eq(y) { y.SetOne() } else { @@ -134,8 +134,8 @@ func opEq(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte return nil, nil } -func opIszero(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - x := callContext.stack.peek() +func opIszero(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + x := scope.Stack.peek() if x.IsZero() { x.SetOne() } else { @@ -144,32 +144,32 @@ func opIszero(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([] return nil, nil } -func opAnd(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - x, y := callContext.stack.pop(), callContext.stack.peek() +func opAnd(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + x, y := scope.Stack.pop(), scope.Stack.peek() y.And(&x, y) return nil, nil } -func opOr(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - x, y := callContext.stack.pop(), callContext.stack.peek() +func opOr(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + x, y := scope.Stack.pop(), scope.Stack.peek() y.Or(&x, y) return nil, nil } -func opXor(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - x, y := callContext.stack.pop(), callContext.stack.peek() +func opXor(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + x, y := scope.Stack.pop(), scope.Stack.peek() y.Xor(&x, y) return nil, nil } -func opByte(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - th, val := callContext.stack.pop(), callContext.stack.peek() +func opByte(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + th, val := scope.Stack.pop(), scope.Stack.peek() val.Byte(&th) return nil, nil } -func opAddmod(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - x, y, z := callContext.stack.pop(), callContext.stack.pop(), callContext.stack.peek() +func opAddmod(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + x, y, z := scope.Stack.pop(), scope.Stack.pop(), scope.Stack.peek() if z.IsZero() 
{ z.Clear() } else { @@ -178,8 +178,8 @@ func opAddmod(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([] return nil, nil } -func opMulmod(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - x, y, z := callContext.stack.pop(), callContext.stack.pop(), callContext.stack.peek() +func opMulmod(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + x, y, z := scope.Stack.pop(), scope.Stack.pop(), scope.Stack.peek() z.MulMod(&x, &y, z) return nil, nil } @@ -187,9 +187,9 @@ func opMulmod(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([] // opSHL implements Shift Left // The SHL instruction (shift left) pops 2 values from the stack, first arg1 and then arg2, // and pushes on the stack arg2 shifted to the left by arg1 number of bits. -func opSHL(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { +func opSHL(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { // Note, second operand is left in the stack; accumulate result into it, and no need to push it afterwards - shift, value := callContext.stack.pop(), callContext.stack.peek() + shift, value := scope.Stack.pop(), scope.Stack.peek() if shift.LtUint64(256) { value.Lsh(value, uint(shift.Uint64())) } else { @@ -201,9 +201,9 @@ func opSHL(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byt // opSHR implements Logical Shift Right // The SHR instruction (logical shift right) pops 2 values from the stack, first arg1 and then arg2, // and pushes on the stack arg2 shifted to the right by arg1 number of bits with zero fill. -func opSHR(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { +func opSHR(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { // Note, second operand is left in the stack; accumulate result into it, and no need to push it afterwards - shift, value := callContext.stack.pop(), callContext.stack.peek() + shift, value := scope.Stack.pop(), scope.Stack.peek() if shift.LtUint64(256) { value.Rsh(value, uint(shift.Uint64())) } else { @@ -215,8 +215,8 @@ func opSHR(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byt // opSAR implements Arithmetic Shift Right // The SAR instruction (arithmetic shift right) pops 2 values from the stack, first arg1 and then arg2, // and pushes on the stack arg2 shifted to the right by arg1 number of bits with sign extension. 
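The `opSAR` hunk below is the same mechanical rename as the rest of this file: the unexported `callCtx` parameter becomes the exported `*ScopeContext`. Reconstructed from its uses in this diff (`scope.Stack`, `scope.Memory`, `scope.Contract`), the new type has roughly this shape; the comments are paraphrase, not the original doc text:

```go
// ScopeContext bundles the per-call state that was previously hidden in
// the unexported callCtx. Exporting it is what lets external tracers,
// such as the AccessListTracer above, receive it via CaptureState.
type ScopeContext struct {
	Memory   *Memory   // call memory, accessed as scope.Memory
	Stack    *Stack    // operand stack, accessed as scope.Stack
	Contract *Contract // executing contract, accessed as scope.Contract
}
```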
-func opSAR(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - shift, value := callContext.stack.pop(), callContext.stack.peek() +func opSAR(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + shift, value := scope.Stack.pop(), scope.Stack.peek() if shift.GtUint64(256) { if value.Sign() >= 0 { value.Clear() @@ -231,9 +231,9 @@ func opSAR(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byt return nil, nil } -func opSha3(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - offset, size := callContext.stack.pop(), callContext.stack.peek() - data := callContext.memory.GetPtr(int64(offset.Uint64()), int64(size.Uint64())) +func opSha3(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + offset, size := scope.Stack.pop(), scope.Stack.peek() + data := scope.Memory.GetPtr(int64(offset.Uint64()), int64(size.Uint64())) if interpreter.hasher == nil { interpreter.hasher = sha3.NewLegacyKeccak256().(keccakState) @@ -251,37 +251,37 @@ func opSha3(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]by size.SetBytes(interpreter.hasherBuf[:]) return nil, nil } -func opAddress(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - callContext.stack.push(new(uint256.Int).SetBytes(callContext.contract.Address().Bytes())) +func opAddress(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + scope.Stack.push(new(uint256.Int).SetBytes(scope.Contract.Address().Bytes())) return nil, nil } -func opBalance(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - slot := callContext.stack.peek() +func opBalance(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + slot := scope.Stack.peek() address := common.Address(slot.Bytes20()) slot.SetFromBig(interpreter.evm.StateDB.GetBalance(address)) return nil, nil } -func opOrigin(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - callContext.stack.push(new(uint256.Int).SetBytes(interpreter.evm.Origin.Bytes())) +func opOrigin(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + scope.Stack.push(new(uint256.Int).SetBytes(interpreter.evm.Origin.Bytes())) return nil, nil } -func opCaller(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - callContext.stack.push(new(uint256.Int).SetBytes(callContext.contract.Caller().Bytes())) +func opCaller(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + scope.Stack.push(new(uint256.Int).SetBytes(scope.Contract.Caller().Bytes())) return nil, nil } -func opCallValue(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - v, _ := uint256.FromBig(callContext.contract.value) - callContext.stack.push(v) +func opCallValue(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + v, _ := uint256.FromBig(scope.Contract.value) + scope.Stack.push(v) return nil, nil } -func opCallDataLoad(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - x := callContext.stack.peek() +func opCallDataLoad(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + x := scope.Stack.peek() if offset, overflow := x.Uint64WithOverflow(); !overflow { - data := getData(callContext.contract.Input, offset, 32) + data := getData(scope.Contract.Input, offset, 32) x.SetBytes(data) } else { 
x.Clear() @@ -289,16 +289,16 @@ func opCallDataLoad(pc *uint64, interpreter *EVMInterpreter, callContext *callCt return nil, nil } -func opCallDataSize(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - callContext.stack.push(new(uint256.Int).SetUint64(uint64(len(callContext.contract.Input)))) +func opCallDataSize(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + scope.Stack.push(new(uint256.Int).SetUint64(uint64(len(scope.Contract.Input)))) return nil, nil } -func opCallDataCopy(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { +func opCallDataCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { var ( - memOffset = callContext.stack.pop() - dataOffset = callContext.stack.pop() - length = callContext.stack.pop() + memOffset = scope.Stack.pop() + dataOffset = scope.Stack.pop() + length = scope.Stack.pop() ) dataOffset64, overflow := dataOffset.Uint64WithOverflow() if overflow { @@ -307,21 +307,21 @@ func opCallDataCopy(pc *uint64, interpreter *EVMInterpreter, callContext *callCt // These values are checked for overflow during gas cost calculation memOffset64 := memOffset.Uint64() length64 := length.Uint64() - callContext.memory.Set(memOffset64, length64, getData(callContext.contract.Input, dataOffset64, length64)) + scope.Memory.Set(memOffset64, length64, getData(scope.Contract.Input, dataOffset64, length64)) return nil, nil } -func opReturnDataSize(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - callContext.stack.push(new(uint256.Int).SetUint64(uint64(len(interpreter.returnData)))) +func opReturnDataSize(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + scope.Stack.push(new(uint256.Int).SetUint64(uint64(len(interpreter.returnData)))) return nil, nil } -func opReturnDataCopy(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { +func opReturnDataCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { var ( - memOffset = callContext.stack.pop() - dataOffset = callContext.stack.pop() - length = callContext.stack.pop() + memOffset = scope.Stack.pop() + dataOffset = scope.Stack.pop() + length = scope.Stack.pop() ) offset64, overflow := dataOffset.Uint64WithOverflow() @@ -335,42 +335,42 @@ func opReturnDataCopy(pc *uint64, interpreter *EVMInterpreter, callContext *call if overflow || uint64(len(interpreter.returnData)) < end64 { return nil, ErrReturnDataOutOfBounds } - callContext.memory.Set(memOffset.Uint64(), length.Uint64(), interpreter.returnData[offset64:end64]) + scope.Memory.Set(memOffset.Uint64(), length.Uint64(), interpreter.returnData[offset64:end64]) return nil, nil } -func opExtCodeSize(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - slot := callContext.stack.peek() +func opExtCodeSize(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + slot := scope.Stack.peek() slot.SetUint64(uint64(interpreter.evm.StateDB.GetCodeSize(slot.Bytes20()))) return nil, nil } -func opCodeSize(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { +func opCodeSize(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { l := new(uint256.Int) - l.SetUint64(uint64(len(callContext.contract.Code))) - callContext.stack.push(l) + l.SetUint64(uint64(len(scope.Contract.Code))) + scope.Stack.push(l) return nil, nil } -func opCodeCopy(pc *uint64, interpreter 
*EVMInterpreter, callContext *callCtx) ([]byte, error) { +func opCodeCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { var ( - memOffset = callContext.stack.pop() - codeOffset = callContext.stack.pop() - length = callContext.stack.pop() + memOffset = scope.Stack.pop() + codeOffset = scope.Stack.pop() + length = scope.Stack.pop() ) uint64CodeOffset, overflow := codeOffset.Uint64WithOverflow() if overflow { uint64CodeOffset = 0xffffffffffffffff } - codeCopy := getData(callContext.contract.Code, uint64CodeOffset, length.Uint64()) - callContext.memory.Set(memOffset.Uint64(), length.Uint64(), codeCopy) + codeCopy := getData(scope.Contract.Code, uint64CodeOffset, length.Uint64()) + scope.Memory.Set(memOffset.Uint64(), length.Uint64(), codeCopy) return nil, nil } -func opExtCodeCopy(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { +func opExtCodeCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { var ( - stack = callContext.stack + stack = scope.Stack a = stack.pop() memOffset = stack.pop() codeOffset = stack.pop() @@ -382,7 +382,7 @@ func opExtCodeCopy(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx } addr := common.Address(a.Bytes20()) codeCopy := getData(interpreter.evm.StateDB.GetCode(addr), uint64CodeOffset, length.Uint64()) - callContext.memory.Set(memOffset.Uint64(), length.Uint64(), codeCopy) + scope.Memory.Set(memOffset.Uint64(), length.Uint64(), codeCopy) return nil, nil } @@ -413,8 +413,8 @@ func opExtCodeCopy(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx // // (6) Caller tries to get the code hash for an account which is marked as deleted, // this account should be regarded as a non-existent account and zero should be returned. 
-func opExtCodeHash(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - slot := callContext.stack.peek() +func opExtCodeHash(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + slot := scope.Stack.peek() address := common.Address(slot.Bytes20()) if interpreter.evm.StateDB.Empty(address) { slot.Clear() @@ -424,14 +424,14 @@ func opExtCodeHash(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx return nil, nil } -func opGasprice(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { +func opGasprice(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { v, _ := uint256.FromBig(interpreter.evm.GasPrice) - callContext.stack.push(v) + scope.Stack.push(v) return nil, nil } -func opBlockhash(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - num := callContext.stack.peek() +func opBlockhash(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + num := scope.Stack.peek() num64, overflow := num.Uint64WithOverflow() if overflow { num.Clear() @@ -452,88 +452,88 @@ func opBlockhash(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) return nil, nil } -func opCoinbase(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - callContext.stack.push(new(uint256.Int).SetBytes(interpreter.evm.Context.Coinbase.Bytes())) +func opCoinbase(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + scope.Stack.push(new(uint256.Int).SetBytes(interpreter.evm.Context.Coinbase.Bytes())) return nil, nil } -func opTimestamp(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { +func opTimestamp(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { v, _ := uint256.FromBig(interpreter.evm.Context.Time) - callContext.stack.push(v) + scope.Stack.push(v) return nil, nil } -func opNumber(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { +func opNumber(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { v, _ := uint256.FromBig(interpreter.evm.Context.BlockNumber) - callContext.stack.push(v) + scope.Stack.push(v) return nil, nil } -func opDifficulty(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { +func opDifficulty(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { v, _ := uint256.FromBig(interpreter.evm.Context.Difficulty) - callContext.stack.push(v) + scope.Stack.push(v) return nil, nil } -func opGasLimit(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - callContext.stack.push(new(uint256.Int).SetUint64(interpreter.evm.Context.GasLimit)) +func opGasLimit(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + scope.Stack.push(new(uint256.Int).SetUint64(interpreter.evm.Context.GasLimit)) return nil, nil } -func opPop(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - callContext.stack.pop() +func opPop(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + scope.Stack.pop() return nil, nil } -func opMload(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - v := callContext.stack.peek() +func opMload(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + v := scope.Stack.peek() offset := int64(v.Uint64()) - 
v.SetBytes(callContext.memory.GetPtr(offset, 32)) + v.SetBytes(scope.Memory.GetPtr(offset, 32)) return nil, nil } -func opMstore(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { +func opMstore(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { // pop value of the stack - mStart, val := callContext.stack.pop(), callContext.stack.pop() - callContext.memory.Set32(mStart.Uint64(), &val) + mStart, val := scope.Stack.pop(), scope.Stack.pop() + scope.Memory.Set32(mStart.Uint64(), &val) return nil, nil } -func opMstore8(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - off, val := callContext.stack.pop(), callContext.stack.pop() - callContext.memory.store[off.Uint64()] = byte(val.Uint64()) +func opMstore8(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + off, val := scope.Stack.pop(), scope.Stack.pop() + scope.Memory.store[off.Uint64()] = byte(val.Uint64()) return nil, nil } -func opSload(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - loc := callContext.stack.peek() +func opSload(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + loc := scope.Stack.peek() hash := common.Hash(loc.Bytes32()) - val := interpreter.evm.StateDB.GetState(callContext.contract.Address(), hash) + val := interpreter.evm.StateDB.GetState(scope.Contract.Address(), hash) loc.SetBytes(val.Bytes()) return nil, nil } -func opSstore(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - loc := callContext.stack.pop() - val := callContext.stack.pop() - interpreter.evm.StateDB.SetState(callContext.contract.Address(), +func opSstore(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + loc := scope.Stack.pop() + val := scope.Stack.pop() + interpreter.evm.StateDB.SetState(scope.Contract.Address(), loc.Bytes32(), val.Bytes32()) return nil, nil } -func opJump(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - pos := callContext.stack.pop() - if !callContext.contract.validJumpdest(&pos) { +func opJump(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + pos := scope.Stack.pop() + if !scope.Contract.validJumpdest(&pos) { return nil, ErrInvalidJump } *pc = pos.Uint64() return nil, nil } -func opJumpi(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - pos, cond := callContext.stack.pop(), callContext.stack.pop() +func opJumpi(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + pos, cond := scope.Stack.pop(), scope.Stack.pop() if !cond.IsZero() { - if !callContext.contract.validJumpdest(&pos) { + if !scope.Contract.validJumpdest(&pos) { return nil, ErrInvalidJump } *pc = pos.Uint64() @@ -543,30 +543,30 @@ func opJumpi(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]b return nil, nil } -func opJumpdest(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { +func opJumpdest(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { return nil, nil } -func opPc(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - callContext.stack.push(new(uint256.Int).SetUint64(*pc)) +func opPc(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + scope.Stack.push(new(uint256.Int).SetUint64(*pc)) return nil, nil } -func opMsize(pc *uint64, interpreter *EVMInterpreter, 
callContext *callCtx) ([]byte, error) { - callContext.stack.push(new(uint256.Int).SetUint64(uint64(callContext.memory.Len()))) +func opMsize(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + scope.Stack.push(new(uint256.Int).SetUint64(uint64(scope.Memory.Len()))) return nil, nil } -func opGas(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - callContext.stack.push(new(uint256.Int).SetUint64(callContext.contract.Gas)) +func opGas(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + scope.Stack.push(new(uint256.Int).SetUint64(scope.Contract.Gas)) return nil, nil } -func opCreate(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { +func opCreate(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { var ( - value = callContext.stack.pop() - offset, size = callContext.stack.pop(), callContext.stack.pop() - input = callContext.memory.GetCopy(int64(offset.Uint64()), int64(size.Uint64())) + value = scope.Stack.pop() + offset, size = scope.Stack.pop(), scope.Stack.pop() + input = scope.Memory.GetCopy(int64(offset.Uint64()), int64(size.Uint64())) gas = interpreter.evm.CallGasTemp ) // reuse size int for stackvalue @@ -578,7 +578,7 @@ func opCreate(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([] bigVal = value.ToBig() } - res, addr, returnGas, suberr := interpreter.evm.Create(callContext.contract, input, gas, bigVal) + res, addr, returnGas, suberr := interpreter.evm.Create(scope.Contract, input, gas, bigVal) // Push item on the stack based on the returned error. If the ruleset is // homestead we must check for CodeStoreOutOfGasError (homestead only // rule) and treat as an error, if the ruleset is frontier we must @@ -592,8 +592,8 @@ func opCreate(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([] } else { stackvalue.SetBytes(addr.Bytes()) } - callContext.stack.push(&stackvalue) - callContext.contract.Gas += returnGas + scope.Stack.push(&stackvalue) + scope.Contract.Gas += returnGas if suberr == ErrExecutionReverted { return res, nil @@ -601,14 +601,15 @@ func opCreate(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([] return nil, nil } -func opCreate2(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { +func opCreate2(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { var ( - endowment = callContext.stack.pop() - offset, size = callContext.stack.pop(), callContext.stack.pop() - salt = callContext.stack.pop() - input = callContext.memory.GetCopy(int64(offset.Uint64()), int64(size.Uint64())) + endowment = scope.Stack.pop() + offset, size = scope.Stack.pop(), scope.Stack.pop() + salt = scope.Stack.pop() + input = scope.Memory.GetCopy(int64(offset.Uint64()), int64(size.Uint64())) gas = interpreter.evm.CallGasTemp ) + // reuse size int for stackvalue stackvalue := size //TODO: use uint256.Int instead of converting with toBig() @@ -616,7 +617,7 @@ func opCreate2(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([ if !endowment.IsZero() { bigEndowment = endowment.ToBig() } - res, addr, returnGas, suberr := interpreter.evm.Create2(callContext.contract, input, gas, + res, addr, returnGas, suberr := interpreter.evm.Create2(scope.Contract, input, gas, bigEndowment, &salt) // Push item on the stack based on the returned error. 
if suberr != nil { @@ -625,8 +626,8 @@ func opCreate2(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([ } else { stackvalue.SetBytes(addr.Bytes()) } - callContext.stack.push(&stackvalue) - callContext.contract.Gas += returnGas + scope.Stack.push(&stackvalue) + scope.Contract.Gas += returnGas if suberr == ErrExecutionReverted { return res, nil @@ -634,9 +635,9 @@ func opCreate2(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([ return nil, nil } -func opCall(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - stack := callContext.stack - // Pop gas. The actual gas in interpreter.evm.CallGasTemp. +func opCall(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + stack := scope.Stack + // Pop gas. The actual gas is in interpreter.evm.CallGasTemp. // We can use this as a temporary value temp := stack.pop() gas := interpreter.evm.CallGasTemp @@ -644,7 +645,7 @@ func opCall(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]by addr, value, inOffset, inSize, retOffset, retSize := stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop() toAddr := common.Address(addr.Bytes20()) // Get the arguments from the memory. - args := callContext.memory.GetPtr(int64(inOffset.Uint64()), int64(inSize.Uint64())) + args := scope.Memory.GetPtr(int64(inOffset.Uint64()), int64(inSize.Uint64())) var bigVal = big0 //TODO: use uint256.Int instead of converting with toBig() @@ -655,7 +656,7 @@ func opCall(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]by bigVal = value.ToBig() } - ret, returnGas, err := interpreter.evm.Call(callContext.contract, toAddr, args, gas, bigVal) + ret, returnGas, err := interpreter.evm.Call(scope.Contract, toAddr, args, gas, bigVal) if err != nil { temp.Clear() @@ -665,16 +666,16 @@ func opCall(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]by } stack.push(&temp) if err == nil || err == ErrExecutionReverted { - callContext.memory.Set(retOffset.Uint64(), retSize.Uint64(), ret) + scope.Memory.Set(retOffset.Uint64(), retSize.Uint64(), ret) } - callContext.contract.Gas += returnGas + scope.Contract.Gas += returnGas return ret, nil } -func opCallCode(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - // Pop gas. The actual gas is in interpreter.evm.CallGasTemp. - stack := callContext.stack +func opCallCode(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + // Pop gas. The actual gas is in interpreter.evm.CallGasTemp. + stack := scope.Stack // We use it as a temporary value temp := stack.pop() gas := interpreter.evm.CallGasTemp @@ -682,7 +683,7 @@ func opCallCode(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ( addr, value, inOffset, inSize, retOffset, retSize := stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop() toAddr := common.Address(addr.Bytes20()) // Get arguments from the memory.
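The CREATE2 result handling above condenses to a simple rule. The helper below is hypothetical (the real logic stays inlined in the handler) and only spells the rule out: push zero on any sub-call error, push the new contract address on success, and refund whatever gas the sub-call returned; CREATE differs only in its ruleset-dependent treatment of ErrCodeStoreOutOfGas:

func pushCreate2ResultSketch(scope *ScopeContext, addr common.Address, returnGas uint64, suberr error) {
	stackvalue := new(uint256.Int)
	if suberr != nil {
		stackvalue.Clear() // 0 signals a failed creation
	} else {
		stackvalue.SetBytes(addr.Bytes()) // address of the new contract
	}
	scope.Stack.push(stackvalue)
	scope.Contract.Gas += returnGas // return unused gas to the creating frame
}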
- args := callContext.memory.GetPtr(int64(inOffset.Uint64()), int64(inSize.Uint64())) + args := scope.Memory.GetPtr(int64(inOffset.Uint64()), int64(inSize.Uint64())) //TODO: use uint256.Int instead of converting with toBig() var bigVal = big0 @@ -691,7 +692,7 @@ func opCallCode(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ( bigVal = value.ToBig() } - ret, returnGas, err := interpreter.evm.CallCode(callContext.contract, toAddr, args, gas, bigVal) + ret, returnGas, err := interpreter.evm.CallCode(scope.Contract, toAddr, args, gas, bigVal) if err != nil { temp.Clear() interpreter.evm.CallErrorTemp = err // temp storage, for debug tracing @@ -700,16 +701,16 @@ func opCallCode(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ( } stack.push(&temp) if err == nil || err == ErrExecutionReverted { - callContext.memory.Set(retOffset.Uint64(), retSize.Uint64(), ret) + scope.Memory.Set(retOffset.Uint64(), retSize.Uint64(), ret) } - callContext.contract.Gas += returnGas + scope.Contract.Gas += returnGas return ret, nil } -func opDelegateCall(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - stack := callContext.stack - // Pop gas. The actual gas is in interpreter.evm.CallGasTemp. +func opDelegateCall(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + stack := scope.Stack + // Pop gas. The actual gas is in interpreter.evm.CallGasTemp. // We use it as a temporary value temp := stack.pop() gas := interpreter.evm.CallGasTemp @@ -717,9 +718,9 @@ func opDelegateCall(pc *uint64, interpreter *EVMInterpreter, callContext *callCt addr, inOffset, inSize, retOffset, retSize := stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop() toAddr := common.Address(addr.Bytes20()) // Get arguments from the memory. - args := callContext.memory.GetPtr(int64(inOffset.Uint64()), int64(inSize.Uint64())) + args := scope.Memory.GetPtr(int64(inOffset.Uint64()), int64(inSize.Uint64())) - ret, returnGas, err := interpreter.evm.DelegateCall(callContext.contract, toAddr, args, gas) + ret, returnGas, err := interpreter.evm.DelegateCall(scope.Contract, toAddr, args, gas) if err != nil { temp.Clear() interpreter.evm.CallErrorTemp = err // temp storage, for debug tracing @@ -728,16 +729,16 @@ func opDelegateCall(pc *uint64, interpreter *EVMInterpreter, callContext *callCt } stack.push(&temp) if err == nil || err == ErrExecutionReverted { - callContext.memory.Set(retOffset.Uint64(), retSize.Uint64(), ret) + scope.Memory.Set(retOffset.Uint64(), retSize.Uint64(), ret) } - callContext.contract.Gas += returnGas + scope.Contract.Gas += returnGas return ret, nil } -func opStaticCall(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - // Pop gas. The actual gas is in interpreter.evm.CallGasTemp. - stack := callContext.stack +func opStaticCall(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + // Pop gas. The actual gas is in interpreter.evm.CallGasTemp. + stack := scope.Stack // We use it as a temporary value temp := stack.pop() gas := interpreter.evm.CallGasTemp @@ -745,9 +746,9 @@ func opStaticCall(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) addr, inOffset, inSize, retOffset, retSize := stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop() toAddr := common.Address(addr.Bytes20()) // Get arguments from the memory.
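All four call variants (CALL, CALLCODE, DELEGATECALL, STATICCALL) share the epilogue these hunks touch. Spelled out once as a hedged sketch, with a hypothetical helper name and the success branch reconstructed since the hunks elide it:

func finishCallSketch(scope *ScopeContext, temp *uint256.Int, ret []byte, retOffset, retSize *uint256.Int, returnGas uint64, err error) {
	if err != nil {
		temp.Clear() // 0 = call failed
	} else {
		temp.SetOne() // 1 = call succeeded
	}
	scope.Stack.push(temp) // status word reuses the slot freed by the popped gas value
	// Return data reaches memory only on success or an explicit revert;
	// any other failure leaves the designated memory region untouched.
	if err == nil || err == ErrExecutionReverted {
		scope.Memory.Set(retOffset.Uint64(), retSize.Uint64(), ret)
	}
	scope.Contract.Gas += returnGas // refund leftover gas to the calling frame
}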
- args := callContext.memory.GetPtr(int64(inOffset.Uint64()), int64(inSize.Uint64())) + args := scope.Memory.GetPtr(int64(inOffset.Uint64()), int64(inSize.Uint64())) - ret, returnGas, err := interpreter.evm.StaticCall(callContext.contract, toAddr, args, gas) + ret, returnGas, err := interpreter.evm.StaticCall(scope.Contract, toAddr, args, gas) if err != nil { temp.Clear() interpreter.evm.CallErrorTemp = err // temp storage, for debug tracing @@ -756,36 +757,36 @@ func opStaticCall(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) } stack.push(&temp) if err == nil || err == ErrExecutionReverted { - callContext.memory.Set(retOffset.Uint64(), retSize.Uint64(), ret) + scope.Memory.Set(retOffset.Uint64(), retSize.Uint64(), ret) } - callContext.contract.Gas += returnGas + scope.Contract.Gas += returnGas return ret, nil } -func opReturn(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - offset, size := callContext.stack.pop(), callContext.stack.pop() - ret := callContext.memory.GetPtr(int64(offset.Uint64()), int64(size.Uint64())) +func opReturn(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + offset, size := scope.Stack.pop(), scope.Stack.pop() + ret := scope.Memory.GetPtr(int64(offset.Uint64()), int64(size.Uint64())) return ret, nil } -func opRevert(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - offset, size := callContext.stack.pop(), callContext.stack.pop() - ret := callContext.memory.GetPtr(int64(offset.Uint64()), int64(size.Uint64())) +func opRevert(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + offset, size := scope.Stack.pop(), scope.Stack.pop() + ret := scope.Memory.GetPtr(int64(offset.Uint64()), int64(size.Uint64())) return ret, nil } -func opStop(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { +func opStop(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { return nil, nil } -func opSuicide(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - beneficiary := callContext.stack.pop() - balance := interpreter.evm.StateDB.GetBalance(callContext.contract.Address()) +func opSuicide(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + beneficiary := scope.Stack.pop() + balance := interpreter.evm.StateDB.GetBalance(scope.Contract.Address()) interpreter.evm.StateDB.AddBalance(beneficiary.Bytes20(), balance) - interpreter.evm.StateDB.Suicide(callContext.contract.Address()) + interpreter.evm.StateDB.Suicide(scope.Contract.Address()) return nil, nil } @@ -793,18 +794,18 @@ func opSuicide(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([ // make log instruction function func makeLog(size int) executionFunc { - return func(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { + return func(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { topics := make([]common.Hash, size) - stack := callContext.stack + stack := scope.Stack mStart, mSize := stack.pop(), stack.pop() for i := 0; i < size; i++ { addr := stack.pop() topics[i] = addr.Bytes32() } - d := callContext.memory.GetCopy(int64(mStart.Uint64()), int64(mSize.Uint64())) + d := scope.Memory.GetCopy(int64(mStart.Uint64()), int64(mSize.Uint64())) interpreter.evm.StateDB.AddLog(&types.Log{ - Address: callContext.contract.Address(), + Address: scope.Contract.Address(), Topics: topics, Data: d, // This is a non-consensus 
field, but assigned here because @@ -817,24 +818,24 @@ func makeLog(size int) executionFunc { } // opPush1 is a specialized version of pushN -func opPush1(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { +func opPush1(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { var ( - codeLen = uint64(len(callContext.contract.Code)) + codeLen = uint64(len(scope.Contract.Code)) integer = new(uint256.Int) ) *pc += 1 if *pc < codeLen { - callContext.stack.push(integer.SetUint64(uint64(callContext.contract.Code[*pc]))) + scope.Stack.push(integer.SetUint64(uint64(scope.Contract.Code[*pc]))) } else { - callContext.stack.push(integer.Clear()) + scope.Stack.push(integer.Clear()) } return nil, nil } // make push instruction function func makePush(size uint64, pushByteSize int) executionFunc { - return func(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - codeLen := len(callContext.contract.Code) + return func(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + codeLen := len(scope.Contract.Code) startMin := codeLen if int(*pc+1) < startMin { @@ -847,8 +848,8 @@ func makePush(size uint64, pushByteSize int) executionFunc { } integer := new(uint256.Int) - callContext.stack.push(integer.SetBytes(common.RightPadBytes( - callContext.contract.Code[startMin:endMin], pushByteSize))) + scope.Stack.push(integer.SetBytes(common.RightPadBytes( + scope.Contract.Code[startMin:endMin], pushByteSize))) *pc += size return nil, nil @@ -857,8 +858,8 @@ func makePush(size uint64, pushByteSize int) executionFunc { // make dup instruction function func makeDup(size int64) executionFunc { - return func(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - callContext.stack.dup(int(size)) + return func(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + scope.Stack.dup(int(size)) return nil, nil } } @@ -867,8 +868,8 @@ func makeDup(size int64) executionFunc { func makeSwap(size int64) executionFunc { // switch n + 1 otherwise n would be swapped with n size++ - return func(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - callContext.stack.swap(int(size)) + return func(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + scope.Stack.swap(int(size)) return nil, nil } } diff --git a/core/vm/instructions_test.go b/core/vm/instructions_test.go index 66355bd560..6e8dbcac96 100644 --- a/core/vm/instructions_test.go +++ b/core/vm/instructions_test.go @@ -104,7 +104,7 @@ func testTwoOperandOp(t *testing.T, tests []TwoOperandTestcase, opFn executionFu expected := new(uint256.Int).SetBytes(common.Hex2Bytes(test.Expected)) stack.push(x) stack.push(y) - opFn(&pc, evmInterpreter, &callCtx{nil, stack, nil}) + opFn(&pc, evmInterpreter, &ScopeContext{nil, stack, nil}) if len(stack.data) != 1 { t.Errorf("Expected one item on stack after %v, got %d: ", name, len(stack.data)) } @@ -219,7 +219,7 @@ func TestAddMod(t *testing.T) { stack.push(z) stack.push(y) stack.push(x) - opAddmod(&pc, evmInterpreter, &callCtx{nil, stack, nil}) + opAddmod(&pc, evmInterpreter, &ScopeContext{nil, stack, nil}) actual := stack.pop() if actual.Cmp(expected) != 0 { t.Errorf("Testcase %d, expected %x, got %x", i, expected, actual) @@ -241,7 +241,7 @@ func getResult(args []*twoOperandParams, opFn executionFunc) []TwoOperandTestcas y := new(uint256.Int).SetBytes(common.Hex2Bytes(param.y)) stack.push(x) stack.push(y) - opFn(&pc, 
interpreter, &callCtx{nil, stack, nil}) + opFn(&pc, interpreter, &ScopeContext{nil, stack, nil}) actual := stack.pop() result[i] = TwoOperandTestcase{param.x, param.y, fmt.Sprintf("%064x", actual)} } @@ -299,7 +299,7 @@ func opBenchmark(bench *testing.B, op executionFunc, args ...string) { a.SetBytes(arg) stack.push(a) } - op(&pc, evmInterpreter, &callCtx{nil, stack, nil}) + op(&pc, evmInterpreter, &ScopeContext{nil, stack, nil}) stack.pop() } } @@ -529,12 +529,12 @@ func TestOpMstore(t *testing.T) { pc := uint64(0) v := "abcdef00000000000000abba000000000deaf000000c0de00100000000133700" stack.pushN(*new(uint256.Int).SetBytes(common.Hex2Bytes(v)), *new(uint256.Int)) - opMstore(&pc, evmInterpreter, &callCtx{mem, stack, nil}) + opMstore(&pc, evmInterpreter, &ScopeContext{mem, stack, nil}) if got := common.Bytes2Hex(mem.GetCopy(0, 32)); got != v { t.Fatalf("Mstore fail, got %v, expected %v", got, v) } stack.pushN(*new(uint256.Int).SetUint64(0x1), *new(uint256.Int)) - opMstore(&pc, evmInterpreter, &callCtx{mem, stack, nil}) + opMstore(&pc, evmInterpreter, &ScopeContext{mem, stack, nil}) if common.Bytes2Hex(mem.GetCopy(0, 32)) != "0000000000000000000000000000000000000000000000000000000000000001" { t.Fatalf("Mstore failed to overwrite previous value") } @@ -557,7 +557,7 @@ func BenchmarkOpMstore(bench *testing.B) { bench.ResetTimer() for i := 0; i < bench.N; i++ { stack.pushN(*value, *memStart) - opMstore(&pc, evmInterpreter, &callCtx{mem, stack, nil}) + opMstore(&pc, evmInterpreter, &ScopeContext{mem, stack, nil}) } } @@ -576,7 +576,7 @@ func BenchmarkOpSHA3(bench *testing.B) { bench.ResetTimer() for i := 0; i < bench.N; i++ { stack.pushN(*uint256.NewInt().SetUint64(32), *start) - opSha3(&pc, evmInterpreter, &callCtx{mem, stack, nil}) + opSha3(&pc, evmInterpreter, &ScopeContext{mem, stack, nil}) } } diff --git a/core/vm/interpreter.go b/core/vm/interpreter.go index 7951bb0c39..d95704c2b1 100644 --- a/core/vm/interpreter.go +++ b/core/vm/interpreter.go @@ -62,12 +62,12 @@ type Interpreter interface { CanRun([]byte) bool } -// callCtx contains the things that are per-call, such as stack and memory, +// ScopeContext contains the things that are per-call, such as stack and memory, // but not transients like pc and gas -type callCtx struct { - memory *Memory - stack *Stack - contract *Contract +type ScopeContext struct { + Memory *Memory + Stack *Stack + Contract *Contract } // keccakState wraps sha3.state. In addition to the usual hash methods, it also supports @@ -146,10 +146,10 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) ( op OpCode // current opcode mem = NewMemory() // bound memory stack = newstack() // local stack - callContext = &callCtx{ - memory: mem, - stack: stack, - contract: contract, + callContext = &ScopeContext{ + Memory: mem, + Stack: stack, + Contract: contract, } // For optimisation reason we're using uint64 as the program counter. // It's theoretically possible to go above 2^64. 
The YP defines the PC @@ -174,9 +174,9 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) ( defer func() { if err != nil { if !logged { - in.cfg.Tracer.CaptureState(in.evm, pcCopy, op, gasCopy, cost, mem, stack, in.returnData, contract, in.evm.depth, err) + in.cfg.Tracer.CaptureState(in.evm, pcCopy, op, gasCopy, cost, callContext, in.returnData, in.evm.depth, err) } else { - in.cfg.Tracer.CaptureFault(in.evm, pcCopy, op, gasCopy, cost, mem, stack, contract, in.evm.depth, err) + in.cfg.Tracer.CaptureFault(in.evm, pcCopy, op, gasCopy, cost, callContext, in.evm.depth, err) } } }() @@ -262,7 +262,7 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) ( } if in.cfg.Debug { - in.cfg.Tracer.CaptureState(in.evm, pc, op, gasCopy, cost, mem, stack, in.returnData, contract, in.evm.depth, err) + in.cfg.Tracer.CaptureState(in.evm, pc, op, gasCopy, cost, callContext, in.returnData, in.evm.depth, err) logged = true } diff --git a/core/vm/jump_table.go b/core/vm/jump_table.go index 91ad45208f..2d43ac9372 100644 --- a/core/vm/jump_table.go +++ b/core/vm/jump_table.go @@ -24,7 +24,7 @@ import ( ) type ( - executionFunc func(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) + executionFunc func(pc *uint64, interpreter *EVMInterpreter, callContext *ScopeContext) ([]byte, error) gasFunc func(*EVM, *Contract, *Stack, *Memory, uint64) (uint64, error) // last parameter is the requested memory size as a uint64 // memorySizeFunc returns the required size, and whether the operation overflowed a uint64 memorySizeFunc func(*Stack) (size uint64, overflow bool) diff --git a/core/vm/logger.go b/core/vm/logger.go index b66e4f908b..c64526f81e 100644 --- a/core/vm/logger.go +++ b/core/vm/logger.go @@ -18,7 +18,6 @@ package vm import ( "encoding/hex" - "errors" "fmt" "io" "math/big" @@ -32,8 +31,6 @@ import ( "github.com/ethereum/go-ethereum/params/types/ctypes" ) -var errTraceLimitReached = errors.New("the number of logs reached the specified limit") - // Storage represents a contract's storage. type Storage map[common.Hash]common.Hash @@ -108,11 +105,11 @@ func (s *StructLog) ErrorString() string { // Note that reference types are actual VM data structures; make copies // if you need to retain them beyond the current call. type Tracer interface { - CapturePreEVM(env *EVM, inputs map[string]interface{}) error - CaptureStart(from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) error - CaptureState(env *EVM, pc uint64, op OpCode, gas, cost uint64, memory *Memory, stack *Stack, rData []byte, contract *Contract, depth int, err error) error - CaptureFault(env *EVM, pc uint64, op OpCode, gas, cost uint64, memory *Memory, stack *Stack, contract *Contract, depth int, err error) error - CaptureEnd(env *EVM, output []byte, gasUsed uint64, t time.Duration, err error) error + CapturePreEVM(env *EVM, inputs map[string]interface{}) + CaptureStart(env *EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) + CaptureState(env *EVM, pc uint64, op OpCode, gas, cost uint64, scope *ScopeContext, rData []byte, depth int, err error) + CaptureFault(env *EVM, pc uint64, op OpCode, gas, cost uint64, scope *ScopeContext, depth int, err error) + CaptureEnd(env *EVM, output []byte, gasUsed uint64, t time.Duration, err error) } // StructLogger is an EVM state logger and implements Tracer. 
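With every hook now void and the loose memory/stack/contract parameters folded into *ScopeContext, a third-party tracer shrinks to little more than the hooks it cares about. A minimal sketch against the new interface (the type and its fields are illustrative, not part of the patch; note that a tracer can no longer abort execution by returning an error):

// opCounter tallies executed opcodes and records the deepest stack seen.
type opCounter struct {
	counts   map[OpCode]uint64
	maxStack int
}

func (c *opCounter) CapturePreEVM(env *EVM, inputs map[string]interface{}) {}
func (c *opCounter) CaptureStart(env *EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) {
}
func (c *opCounter) CaptureState(env *EVM, pc uint64, op OpCode, gas, cost uint64, scope *ScopeContext, rData []byte, depth int, err error) {
	if c.counts == nil {
		c.counts = make(map[OpCode]uint64)
	}
	c.counts[op]++
	if n := len(scope.Stack.Data()); n > c.maxStack {
		c.maxStack = n
	}
}
func (c *opCounter) CaptureFault(env *EVM, pc uint64, op OpCode, gas, cost uint64, scope *ScopeContext, depth int, err error) {
}
func (c *opCounter) CaptureEnd(env *EVM, output []byte, gasUsed uint64, t time.Duration, err error) {}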
@@ -142,22 +139,23 @@ func NewStructLogger(cfg *LogConfig) *StructLogger { // CapturePreEVM implements the Tracer interface to bootstrap the tracing context, // before EVM init. This is useful for reading initial balance, state, etc. -func (l *StructLogger) CapturePreEVM(env *EVM, inputs map[string]interface{}) error { - return nil +func (l *StructLogger) CapturePreEVM(env *EVM, inputs map[string]interface{}) { } // CaptureStart implements the Tracer interface to initialize the tracing operation. -func (l *StructLogger) CaptureStart(from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) error { - return nil +func (l *StructLogger) CaptureStart(env *EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) { } // CaptureState logs a new structured log message and pushes it out to the environment // // CaptureState also tracks SLOAD/SSTORE ops to track storage change. -func (l *StructLogger) CaptureState(env *EVM, pc uint64, op OpCode, gas, cost uint64, memory *Memory, stack *Stack, rData []byte, contract *Contract, depth int, err error) error { +func (l *StructLogger) CaptureState(env *EVM, pc uint64, op OpCode, gas, cost uint64, scope *ScopeContext, rData []byte, depth int, err error) { + memory := scope.Memory + stack := scope.Stack + contract := scope.Contract // check if already accumulated the specified number of logs if l.cfg.Limit != 0 && l.cfg.Limit <= len(l.logs) { - return errTraceLimitReached + return } // Copy a snapshot of the current memory state to a new buffer var mem []byte @@ -207,17 +205,15 @@ func (l *StructLogger) CaptureState(env *EVM, pc uint64, op OpCode, gas, cost ui // create a new snapshot of the EVM. log := StructLog{pc, op, gas, cost, mem, memory.Len(), stck, rdata, storage, depth, env.StateDB.GetRefund(), err} l.logs = append(l.logs, log) - return nil } // CaptureFault implements the Tracer interface to trace an execution fault // while running an opcode. -func (l *StructLogger) CaptureFault(env *EVM, pc uint64, op OpCode, gas, cost uint64, memory *Memory, stack *Stack, contract *Contract, depth int, err error) error { - return nil +func (l *StructLogger) CaptureFault(env *EVM, pc uint64, op OpCode, gas, cost uint64, scope *ScopeContext, depth int, err error) { } // CaptureEnd is called after the call finishes to finalize the tracing. -func (l *StructLogger) CaptureEnd(env *EVM, output []byte, gasUsed uint64, t time.Duration, err error) error { +func (l *StructLogger) CaptureEnd(env *EVM, output []byte, gasUsed uint64, t time.Duration, err error) { l.output = output l.err = err if l.cfg.Debug { @@ -226,7 +222,6 @@ func (l *StructLogger) CaptureEnd(env *EVM, output []byte, gasUsed uint64, t tim fmt.Printf(" error: %v\n", err) } } - return nil } // StructLogs returns the captured log entries. 
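A behavioural consequence worth noting: with errTraceLimitReached gone, filling cfg.Limit now silently stops recording instead of failing the run, so callers just read back whatever was captured. A hedged usage sketch, assuming package vm, an available StateDB implementation and an "os" import:

func traceWithLimitSketch(statedb StateDB) {
	tracer := NewStructLogger(&LogConfig{Limit: 256}) // at most 256 entries are recorded
	evm := NewEVM(BlockContext{}, TxContext{}, statedb, params.TestChainConfig, Config{Debug: true, Tracer: tracer})
	_ = evm // ... run a Call or Create through evm here ...
	WriteTrace(os.Stderr, tracer.StructLogs()) // pretty-print whatever was captured
}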
@@ -300,11 +295,10 @@ func NewMarkdownLogger(cfg *LogConfig, writer io.Writer) *mdLogger { return l } -func (t *mdLogger) CapturePreEVM(env *EVM, inputs map[string]interface{}) error { - return nil +func (t *mdLogger) CapturePreEVM(env *EVM, inputs map[string]interface{}) { } -func (t *mdLogger) CaptureStart(from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) error { +func (t *mdLogger) CaptureStart(env *EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) { if !create { fmt.Fprintf(t.out, "From: `%v`\nTo: `%v`\nData: `0x%x`\nGas: `%d`\nValue `%v` wei\n", from.String(), to.String(), @@ -319,10 +313,11 @@ func (t *mdLogger) CaptureStart(from common.Address, to common.Address, create b | Pc | Op | Cost | Stack | RStack | Refund | |-------|-------------|------|-----------|-----------|---------| `) - return nil } -func (t *mdLogger) CaptureState(env *EVM, pc uint64, op OpCode, gas, cost uint64, memory *Memory, stack *Stack, rData []byte, contract *Contract, depth int, err error) error { +// CaptureState writes a markdown table row for the opcode being executed. +func (t *mdLogger) CaptureState(env *EVM, pc uint64, op OpCode, gas, cost uint64, scope *ScopeContext, rData []byte, depth int, err error) { + stack := scope.Stack fmt.Fprintf(t.out, "| %4d | %10v | %3d |", pc, op, cost) if !t.cfg.DisableStack { @@ -339,18 +334,13 @@ func (t *mdLogger) CaptureState(env *EVM, pc uint64, op OpCode, gas, cost uint64 if err != nil { fmt.Fprintf(t.out, "Error: %v\n", err) } - return nil } -func (t *mdLogger) CaptureFault(env *EVM, pc uint64, op OpCode, gas, cost uint64, memory *Memory, stack *Stack, contract *Contract, depth int, err error) error { - +func (t *mdLogger) CaptureFault(env *EVM, pc uint64, op OpCode, gas, cost uint64, scope *ScopeContext, depth int, err error) { fmt.Fprintf(t.out, "\nError: at pc=%d, op=%v: %v\n", pc, op, err) - - return nil } -func (t *mdLogger) CaptureEnd(env *EVM, output []byte, gasUsed uint64, tm time.Duration, err error) error { +func (t *mdLogger) CaptureEnd(env *EVM, output []byte, gasUsed uint64, tm time.Duration, err error) { fmt.Fprintf(t.out, "\nOutput: `0x%x`\nConsumed gas: `%d`\nError: `%v`\n", output, gasUsed, err) - return nil } diff --git a/core/vm/logger_json.go b/core/vm/logger_json.go index abc916df6d..fd53924adb 100644 --- a/core/vm/logger_json.go +++ b/core/vm/logger_json.go @@ -41,16 +41,19 @@ func NewJSONLogger(cfg *LogConfig, writer io.Writer) *JSONLogger { return l } -func (l *JSONLogger) CapturePreEVM(env *EVM, inputs map[string]interface{}) error { - return nil +func (l *JSONLogger) CapturePreEVM(env *EVM, inputs map[string]interface{}) { } -func (l *JSONLogger) CaptureStart(from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) error { - return nil +func (l *JSONLogger) CaptureStart(env *EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) { } +func (l *JSONLogger) CaptureFault(*EVM, uint64, OpCode, uint64, uint64, *ScopeContext, int, error) {} + // CaptureState outputs state information on the logger.
-func (l *JSONLogger) CaptureState(env *EVM, pc uint64, op OpCode, gas, cost uint64, memory *Memory, stack *Stack, rData []byte, contract *Contract, depth int, err error) error { +func (l *JSONLogger) CaptureState(env *EVM, pc uint64, op OpCode, gas, cost uint64, scope *ScopeContext, rData []byte, depth int, err error) { + memory := scope.Memory + stack := scope.Stack + log := StructLog{ Pc: pc, Op: op, @@ -76,16 +79,11 @@ func (l *JSONLogger) CaptureState(env *EVM, pc uint64, op OpCode, gas, cost uint if !l.cfg.DisableReturnData { log.ReturnData = rData } - return l.encoder.Encode(log) -} - -// CaptureFault outputs state information on the logger. -func (l *JSONLogger) CaptureFault(env *EVM, pc uint64, op OpCode, gas, cost uint64, memory *Memory, stack *Stack, contract *Contract, depth int, err error) error { - return nil + l.encoder.Encode(log) } // CaptureEnd is triggered at end of execution. -func (l *JSONLogger) CaptureEnd(env *EVM, output []byte, gasUsed uint64, t time.Duration, err error) error { +func (l *JSONLogger) CaptureEnd(env *EVM, output []byte, gasUsed uint64, t time.Duration, err error) { type endLog struct { Output string `json:"output"` GasUsed math.HexOrDecimal64 `json:"gasUsed"` @@ -93,7 +91,8 @@ func (l *JSONLogger) CaptureEnd(env *EVM, output []byte, gasUsed uint64, t time. Err string `json:"error,omitempty"` } if err != nil { - return l.encoder.Encode(endLog{common.Bytes2Hex(output), math.HexOrDecimal64(gasUsed), t, err.Error()}) + l.encoder.Encode(endLog{common.Bytes2Hex(output), math.HexOrDecimal64(gasUsed), t, err.Error()}) + return } - return l.encoder.Encode(endLog{common.Bytes2Hex(output), math.HexOrDecimal64(gasUsed), t, ""}) + l.encoder.Encode(endLog{common.Bytes2Hex(output), math.HexOrDecimal64(gasUsed), t, ""}) } diff --git a/core/vm/logger_test.go b/core/vm/logger_test.go index 5a5f42fd34..9c936af36a 100644 --- a/core/vm/logger_test.go +++ b/core/vm/logger_test.go @@ -53,16 +53,20 @@ func TestStoreCapture(t *testing.T) { var ( env = NewEVM(BlockContext{}, TxContext{}, &dummyStatedb{}, params.TestChainConfig, Config{}) logger = NewStructLogger(nil) - mem = NewMemory() - stack = newstack() contract = NewContract(&dummyContractRef{}, &dummyContractRef{}, new(big.Int), 0) + scope = &ScopeContext{ + Memory: NewMemory(), + Stack: newstack(), + Contract: contract, + } ) - stack.push(uint256.NewInt().SetUint64(1)) - stack.push(uint256.NewInt()) + scope.Stack.push(uint256.NewInt().SetUint64(1)) + scope.Stack.push(uint256.NewInt()) var index common.Hash - logger.CaptureState(env, 0, SSTORE, 0, 0, mem, stack, nil, contract, 0, nil) + logger.CaptureState(env, 0, SSTORE, 0, 0, scope, nil, 0, nil) if len(logger.storage[contract.Address()]) == 0 { - t.Fatalf("expected exactly 1 changed value on address %x, got %d", contract.Address(), len(logger.storage[contract.Address()])) + t.Fatalf("expected exactly 1 changed value on address %x, got %d", contract.Address(), + len(logger.storage[contract.Address()])) } exp := common.BigToHash(big.NewInt(1)) if logger.storage[contract.Address()][index] != exp { diff --git a/core/vm/runtime/runtime.go b/core/vm/runtime/runtime.go index d7d0fffc3f..7aab05bb17 100644 --- a/core/vm/runtime/runtime.go +++ b/core/vm/runtime/runtime.go @@ -150,7 +150,6 @@ func Create(input []byte, cfg *Config) ([]byte, common.Address, uint64, error) { if cfg.ChainConfig.IsEnabled(cfg.ChainConfig.GetEIP2929Transition, vmenv.Context.BlockNumber) { cfg.State.PrepareAccessList(cfg.Origin, nil, vmenv.ActivePrecompiles(), nil) } - // Call the code with the given
configuration. code, address, leftOverGas, err := vmenv.Create( sender, @@ -176,7 +175,6 @@ func Call(address common.Address, input []byte, cfg *Config) ([]byte, uint64, er if cfg.ChainConfig.IsEnabled(cfg.ChainConfig.GetEIP2929Transition, vmenv.Context.BlockNumber) { statedb.PrepareAccessList(cfg.Origin, &address, vmenv.ActivePrecompiles(), nil) } - // Call the code with the given configuration. ret, leftOverGas, err := vmenv.Call( sender, @@ -185,6 +183,5 @@ func Call(address common.Address, input []byte, cfg *Config) ([]byte, uint64, er cfg.GasLimit, cfg.Value, ) - return ret, leftOverGas, err } diff --git a/core/vm/runtime/runtime_test.go b/core/vm/runtime/runtime_test.go index e83b15e3b7..78918bdf55 100644 --- a/core/vm/runtime/runtime_test.go +++ b/core/vm/runtime/runtime_test.go @@ -325,8 +325,7 @@ type stepCounter struct { steps int } -func (s *stepCounter) CapturePreEVM(env *vm.EVM, inputs map[string]interface{}) error { - return nil +func (s *stepCounter) CapturePreEVM(env *vm.EVM, inputs map[string]interface{}) { } func (s *stepCounter) CaptureStart(from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) error { @@ -339,7 +338,6 @@ func (s *stepCounter) CaptureState(env *vm.EVM, pc uint64, op vm.OpCode, gas, co //s.inner.CaptureState(env, pc, op, gas, cost, memory, stack, rStack, contract, depth, err) return nil } - func (s *stepCounter) CaptureFault(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, memory *vm.Memory, stack *vm.Stack, contract *vm.Contract, depth int, err error) error { return nil } diff --git a/eth/api.go b/eth/api.go index fc676c81c1..2121767f85 100644 --- a/eth/api.go +++ b/eth/api.go @@ -61,20 +61,6 @@ func (api *PublicEthereumAPI) Coinbase() (common.Address, error) { return api.Etherbase() } -// Hashrate returns the POW hashrate -func (api *PublicEthereumAPI) Hashrate() hexutil.Uint64 { - return hexutil.Uint64(api.e.Miner().HashRate()) -} - -// ChainId is the EIP-155 replay-protection chain id for the current ethereum chain config. -func (api *PublicEthereumAPI) ChainId() (hexutil.Uint64, error) { - // if current block is at or past the EIP-155 replay-protection fork block, return chainID from config - if config := api.e.blockchain.Config(); config.IsEnabled(config.GetEIP155Transition, api.e.blockchain.CurrentBlock().Number()) { - return (hexutil.Uint64)(api.e.blockchain.Config().GetChainID().Uint64()), nil - } - return hexutil.Uint64(0), fmt.Errorf("chain not synced beyond EIP-155 replay-protection fork block") -} - // PublicMinerAPI provides an API to control the miner. // It offers only methods that operate on data that pose no security risk when it is publicly accessible. type PublicMinerAPI struct { @@ -149,11 +135,6 @@ func (api *PrivateMinerAPI) SetRecommitInterval(interval int) { api.e.Miner().SetRecommitInterval(time.Duration(interval) * time.Millisecond) } -// GetHashrate returns the current hashrate of the miner. -func (api *PrivateMinerAPI) GetHashrate() uint64 { - return api.e.miner.HashRate() -} - // PrivateAdminAPI is the collection of Ethereum full node-related APIs // exposed over the private admin endpoint. 
type PrivateAdminAPI struct { @@ -450,11 +431,10 @@ func (api *PrivateDebugAPI) StorageRangeAt(blockHash common.Hash, txIndex int, c if block == nil { return StorageRangeResult{}, fmt.Errorf("block %#x not found", blockHash) } - _, _, statedb, release, err := api.eth.stateAtTransaction(block, txIndex, 0) + _, _, statedb, err := api.eth.stateAtTransaction(block, txIndex, 0) if err != nil { return StorageRangeResult{}, err } - defer release() st := statedb.StorageTrie(contractAddress) if st == nil { return StorageRangeResult{}, fmt.Errorf("account %x doesn't exist", contractAddress) diff --git a/eth/api_backend.go b/eth/api_backend.go index 74f6af27a4..4f30d6ff64 100644 --- a/eth/api_backend.go +++ b/eth/api_backend.go @@ -193,12 +193,14 @@ func (b *EthAPIBackend) GetTd(ctx context.Context, hash common.Hash) *big.Int { return b.eth.blockchain.GetTdByHash(hash) } -func (b *EthAPIBackend) GetEVM(ctx context.Context, msg core.Message, state *state.StateDB, header *types.Header) (*vm.EVM, func() error, error) { +func (b *EthAPIBackend) GetEVM(ctx context.Context, msg core.Message, state *state.StateDB, header *types.Header, vmConfig *vm.Config) (*vm.EVM, func() error, error) { vmError := func() error { return nil } - + if vmConfig == nil { + vmConfig = b.eth.blockchain.GetVMConfig() + } txContext := core.NewEVMTxContext(msg) context := core.NewEVMBlockContext(header, b.eth.BlockChain(), nil) - return vm.NewEVM(context, txContext, state, b.eth.blockchain.Config(), *b.eth.blockchain.GetVMConfig()), vmError, nil + return vm.NewEVM(context, txContext, state, b.eth.blockchain.Config(), *vmConfig), vmError, nil } func (b *EthAPIBackend) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription { @@ -333,14 +335,10 @@ func (b *EthAPIBackend) StartMining(threads int) error { return b.eth.StartMining(threads) } -func (b *EthAPIBackend) StateAtBlock(ctx context.Context, block *types.Block, reexec uint64) (*state.StateDB, func(), error) { - return b.eth.stateAtBlock(block, reexec) -} - -func (b *EthAPIBackend) StatesInRange(ctx context.Context, fromBlock *types.Block, toBlock *types.Block, reexec uint64) ([]*state.StateDB, func(), error) { - return b.eth.statesInRange(fromBlock, toBlock, reexec) +func (b *EthAPIBackend) StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, checkLive bool) (*state.StateDB, error) { + return b.eth.stateAtBlock(block, reexec, base, checkLive) } -func (b *EthAPIBackend) StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, func(), error) { +func (b *EthAPIBackend) StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, error) { return b.eth.stateAtTransaction(block, txIndex, reexec) } diff --git a/eth/backend.go b/eth/backend.go index f4276fc781..085d8ddb4c 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -129,11 +129,15 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { } log.Info("Allocated trie memory caches", "clean", common.StorageSize(config.TrieCleanCache)*1024*1024, "dirty", common.StorageSize(config.TrieDirtyCache)*1024*1024) + // Transfer mining-related config to the ethash config. 
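The GetEVM change above is the enabling hook for per-call interpreter configuration: a nil vmConfig keeps the chain's own vm.Config, while a non-nil one overrides it, which is exactly what tracing callers need. A sketch of such a caller (the function and its plumbing are assumptions; only the GetEVM signature comes from the diff):

func tracedEVMSketch(ctx context.Context, b *EthAPIBackend, msg core.Message, statedb *state.StateDB, header *types.Header) error {
	vmCfg := &vm.Config{Debug: true, Tracer: vm.NewStructLogger(nil)} // one-off tracing config
	evm, vmError, err := b.GetEVM(ctx, msg, statedb, header, vmCfg)
	if err != nil {
		return err
	}
	_ = evm // ... apply the message against evm here ...
	return vmError()
}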
+ ethashConfig := config.Ethash + ethashConfig.NotifyFull = config.Miner.NotifyFull + // Assemble the Ethereum object if config.DatabaseFreezerRemote != "" { - chainDb, err = stack.OpenDatabaseWithFreezerRemote("chaindata", config.DatabaseCache, config.DatabaseHandles, config.DatabaseFreezerRemote) + chainDb, err = stack.OpenDatabaseWithFreezerRemote("chaindata", config.DatabaseCache, config.DatabaseHandles, config.DatabaseFreezerRemote, false) } else { - chainDb, err = stack.OpenDatabaseWithFreezer("chaindata", config.DatabaseCache, config.DatabaseHandles, config.DatabaseFreezer, "eth/db/chaindata/") + chainDb, err = stack.OpenDatabaseWithFreezer("chaindata", config.DatabaseCache, config.DatabaseHandles, config.DatabaseFreezer, "eth/db/chaindata/", false) } if err != nil { return nil, err @@ -152,7 +156,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { chainDb: chainDb, eventMux: stack.EventMux(), accountManager: stack.AccountManager(), - engine: ethconfig.CreateConsensusEngine(stack, chainConfig, &config.Ethash, config.Miner.Notify, config.Miner.Noverify, chainDb), + engine: ethconfig.CreateConsensusEngine(stack, chainConfig, &ethashConfig, config.Miner.Notify, config.Miner.Noverify, chainDb), closeBloomHandler: make(chan struct{}), networkID: config.NetworkId, gasPrice: config.Miner.GasPrice, diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index 6466f930c4..7986b1708d 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -240,7 +240,7 @@ func New(checkpoint uint64, stateDb ethdb.Database, stateBloom *trie.SyncBloom, headerProcCh: make(chan []*types.Header, 1), quitCh: make(chan struct{}), stateCh: make(chan dataPack), - SnapSyncer: snap.NewSyncer(stateDb, stateBloom), + SnapSyncer: snap.NewSyncer(stateDb), stateSyncStart: make(chan *stateSync), syncStatsState: stateSyncStats{ processed: rawdb.ReadFastTrieProgress(stateDb), diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go index f8285b2bb9..20f5ad20cd 100644 --- a/eth/ethconfig/config.go +++ b/eth/ethconfig/config.go @@ -191,11 +191,11 @@ type Config struct { EVMInterpreter string // RPCGasCap is the global gas cap for eth-call variants. - RPCGasCap uint64 `toml:",omitempty"` + RPCGasCap uint64 // RPCTxFeeCap is the global transaction fee(price * gaslimit) cap for // send-transction variants. The unit is ether. - RPCTxFeeCap float64 `toml:",omitempty"` + RPCTxFeeCap float64 // Checkpoint is a hardcoded checkpoint which can be nil.
Checkpoint *ctypes.TrustedCheckpoint `toml:",omitempty"` @@ -239,6 +239,7 @@ func CreateConsensusEngine(stack *node.Node, chainConfig ctypes.ChainConfigurato return ethash.NewPoissonFaker() default: engine := ethash.New(ethash.Config{ + PowMode: config.PowMode, CacheDir: stack.ResolvePath(config.CacheDir), CachesInMem: config.CachesInMem, CachesOnDisk: config.CachesOnDisk, @@ -247,6 +248,7 @@ func CreateConsensusEngine(stack *node.Node, chainConfig ctypes.ChainConfigurato DatasetsInMem: config.DatasetsInMem, DatasetsOnDisk: config.DatasetsOnDisk, DatasetsLockMmap: config.DatasetsLockMmap, + NotifyFull: config.NotifyFull, ECIP1099Block: chainConfig.GetEthashECIP1099Transition(), }, notify, noverify) engine.SetThreads(-1) // Disable CPU mining diff --git a/eth/filters/bench_test.go b/eth/filters/bench_test.go index 9fdaaf2255..e2bbc6f1dc 100644 --- a/eth/filters/bench_test.go +++ b/eth/filters/bench_test.go @@ -65,7 +65,7 @@ func benchmarkBloomBits(b *testing.B, sectionSize uint64) { benchDataDir := vars.DefaultDataDir() + "/geth/chaindata" b.Log("Running bloombits benchmark section size:", sectionSize) - db, err := rawdb.NewLevelDBDatabase(benchDataDir, 128, 1024, "") + db, err := rawdb.NewLevelDBDatabase(benchDataDir, 128, 1024, "", false) if err != nil { b.Fatalf("error opening database at %v: %v", benchDataDir, err) } @@ -126,7 +126,7 @@ func benchmarkBloomBits(b *testing.B, sectionSize uint64) { for i := 0; i < benchFilterCnt; i++ { if i%20 == 0 { db.Close() - db, _ = rawdb.NewLevelDBDatabase(benchDataDir, 128, 1024, "") + db, _ = rawdb.NewLevelDBDatabase(benchDataDir, 128, 1024, "", false) backend = &testBackend{db: db, sections: cnt} } var addr common.Address @@ -157,7 +157,7 @@ func clearBloomBits(db ethdb.Database) { func BenchmarkNoBloomBits(b *testing.B) { benchDataDir := vars.DefaultDataDir() + "/geth/chaindata" b.Log("Running benchmark without bloombits") - db, err := rawdb.NewLevelDBDatabase(benchDataDir, 128, 1024, "") + db, err := rawdb.NewLevelDBDatabase(benchDataDir, 128, 1024, "", false) if err != nil { b.Fatalf("error opening database at %v: %v", benchDataDir, err) } diff --git a/eth/filters/filter_test.go b/eth/filters/filter_test.go index f45720d5a9..3fc77bbc4d 100644 --- a/eth/filters/filter_test.go +++ b/eth/filters/filter_test.go @@ -49,7 +49,7 @@ func BenchmarkFilters(b *testing.B) { defer os.RemoveAll(dir) var ( - db, _ = rawdb.NewLevelDBDatabase(dir, 0, 0, "") + db, _ = rawdb.NewLevelDBDatabase(dir, 0, 0, "", false) backend = &testBackend{db: db} key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") addr1 = crypto.PubkeyToAddress(key1.PublicKey) @@ -103,7 +103,7 @@ func TestFilters(t *testing.T) { defer os.RemoveAll(dir) var ( - db, _ = rawdb.NewLevelDBDatabase(dir, 0, 0, "") + db, _ = rawdb.NewLevelDBDatabase(dir, 0, 0, "", false) backend = &testBackend{db: db} key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") addr = crypto.PubkeyToAddress(key1.PublicKey) diff --git a/eth/handler.go b/eth/handler.go index 2006504812..aab9533a60 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -178,7 +178,11 @@ func newHandler(config *handlerConfig) (*handler, error) { // Construct the downloader (long sync) and its backing state bloom if fast // sync is requested. The downloader is responsible for deallocating the state // bloom when it's done. 
- if atomic.LoadUint32(&h.fastSync) == 1 { + // Note: we don't enable it if snap-sync is performed, since it's very heavy + // and the heal portion of snap sync is much lighter than fast sync's. What we + // particularly want to avoid is a 90%-finished (but restarted) snap-sync + // beginning to index the entire trie + if atomic.LoadUint32(&h.fastSync) == 1 && atomic.LoadUint32(&h.snapSync) == 0 { h.stateBloom = trie.NewSyncBloom(config.BloomCache, config.Database) } h.downloader = downloader.New(h.checkpointNumber, config.Database, h.stateBloom, h.eventMux, h.chain, nil, h.removePeer) diff --git a/eth/protocols/eth/handler.go b/eth/protocols/eth/handler.go index fce477fa24..aab96c73af 100644 --- a/eth/protocols/eth/handler.go +++ b/eth/protocols/eth/handler.go @@ -24,6 +24,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/p2p/enr" @@ -241,7 +242,18 @@ func handleMessage(backend Backend, peer *Peer) error { } else if peer.Version() >= ETH66 { handlers = eth66 } - + // Track the amount of time it takes to serve the request and run the handler + if metrics.Enabled { + h := fmt.Sprintf("%s/%s/%d/%#02x", p2p.HandleHistName, ProtocolName, peer.Version(), msg.Code) + defer func(start time.Time) { + sampler := func() metrics.Sample { + return metrics.ResettingSample( + metrics.NewExpDecaySample(1028, 0.015), + ) + } + metrics.GetOrRegisterHistogramLazy(h, nil, sampler).Update(time.Since(start).Microseconds()) + }(time.Now()) + } if handler := handlers[msg.Code]; handler != nil { return handler(backend, msg, peer) } diff --git a/eth/protocols/eth/protocol_test.go b/eth/protocols/eth/protocol_test.go index d92f3ea837..7910c9b735 100644 --- a/eth/protocols/eth/protocol_test.go +++ b/eth/protocols/eth/protocol_test.go @@ -178,7 +178,7 @@ func TestEth66Messages(t *testing.T) { // init the receipts { receipts = []*types.Receipt{ - &types.Receipt{ + { Status: types.ReceiptStatusFailed, CumulativeGasUsed: 1, Logs: []*types.Log{ diff --git a/eth/protocols/snap/handler.go b/eth/protocols/snap/handler.go index 24c8599552..f7939964f3 100644 --- a/eth/protocols/snap/handler.go +++ b/eth/protocols/snap/handler.go @@ -19,12 +19,14 @@ package snap import ( "bytes" "fmt" + "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/light" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/p2p/enr" @@ -48,6 +50,11 @@ const ( // maxTrieNodeLookups is the maximum number of state trie nodes to serve. This // number is there to limit the number of disk lookups. maxTrieNodeLookups = 1024 + + // maxTrieNodeTimeSpent is the maximum time we should spend on looking up trie nodes. + // If we spend too much time, then there's a fairly high chance of timing out + // at the remote side, which means all the work is in vain. + maxTrieNodeTimeSpent = 5 * time.Second ) // Handler is a callback to invoke from an outside runner after the boilerplate @@ -115,7 +122,7 @@ func handle(backend Backend, peer *Peer) error { } // handleMessage is invoked whenever an inbound message is received from a -// remote peer on the `spap` protocol.
The remote connection is torn down upon +// remote peer on the `snap` protocol. The remote connection is torn down upon // returning any error. func handleMessage(backend Backend, peer *Peer) error { // Read the next message from the remote peer, and ensure it's fully consumed @@ -127,7 +134,19 @@ func handleMessage(backend Backend, peer *Peer) error { return fmt.Errorf("%w: %v > %v", errMsgTooLarge, msg.Size, maxMessageSize) } defer msg.Discard() - + start := time.Now() + // Track the amount of time it takes to serve the request and run the handler + if metrics.Enabled { + h := fmt.Sprintf("%s/%s/%d/%#02x", p2p.HandleHistName, ProtocolName, peer.Version(), msg.Code) + defer func(start time.Time) { + sampler := func() metrics.Sample { + return metrics.ResettingSample( + metrics.NewExpDecaySample(1028, 0.015), + ) + } + metrics.GetOrRegisterHistogramLazy(h, nil, sampler).Update(time.Since(start).Microseconds()) + }(start) + } // Handle the message depending on its contents switch { case msg.Code == GetAccountRangeMsg: @@ -256,8 +275,13 @@ func handleMessage(backend Backend, peer *Peer) error { var ( storage []*StorageData last common.Hash + abort bool ) - for it.Next() && size < hardLimit { + for it.Next() { + if size >= hardLimit { + abort = true + break + } hash, slot := it.Hash(), common.CopyBytes(it.Slot()) // Track the returned interval for the Merkle proofs @@ -280,7 +304,7 @@ func handleMessage(backend Backend, peer *Peer) error { // Generate the Merkle proofs for the first and last storage slot, but // only if the response was capped. If the entire storage trie included // in the response, no need for any proofs. - if origin != (common.Hash{}) || size >= hardLimit { + if origin != (common.Hash{}) || abort { // Request started at a non-zero hash or was capped prematurely, add // the endpoint Merkle proofs accTrie, err := trie.New(req.Root, backend.Chain().StateCache().TrieDB()) @@ -451,13 +475,13 @@ func handleMessage(backend Backend, peer *Peer) error { bytes += uint64(len(blob)) // Sanity check limits to avoid DoS on the store trie loads - if bytes > req.Bytes || loads > maxTrieNodeLookups { + if bytes > req.Bytes || loads > maxTrieNodeLookups || time.Since(start) > maxTrieNodeTimeSpent { break } } } // Abort request processing if we've exceeded our limits - if bytes > req.Bytes || loads > maxTrieNodeLookups { + if bytes > req.Bytes || loads > maxTrieNodeLookups || time.Since(start) > maxTrieNodeTimeSpent { break } } diff --git a/eth/protocols/snap/sync.go b/eth/protocols/snap/sync.go index 1cfdef15bd..2924fa0802 100644 --- a/eth/protocols/snap/sync.go +++ b/eth/protocols/snap/sync.go @@ -85,7 +85,7 @@ var ( // requestTimeout is the maximum time a peer is allowed to spend on serving // a single network request. - requestTimeout = 10 * time.Second // TODO(karalabe): Make it dynamic ala fast-sync? + requestTimeout = 15 * time.Second // TODO(karalabe): Make it dynamic ala fast-sync?
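The serve-time instrumentation added to both protocol handlers above follows a single pattern. As a generic, hedged sketch (the wrapper function is hypothetical; the metrics calls are the ones used in the hunks):

func timeHandlerSketch(name string, handle func() error) error {
	if metrics.Enabled {
		defer func(start time.Time) {
			// The sampler closure only runs if the histogram is being created,
			// so the exp-decay sample is never allocated for unused metrics.
			sampler := func() metrics.Sample {
				return metrics.ResettingSample(metrics.NewExpDecaySample(1028, 0.015))
			}
			metrics.GetOrRegisterHistogramLazy(name, nil, sampler).Update(time.Since(start).Microseconds())
		}(time.Now())
	}
	return handle()
}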
) // ErrCancelled is returned from snap syncing if the operation was prematurely @@ -376,8 +376,7 @@ type SyncPeer interface { // - The peer delivers a stale response after a previous timeout // - The peer delivers a refusal to serve the requested state type Syncer struct { - db ethdb.KeyValueStore // Database to store the trie nodes into (and dedup) - bloom *trie.SyncBloom // Bloom filter to deduplicate nodes for state fixup + db ethdb.KeyValueStore // Database to store the trie nodes into (and dedup) root common.Hash // Current state trie root being synced tasks []*accountTask // Current account task set being synced @@ -446,10 +445,9 @@ type Syncer struct { // NewSyncer creates a new snapshot syncer to download the Ethereum state over the // snap protocol. -func NewSyncer(db ethdb.KeyValueStore, bloom *trie.SyncBloom) *Syncer { +func NewSyncer(db ethdb.KeyValueStore) *Syncer { return &Syncer{ - db: db, - bloom: bloom, + db: db, peers: make(map[string]SyncPeer), peerJoin: new(event.Feed), @@ -546,7 +544,7 @@ func (s *Syncer) Sync(root common.Hash, cancel chan struct{}) error { s.lock.Lock() s.root = root s.healer = &healTask{ - scheduler: state.NewStateSync(root, s.db, s.bloom), + scheduler: state.NewStateSync(root, s.db, nil), trieTasks: make(map[common.Hash]trie.SyncPath), codeTasks: make(map[common.Hash]struct{}), } @@ -813,6 +811,8 @@ func (s *Syncer) assignAccountTasks(cancel chan struct{}) { if idle == "" { return } + peer := s.peers[idle] + // Matched a pending task to an idle peer, allocate a unique request id var reqid uint64 for { @@ -836,14 +836,14 @@ func (s *Syncer) assignAccountTasks(cancel chan struct{}) { task: task, } req.timeout = time.AfterFunc(requestTimeout, func() { - log.Debug("Account range request timed out") + peer.Log().Debug("Account range request timed out", "reqid", reqid) s.scheduleRevertAccountRequest(req) }) s.accountReqs[reqid] = req delete(s.accountIdlers, idle) s.pend.Add(1) - go func(peer SyncPeer, root common.Hash) { + go func(root common.Hash) { defer s.pend.Done() // Attempt to send the remote request and revert if it fails @@ -851,7 +851,7 @@ func (s *Syncer) assignAccountTasks(cancel chan struct{}) { peer.Log().Debug("Failed to request account range", "err", err) s.scheduleRevertAccountRequest(req) } - }(s.peers[idle], s.root) // We're in the lock, peers[id] surely exists + }(s.root) // Inject the request into the task to block further assignments task.req = req @@ -893,6 +893,8 @@ func (s *Syncer) assignBytecodeTasks(cancel chan struct{}) { if idle == "" { return } + peer := s.peers[idle] + // Matched a pending task to an idle peer, allocate a unique request id var reqid uint64 for { @@ -923,14 +925,14 @@ func (s *Syncer) assignBytecodeTasks(cancel chan struct{}) { task: task, } req.timeout = time.AfterFunc(requestTimeout, func() { - log.Debug("Bytecode request timed out") + peer.Log().Debug("Bytecode request timed out", "reqid", reqid) s.scheduleRevertBytecodeRequest(req) }) s.bytecodeReqs[reqid] = req delete(s.bytecodeIdlers, idle) s.pend.Add(1) - go func(peer SyncPeer) { + go func() { defer s.pend.Done() // Attempt to send the remote request and revert if it fails @@ -938,7 +940,7 @@ func (s *Syncer) assignBytecodeTasks(cancel chan struct{}) { log.Debug("Failed to request bytecodes", "err", err) s.scheduleRevertBytecodeRequest(req) } - }(s.peers[idle]) // We're in the lock, peers[id] surely exists + }() } } @@ -978,6 +980,8 @@ func (s *Syncer) assignStorageTasks(cancel chan struct{}) { if idle == "" { return } + peer := s.peers[idle] + // 
Matched a pending task to an idle peer, allocate a unique request id var reqid uint64 for { @@ -1047,14 +1051,14 @@ func (s *Syncer) assignStorageTasks(cancel chan struct{}) { req.limit = subtask.Last } req.timeout = time.AfterFunc(requestTimeout, func() { - log.Debug("Storage request timed out") + peer.Log().Debug("Storage request timed out", "reqid", reqid) s.scheduleRevertStorageRequest(req) }) s.storageReqs[reqid] = req delete(s.storageIdlers, idle) s.pend.Add(1) - go func(peer SyncPeer, root common.Hash) { + go func(root common.Hash) { defer s.pend.Done() // Attempt to send the remote request and revert if it fails @@ -1066,7 +1070,7 @@ func (s *Syncer) assignStorageTasks(cancel chan struct{}) { log.Debug("Failed to request storage", "err", err) s.scheduleRevertStorageRequest(req) } - }(s.peers[idle], s.root) // We're in the lock, peers[id] surely exists + }(s.root) // Inject the request into the subtask to block further assignments if subtask != nil { @@ -1123,6 +1127,8 @@ func (s *Syncer) assignTrienodeHealTasks(cancel chan struct{}) { if idle == "" { return } + peer := s.peers[idle] + // Matched a pending task to an idle peer, allocate a unique request id var reqid uint64 for { @@ -1162,14 +1168,14 @@ func (s *Syncer) assignTrienodeHealTasks(cancel chan struct{}) { task: s.healer, } req.timeout = time.AfterFunc(requestTimeout, func() { - log.Debug("Trienode heal request timed out") + peer.Log().Debug("Trienode heal request timed out", "reqid", reqid) s.scheduleRevertTrienodeHealRequest(req) }) s.trienodeHealReqs[reqid] = req delete(s.trienodeHealIdlers, idle) s.pend.Add(1) - go func(peer SyncPeer, root common.Hash) { + go func(root common.Hash) { defer s.pend.Done() // Attempt to send the remote request and revert if it fails @@ -1177,7 +1183,7 @@ func (s *Syncer) assignTrienodeHealTasks(cancel chan struct{}) { log.Debug("Failed to request trienode healers", "err", err) s.scheduleRevertTrienodeHealRequest(req) } - }(s.peers[idle], s.root) // We're in the lock, peers[id] surely exists + }(s.root) } } @@ -1229,6 +1235,8 @@ func (s *Syncer) assignBytecodeHealTasks(cancel chan struct{}) { if idle == "" { return } + peer := s.peers[idle] + // Matched a pending task to an idle peer, allocate a unique request id var reqid uint64 for { @@ -1260,14 +1268,14 @@ func (s *Syncer) assignBytecodeHealTasks(cancel chan struct{}) { task: s.healer, } req.timeout = time.AfterFunc(requestTimeout, func() { - log.Debug("Bytecode heal request timed out") + peer.Log().Debug("Bytecode heal request timed out", "reqid", reqid) s.scheduleRevertBytecodeHealRequest(req) }) s.bytecodeHealReqs[reqid] = req delete(s.bytecodeHealIdlers, idle) s.pend.Add(1) - go func(peer SyncPeer) { + go func() { defer s.pend.Done() // Attempt to send the remote request and revert if it fails @@ -1275,7 +1283,7 @@ func (s *Syncer) assignBytecodeHealTasks(cancel chan struct{}) { log.Debug("Failed to request bytecode healers", "err", err) s.scheduleRevertBytecodeHealRequest(req) } - }(s.peers[idle]) // We're in the lock, peers[id] surely exists + }() } } @@ -1553,7 +1561,14 @@ func (s *Syncer) processAccountResponse(res *accountResponse) { // Ensure that the response doesn't overflow into the subsequent task last := res.task.Last.Big() for i, hash := range res.hashes { - if hash.Big().Cmp(last) > 0 { + // Mark the range complete if the last is already included. + // Keep iterating to delete the extra states if they exist.
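Each of the five assignment hunks above makes the same change: the peer is looked up once (`peer := s.peers[idle]`) while the syncer's lock is still held, and that captured value is used from both the timeout callback and the request goroutine, instead of indexing the map again inside closures. A minimal sketch of the idiom, with hypothetical types standing in for the syncer and SyncPeer:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

type peerConn struct{ id string }

type syncer struct {
	lock  sync.Mutex
	peers map[string]*peerConn
}

// assign mirrors the assignAccountTasks pattern: one map lookup under the
// lock, after which the captured peer stays valid for the timeout callback
// and the sender goroutine, even if it is later removed from the map.
func (s *syncer) assign(idle string, wg *sync.WaitGroup) {
	s.lock.Lock()
	peer := s.peers[idle] // we're in the lock, peers[idle] surely exists
	s.lock.Unlock()

	timeout := time.AfterFunc(50*time.Millisecond, func() {
		fmt.Printf("request to peer %s timed out\n", peer.id)
	})
	wg.Add(1)
	go func() {
		defer wg.Done()
		fmt.Printf("requesting data from peer %s\n", peer.id)
		timeout.Stop() // delivered in time, cancel the timer
	}()
}

func main() {
	var wg sync.WaitGroup
	s := &syncer{peers: map[string]*peerConn{"a": {id: "a"}}}
	s.assign("a", &wg)
	wg.Wait()
}
```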
+ cmp := hash.Big().Cmp(last) + if cmp == 0 { + res.cont = false + continue + } + if cmp > 0 { // Chunk overflown, cut off excess, but also update the boundary nodes for j := i; j < len(res.hashes); j++ { if err := res.trie.Prove(res.hashes[j][:], 0, res.overflow); err != nil { @@ -1660,7 +1675,6 @@ func (s *Syncer) processBytecodeResponse(res *bytecodeResponse) { bytes += common.StorageSize(len(code)) rawdb.WriteCode(batch, hash, code) - s.bloom.Add(hash[:]) } if err := batch.Write(); err != nil { log.Crit("Failed to persist bytecodes", "err", err) @@ -1761,7 +1775,14 @@ func (s *Syncer) processStorageResponse(res *storageResponse) { // Ensure the response doesn't overflow into the subsequent task last := res.subTask.Last.Big() for k, hash := range res.hashes[i] { - if hash.Big().Cmp(last) > 0 { + // Mark the range complete if the last is already included. + // Keep iterating to delete the extra states if they exist. + cmp := hash.Big().Cmp(last) + if cmp == 0 { + res.cont = false + continue + } + if cmp > 0 { // Chunk overflown, cut off excess, but also update the boundary for l := k; l < len(res.hashes[i]); l++ { if err := res.tries[i].Prove(res.hashes[i][l][:], 0, res.overflow); err != nil { @@ -1788,15 +1809,18 @@ func (s *Syncer) processStorageResponse(res *storageResponse) { it := res.nodes[i].NewIterator(nil, nil) for it.Next() { // Boundary nodes are not written for the last result, since they are incomplete - if i == len(res.hashes)-1 { + if i == len(res.hashes)-1 && res.subTask != nil { if _, ok := res.bounds[common.BytesToHash(it.Key())]; ok { skipped++ continue } + if _, err := res.overflow.Get(it.Key()); err == nil { + skipped++ + continue + } } // Node is not a boundary, persist to disk batch.Put(it.Key(), it.Value()) - s.bloom.Add(it.Key()) bytes += common.StorageSize(common.HashLength + len(it.Value())) nodes++ @@ -1953,7 +1977,6 @@ func (s *Syncer) forwardAccountTask(task *accountTask) { } // Node is neither a boundary, nor an incomplete account, persist to disk batch.Put(it.Key(), it.Value()) - s.bloom.Add(it.Key()) bytes += common.StorageSize(common.HashLength + len(it.Value())) nodes++ diff --git a/eth/protocols/snap/sync_test.go b/eth/protocols/snap/sync_test.go index 0b048786e8..3e9778dbc7 100644 --- a/eth/protocols/snap/sync_test.go +++ b/eth/protocols/snap/sync_test.go @@ -23,6 +23,7 @@ import ( "fmt" "math/big" "sort" + "sync" "testing" "time" @@ -30,6 +31,7 @@ import ( "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/light" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" @@ -111,10 +113,12 @@ func BenchmarkHashing(b *testing.B) { }) } -type storageHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error -type accountHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, cap uint64) error -type trieHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error -type codeHandlerFunc func(t *testPeer, id uint64, hashes []common.Hash, max uint64) error +type ( + accountHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error + storageHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error +
trieHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error + codeHandlerFunc func(t *testPeer, id uint64, hashes []common.Hash, max uint64) error +) type testPeer struct { id string @@ -130,10 +134,10 @@ type testPeer struct { storageRequestHandler storageHandlerFunc trieRequestHandler trieHandlerFunc codeRequestHandler codeHandlerFunc - cancelCh chan struct{} + term func() } -func newTestPeer(id string, t *testing.T, cancelCh chan struct{}) *testPeer { +func newTestPeer(id string, t *testing.T, term func()) *testPeer { peer := &testPeer{ id: id, test: t, @@ -142,12 +146,11 @@ func newTestPeer(id string, t *testing.T, cancelCh chan struct{}) *testPeer { trieRequestHandler: defaultTrieRequestHandler, storageRequestHandler: defaultStorageRequestHandler, codeRequestHandler: defaultCodeRequestHandler, - cancelCh: cancelCh, + term: term, } //stderrHandler := log.StreamHandler(os.Stderr, log.TerminalFormat(true)) //peer.logger.SetHandler(stderrHandler) return peer - } func (t *testPeer) ID() string { return t.id } @@ -155,7 +158,7 @@ func (t *testPeer) Log() log.Logger { return t.logger } func (t *testPeer) RequestAccountRange(id uint64, root, origin, limit common.Hash, bytes uint64) error { t.logger.Trace("Fetching range of accounts", "reqid", id, "root", root, "origin", origin, "limit", limit, "bytes", common.StorageSize(bytes)) - go t.accountRequestHandler(t, id, root, origin, bytes) + go t.accountRequestHandler(t, id, root, origin, limit, bytes) return nil } @@ -211,20 +214,21 @@ func defaultTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, } // defaultAccountRequestHandler is a well-behaving handler for AccountRangeRequests -func defaultAccountRequestHandler(t *testPeer, id uint64, root common.Hash, origin common.Hash, cap uint64) error { - keys, vals, proofs := createAccountRequestResponse(t, root, origin, cap) +func defaultAccountRequestHandler(t *testPeer, id uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error { + keys, vals, proofs := createAccountRequestResponse(t, root, origin, limit, cap) if err := t.remote.OnAccounts(t, id, keys, vals, proofs); err != nil { - t.logger.Error("remote error on delivery", "error", err) t.test.Errorf("Remote side rejected our delivery: %v", err) - t.remote.Unregister(t.id) - close(t.cancelCh) + t.term() return err } return nil } -func createAccountRequestResponse(t *testPeer, root common.Hash, origin common.Hash, cap uint64) (keys []common.Hash, vals [][]byte, proofs [][]byte) { +func createAccountRequestResponse(t *testPeer, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) (keys []common.Hash, vals [][]byte, proofs [][]byte) { var size uint64 + if limit == (common.Hash{}) { + limit = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") + } for _, entry := range t.accountValues { if size > cap { break @@ -234,20 +238,22 @@ func createAccountRequestResponse(t *testPeer, root common.Hash, origin common.H vals = append(vals, entry.v) size += uint64(32 + len(entry.v)) } + // If we've exceeded the request threshold, abort + if bytes.Compare(entry.k, limit[:]) >= 0 { + break + } } // Unless we send the entire trie, we need to supply proofs - // Actually, we need to supply proofs either way! This seems tob be an implementation + // Actually, we need to supply proofs either way! 
This seems to be an implementation // quirk in go-ethereum proof := light.NewNodeSet() if err := t.accountTrie.Prove(origin[:], 0, proof); err != nil { - t.logger.Error("Could not prove inexistence of origin", "origin", origin, - "error", err) + t.logger.Error("Could not prove inexistence of origin", "origin", origin, "error", err) } if len(keys) > 0 { lastK := (keys[len(keys)-1])[:] if err := t.accountTrie.Prove(lastK, 0, proof); err != nil { - t.logger.Error("Could not prove last item", - "error", err) + t.logger.Error("Could not prove last item", "error", err) } } for _, blob := range proof.NodeList() { @@ -260,9 +266,8 @@ func createAccountRequestResponse(t *testPeer, root common.Hash, origin common.H func defaultStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, bOrigin, bLimit []byte, max uint64) error { hashes, slots, proofs := createStorageRequestResponse(t, root, accounts, bOrigin, bLimit, max) if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil { - t.logger.Error("remote error on delivery", "error", err) t.test.Errorf("Remote side rejected our delivery: %v", err) - close(t.cancelCh) + t.term() } return nil } @@ -270,58 +275,112 @@ func defaultCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error { var bytecodes [][]byte for _, h := range hashes { - bytecodes = append(bytecodes, getCode(h)) + bytecodes = append(bytecodes, getCodeByHash(h)) } if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil { - t.logger.Error("remote error on delivery", "error", err) t.test.Errorf("Remote side rejected our delivery: %v", err) - close(t.cancelCh) + t.term() } return nil } -func createStorageRequestResponse(t *testPeer, root common.Hash, accounts []common.Hash, bOrigin, bLimit []byte, max uint64) (hashes [][]common.Hash, slots [][][]byte, proofs [][]byte) { - var ( - size uint64 - limit = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") - ) - if len(bLimit) > 0 { - limit = common.BytesToHash(bLimit) +func createStorageRequestResponse(t *testPeer, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) (hashes [][]common.Hash, slots [][][]byte, proofs [][]byte) { + var size uint64 + for _, account := range accounts { + // The first account might start from a different origin and end sooner + var originHash common.Hash + if len(origin) > 0 { + originHash = common.BytesToHash(origin) + } + var limitHash = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") + if len(limit) > 0 { + limitHash = common.BytesToHash(limit) + } + var ( + keys []common.Hash + vals [][]byte + abort bool + ) + for _, entry := range t.storageValues[account] { + if size >= max { + abort = true + break + } + if bytes.Compare(entry.k, originHash[:]) < 0 { + continue + } + keys = append(keys, common.BytesToHash(entry.k)) + vals = append(vals, entry.v) + size += uint64(32 + len(entry.v)) + if bytes.Compare(entry.k, limitHash[:]) >= 0 { + break + } + } + hashes = append(hashes, keys) + slots = append(slots, vals) + + // Generate the Merkle proofs for the first and last storage slot, but + // only if the response was capped. If the entire storage trie is included + // in the response, no need for any proofs.
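The rewritten test responders above all follow the same range-serving contract as the real handler: skip keys below `origin`, stop once the byte budget is exhausted, and after emitting the first key at or beyond `limit`, include it and cut the response there. A stdlib-only sketch of that selection logic (the key and size values are made up for illustration):

```go
package main

import (
	"bytes"
	"fmt"
)

func main() {
	var (
		origin = []byte{0x20}
		limit  = []byte{0x60}
		max    = uint64(100) // response byte budget
		size   uint64
	)
	for _, k := range [][]byte{{0x10}, {0x30}, {0x45}, {0x60}, {0x90}} {
		if size >= max {
			break // budget exhausted, response is capped
		}
		if bytes.Compare(k, origin) < 0 {
			continue // before the requested range
		}
		fmt.Printf("serving key %x\n", k)
		size += 32
		if bytes.Compare(k, limit) >= 0 {
			break // limit reached: the boundary key is included, then we stop
		}
	}
}
```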
+ if originHash != (common.Hash{}) || abort { + // If we're aborting, we need to prove the first and last item + // This terminates the response (and thus the loop) + proof := light.NewNodeSet() + stTrie := t.storageTries[account] + + // Here's a potential gotcha: when constructing the proof, we cannot + // use the 'origin' slice directly, but must use the full 32-byte + // hash form. + if err := stTrie.Prove(originHash[:], 0, proof); err != nil { + t.logger.Error("Could not prove inexistence of origin", "origin", originHash, "error", err) + } + if len(keys) > 0 { + lastK := (keys[len(keys)-1])[:] + if err := stTrie.Prove(lastK, 0, proof); err != nil { + t.logger.Error("Could not prove last item", "error", err) + } + } + for _, blob := range proof.NodeList() { + proofs = append(proofs, blob) + } + break + } } + return hashes, slots, proofs +} + +// createStorageRequestResponseAlwaysProve tests a corner case, where it always +// supplies the proof for the last account, even if it is 'complete'. +func createStorageRequestResponseAlwaysProve(t *testPeer, root common.Hash, accounts []common.Hash, bOrigin, bLimit []byte, max uint64) (hashes [][]common.Hash, slots [][][]byte, proofs [][]byte) { + var size uint64 + max = max * 3 / 4 + var origin common.Hash if len(bOrigin) > 0 { origin = common.BytesToHash(bOrigin) } - - var limitExceeded bool - var incomplete bool - for _, account := range accounts { - + var exit bool + for i, account := range accounts { var keys []common.Hash var vals [][]byte for _, entry := range t.storageValues[account] { - if limitExceeded { - incomplete = true - break - } if bytes.Compare(entry.k, origin[:]) < 0 { - incomplete = true - continue + exit = true } keys = append(keys, common.BytesToHash(entry.k)) vals = append(vals, entry.v) size += uint64(32 + len(entry.v)) - if bytes.Compare(entry.k, limit[:]) >= 0 { - limitExceeded = true - } if size > max { - limitExceeded = true + exit = true } } + if i == len(accounts)-1 { + exit = true + } hashes = append(hashes, keys) slots = append(slots, vals) - if incomplete { + if exit { // If we're aborting, we need to prove the first and last item // This terminates the response (and thus the loop) proof := light.NewNodeSet() @@ -350,21 +409,17 @@ func createStorageRequestResponse(t *testPeer, root common.Hash, accounts []comm } // emptyRequestAccountRangeFn rejects AccountRangeRequests -func emptyRequestAccountRangeFn(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, cap uint64) error { - var proofs [][]byte - var keys []common.Hash - var vals [][]byte - t.remote.OnAccounts(t, requestId, keys, vals, proofs) +func emptyRequestAccountRangeFn(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error { + t.remote.OnAccounts(t, requestId, nil, nil, nil) return nil } -func nonResponsiveRequestAccountRangeFn(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, cap uint64) error { +func nonResponsiveRequestAccountRangeFn(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error { return nil } func emptyTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error { - var nodes [][]byte - t.remote.OnTrieNodes(t, requestId, nodes) + t.remote.OnTrieNodes(t, requestId, nil) return nil } @@ -373,10 +428,7 @@ func nonResponsiveTrieRequestHandler(t *testPeer, requestId uint64, root common.
} func emptyStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error { - var hashes [][]common.Hash - var slots [][][]byte - var proofs [][]byte - t.remote.OnStorage(t, requestId, hashes, slots, proofs) + t.remote.OnStorage(t, requestId, nil, nil, nil) return nil } @@ -384,6 +436,15 @@ func nonResponsiveStorageRequestHandler(t *testPeer, requestId uint64, root comm return nil } +func proofHappyStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error { + hashes, slots, proofs := createStorageRequestResponseAlwaysProve(t, root, accounts, origin, limit, max) + if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil { + t.test.Errorf("Remote side rejected our delivery: %v", err) + t.term() + } + return nil +} + //func emptyCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error { // var bytecodes [][]byte // t.remote.OnByteCodes(t, id, bytecodes) @@ -397,7 +458,7 @@ func corruptCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max bytecodes = append(bytecodes, h[:]) } if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil { - t.logger.Error("remote error on delivery", "error", err) + t.logger.Info("remote error on delivery (as expected)", "error", err) // Mimic the real-life handler, which drops a peer on errors t.remote.Unregister(t.id) } @@ -407,12 +468,12 @@ func corruptCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max func cappedCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error { var bytecodes [][]byte for _, h := range hashes[:1] { - bytecodes = append(bytecodes, getCode(h)) + bytecodes = append(bytecodes, getCodeByHash(h)) } + // Missing bytecode can be retrieved again, no error expected if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil { - t.logger.Error("remote error on delivery", "error", err) - // Mimic the real-life handler, which drops a peer on errors - t.remote.Unregister(t.id) + t.test.Errorf("Remote side rejected our delivery: %v", err) + t.term() } return nil } @@ -422,16 +483,16 @@ func starvingStorageRequestHandler(t *testPeer, requestId uint64, root common.Ha return defaultStorageRequestHandler(t, requestId, root, accounts, origin, limit, 500) } -func starvingAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, cap uint64) error { - return defaultAccountRequestHandler(t, requestId, root, origin, 500) +func starvingAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error { + return defaultAccountRequestHandler(t, requestId, root, origin, limit, 500) } //func misdeliveringAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, cap uint64) error { // return defaultAccountRequestHandler(t, requestId-1, root, origin, 500) //} -func corruptAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, cap uint64) error { - hashes, accounts, proofs := createAccountRequestResponse(t, root, origin, cap) +func corruptAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error { + hashes, accounts, proofs := createAccountRequestResponse(t, root, origin, limit, cap) if len(proofs) > 0 { proofs = proofs[1:] } @@ -473,23 +534,36 @@ func noProofStorageRequestHandler(t 
*testPeer, requestId uint64, root common.Has func TestSyncBloatedProof(t *testing.T) { t.Parallel() + var ( + once sync.Once + cancel = make(chan struct{}) + term = func() { + once.Do(func() { + close(cancel) + }) + } + ) sourceAccountTrie, elems := makeAccountTrieNoStorage(100) - cancel := make(chan struct{}) - source := newTestPeer("source", t, cancel) + source := newTestPeer("source", t, term) source.accountTrie = sourceAccountTrie source.accountValues = elems - source.accountRequestHandler = func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, cap uint64) error { - var proofs [][]byte - var keys []common.Hash - var vals [][]byte - + source.accountRequestHandler = func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error { + var ( + proofs [][]byte + keys []common.Hash + vals [][]byte + ) // The values for _, entry := range t.accountValues { - if bytes.Compare(origin[:], entry.k) <= 0 { - keys = append(keys, common.BytesToHash(entry.k)) - vals = append(vals, entry.v) + if bytes.Compare(entry.k, origin[:]) < 0 { + continue + } + if bytes.Compare(entry.k, limit[:]) > 0 { + continue } + keys = append(keys, common.BytesToHash(entry.k)) + vals = append(vals, entry.v) } // The proofs proof := light.NewNodeSet() @@ -511,9 +585,9 @@ func TestSyncBloatedProof(t *testing.T) { proofs = append(proofs, blob) } if err := t.remote.OnAccounts(t, requestId, keys, vals, proofs); err != nil { - t.logger.Info("remote error on delivery", "error", err) + t.logger.Info("remote error on delivery (as expected)", "error", err) + t.term() // This is actually correct, signal to exit the test successfully - close(t.cancelCh) } return nil } @@ -525,7 +599,7 @@ func TestSyncBloatedProof(t *testing.T) { func setupSyncer(peers ...*testPeer) *Syncer { stateDb := rawdb.NewMemoryDatabase() - syncer := NewSyncer(stateDb, trie.NewSyncBloom(1, stateDb)) + syncer := NewSyncer(stateDb) for _, peer := range peers { syncer.Register(peer) peer.remote = syncer @@ -537,20 +611,28 @@ func setupSyncer(peers ...*testPeer) *Syncer { func TestSync(t *testing.T) { t.Parallel() - cancel := make(chan struct{}) + var ( + once sync.Once + cancel = make(chan struct{}) + term = func() { + once.Do(func() { + close(cancel) + }) + } + ) sourceAccountTrie, elems := makeAccountTrieNoStorage(100) mkSource := func(name string) *testPeer { - source := newTestPeer(name, t, cancel) + source := newTestPeer(name, t, term) source.accountTrie = sourceAccountTrie source.accountValues = elems return source } - - syncer := setupSyncer(mkSource("sourceA")) + syncer := setupSyncer(mkSource("source")) if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { t.Fatalf("sync failed: %v", err) } + verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) } // TestSyncTinyTriePanic tests a basic sync with one peer, and a tiny trie. 
This caused a @@ -558,56 +640,79 @@ func TestSync(t *testing.T) { func TestSyncTinyTriePanic(t *testing.T) { t.Parallel() - cancel := make(chan struct{}) - + var ( + once sync.Once + cancel = make(chan struct{}) + term = func() { + once.Do(func() { + close(cancel) + }) + } + ) sourceAccountTrie, elems := makeAccountTrieNoStorage(1) mkSource := func(name string) *testPeer { - source := newTestPeer(name, t, cancel) + source := newTestPeer(name, t, term) source.accountTrie = sourceAccountTrie source.accountValues = elems return source } - - syncer := setupSyncer( - mkSource("nice-a"), - ) - done := checkStall(t, cancel) + syncer := setupSyncer(mkSource("source")) + done := checkStall(t, term) if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { t.Fatalf("sync failed: %v", err) } close(done) + verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) } // TestMultiSync tests a basic sync with multiple peers func TestMultiSync(t *testing.T) { t.Parallel() - cancel := make(chan struct{}) + var ( + once sync.Once + cancel = make(chan struct{}) + term = func() { + once.Do(func() { + close(cancel) + }) + } + ) sourceAccountTrie, elems := makeAccountTrieNoStorage(100) mkSource := func(name string) *testPeer { - source := newTestPeer(name, t, cancel) + source := newTestPeer(name, t, term) source.accountTrie = sourceAccountTrie source.accountValues = elems return source } - syncer := setupSyncer(mkSource("sourceA"), mkSource("sourceB")) + done := checkStall(t, term) if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { t.Fatalf("sync failed: %v", err) } + close(done) + verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) } // TestSyncWithStorage tests basic sync using accounts + storage + code func TestSyncWithStorage(t *testing.T) { t.Parallel() - cancel := make(chan struct{}) - sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(3, 3000, true) + var ( + once sync.Once + cancel = make(chan struct{}) + term = func() { + once.Do(func() { + close(cancel) + }) + } + ) + sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(3, 3000, true, false) mkSource := func(name string) *testPeer { - source := newTestPeer(name, t, cancel) + source := newTestPeer(name, t, term) source.accountTrie = sourceAccountTrie source.accountValues = elems source.storageTries = storageTries @@ -615,33 +720,43 @@ func TestSyncWithStorage(t *testing.T) { return source } syncer := setupSyncer(mkSource("sourceA")) + done := checkStall(t, term) if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { t.Fatalf("sync failed: %v", err) } + close(done) + verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) } // TestMultiSyncManyUseless contains one good peer, and many which doesn't return anything valuable at all func TestMultiSyncManyUseless(t *testing.T) { t.Parallel() - cancel := make(chan struct{}) - - sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true) + var ( + once sync.Once + cancel = make(chan struct{}) + term = func() { + once.Do(func() { + close(cancel) + }) + } + ) + sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false) - mkSource := func(name string, a, b, c bool) *testPeer { - source := newTestPeer(name, t, cancel) + mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer { + source := newTestPeer(name, t, term) source.accountTrie = sourceAccountTrie source.accountValues = elems source.storageTries = 
storageTries source.storageValues = storageElems - if !a { + if !noAccount { source.accountRequestHandler = emptyRequestAccountRangeFn } - if !b { + if !noStorage { source.storageRequestHandler = emptyStorageRequestHandler } - if !c { + if !noTrieNode { source.trieRequestHandler = emptyTrieRequestHandler } return source @@ -653,9 +768,12 @@ func TestMultiSyncManyUseless(t *testing.T) { mkSource("noStorage", true, false, true), mkSource("noTrie", true, true, false), ) + done := checkStall(t, term) if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { t.Fatalf("sync failed: %v", err) } + close(done) + verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) } // TestMultiSyncManyUseless contains one good peer, and many which doesn't return anything valuable at all @@ -666,24 +784,31 @@ func TestMultiSyncManyUselessWithLowTimeout(t *testing.T) { defer func(old time.Duration) { requestTimeout = old }(requestTimeout) requestTimeout = time.Millisecond - cancel := make(chan struct{}) - - sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true) + var ( + once sync.Once + cancel = make(chan struct{}) + term = func() { + once.Do(func() { + close(cancel) + }) + } + ) + sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false) - mkSource := func(name string, a, b, c bool) *testPeer { - source := newTestPeer(name, t, cancel) + mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer { + source := newTestPeer(name, t, term) source.accountTrie = sourceAccountTrie source.accountValues = elems source.storageTries = storageTries source.storageValues = storageElems - if !a { + if !noAccount { source.accountRequestHandler = emptyRequestAccountRangeFn } - if !b { + if !noStorage { source.storageRequestHandler = emptyStorageRequestHandler } - if !c { + if !noTrieNode { source.trieRequestHandler = emptyTrieRequestHandler } return source @@ -695,9 +820,12 @@ func TestMultiSyncManyUselessWithLowTimeout(t *testing.T) { mkSource("noStorage", true, false, true), mkSource("noTrie", true, true, false), ) + done := checkStall(t, term) if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { t.Fatalf("sync failed: %v", err) } + close(done) + verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) } // TestMultiSyncManyUnresponsive contains one good peer, and many which doesn't respond at all @@ -706,24 +834,31 @@ func TestMultiSyncManyUnresponsive(t *testing.T) { defer func(old time.Duration) { requestTimeout = old }(requestTimeout) requestTimeout = time.Millisecond - cancel := make(chan struct{}) - - sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true) + var ( + once sync.Once + cancel = make(chan struct{}) + term = func() { + once.Do(func() { + close(cancel) + }) + } + ) + sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false) - mkSource := func(name string, a, b, c bool) *testPeer { - source := newTestPeer(name, t, cancel) + mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer { + source := newTestPeer(name, t, term) source.accountTrie = sourceAccountTrie source.accountValues = elems source.storageTries = storageTries source.storageValues = storageElems - if !a { + if !noAccount { source.accountRequestHandler = nonResponsiveRequestAccountRangeFn } - if !b { + if !noStorage { source.storageRequestHandler = nonResponsiveStorageRequestHandler } - if !c { + if 
!noTrieNode { source.trieRequestHandler = nonResponsiveTrieRequestHandler } return source @@ -735,18 +870,21 @@ func TestMultiSyncManyUnresponsive(t *testing.T) { mkSource("noStorage", true, false, true), mkSource("noTrie", true, true, false), ) + done := checkStall(t, term) if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { t.Fatalf("sync failed: %v", err) } + close(done) + verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) } -func checkStall(t *testing.T, cancel chan struct{}) chan struct{} { +func checkStall(t *testing.T, term func()) chan struct{} { testDone := make(chan struct{}) go func() { select { case <-time.After(time.Minute): // TODO(karalabe): Make tests smaller, this is too much t.Log("Sync stalled") - close(cancel) + term() case <-testDone: return } @@ -754,17 +892,58 @@ func checkStall(t *testing.T, cancel chan struct{}) chan struct{} { return testDone } +// TestSyncBoundaryAccountTrie tests sync against a few normal peers, but the +// account trie has a few boundary elements. +func TestSyncBoundaryAccountTrie(t *testing.T) { + t.Parallel() + + var ( + once sync.Once + cancel = make(chan struct{}) + term = func() { + once.Do(func() { + close(cancel) + }) + } + ) + sourceAccountTrie, elems := makeBoundaryAccountTrie(3000) + + mkSource := func(name string) *testPeer { + source := newTestPeer(name, t, term) + source.accountTrie = sourceAccountTrie + source.accountValues = elems + return source + } + syncer := setupSyncer( + mkSource("peer-a"), + mkSource("peer-b"), + ) + done := checkStall(t, term) + if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { + t.Fatalf("sync failed: %v", err) + } + close(done) + verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) +} + // TestSyncNoStorageAndOneCappedPeer tests sync using accounts and no storage, where one peer is // consistently returning very small results func TestSyncNoStorageAndOneCappedPeer(t *testing.T) { t.Parallel() - cancel := make(chan struct{}) - + var ( + once sync.Once + cancel = make(chan struct{}) + term = func() { + once.Do(func() { + close(cancel) + }) + } + ) sourceAccountTrie, elems := makeAccountTrieNoStorage(3000) mkSource := func(name string, slow bool) *testPeer { - source := newTestPeer(name, t, cancel) + source := newTestPeer(name, t, term) source.accountTrie = sourceAccountTrie source.accountValues = elems @@ -780,11 +959,12 @@ func TestSyncNoStorageAndOneCappedPeer(t *testing.T) { mkSource("nice-c", false), mkSource("capped", true), ) - done := checkStall(t, cancel) + done := checkStall(t, term) if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { t.Fatalf("sync failed: %v", err) } close(done) + verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) } // TestSyncNoStorageAndOneCodeCorruptPeer has one peer which doesn't deliver @@ -792,12 +972,19 @@ func TestSyncNoStorageAndOneCappedPeer(t *testing.T) { func TestSyncNoStorageAndOneCodeCorruptPeer(t *testing.T) { t.Parallel() - cancel := make(chan struct{}) - + var ( + once sync.Once + cancel = make(chan struct{}) + term = func() { + once.Do(func() { + close(cancel) + }) + } + ) sourceAccountTrie, elems := makeAccountTrieNoStorage(3000) mkSource := func(name string, codeFn codeHandlerFunc) *testPeer { - source := newTestPeer(name, t, cancel) + source := newTestPeer(name, t, term) source.accountTrie = sourceAccountTrie source.accountValues = elems source.codeRequestHandler = codeFn @@ -811,22 +998,30 @@ func TestSyncNoStorageAndOneCodeCorruptPeer(t *testing.T) { mkSource("capped", cappedCodeRequestHandler), 
mkSource("corrupt", corruptCodeRequestHandler), ) - done := checkStall(t, cancel) + done := checkStall(t, term) if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { t.Fatalf("sync failed: %v", err) } close(done) + verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) } func TestSyncNoStorageAndOneAccountCorruptPeer(t *testing.T) { t.Parallel() - cancel := make(chan struct{}) - + var ( + once sync.Once + cancel = make(chan struct{}) + term = func() { + once.Do(func() { + close(cancel) + }) + } + ) sourceAccountTrie, elems := makeAccountTrieNoStorage(3000) mkSource := func(name string, accFn accountHandlerFunc) *testPeer { - source := newTestPeer(name, t, cancel) + source := newTestPeer(name, t, term) source.accountTrie = sourceAccountTrie source.accountValues = elems source.accountRequestHandler = accFn @@ -840,11 +1035,12 @@ func TestSyncNoStorageAndOneAccountCorruptPeer(t *testing.T) { mkSource("capped", defaultAccountRequestHandler), mkSource("corrupt", corruptAccountRequestHandler), ) - done := checkStall(t, cancel) + done := checkStall(t, term) if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { t.Fatalf("sync failed: %v", err) } close(done) + verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) } // TestSyncNoStorageAndOneCodeCappedPeer has one peer which delivers code hashes @@ -852,12 +1048,19 @@ func TestSyncNoStorageAndOneAccountCorruptPeer(t *testing.T) { func TestSyncNoStorageAndOneCodeCappedPeer(t *testing.T) { t.Parallel() - cancel := make(chan struct{}) - + var ( + once sync.Once + cancel = make(chan struct{}) + term = func() { + once.Do(func() { + close(cancel) + }) + } + ) sourceAccountTrie, elems := makeAccountTrieNoStorage(3000) mkSource := func(name string, codeFn codeHandlerFunc) *testPeer { - source := newTestPeer(name, t, cancel) + source := newTestPeer(name, t, term) source.accountTrie = sourceAccountTrie source.accountValues = elems source.codeRequestHandler = codeFn @@ -872,7 +1075,7 @@ func TestSyncNoStorageAndOneCodeCappedPeer(t *testing.T) { return cappedCodeRequestHandler(t, id, hashes, max) }), ) - done := checkStall(t, cancel) + done := checkStall(t, term) if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { t.Fatalf("sync failed: %v", err) } @@ -885,6 +1088,43 @@ func TestSyncNoStorageAndOneCodeCappedPeer(t *testing.T) { if threshold := 100; counter > threshold { t.Fatalf("Error, expected < %d invocations, got %d", threshold, counter) } + verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) +} + +// TestSyncBoundaryStorageTrie tests sync against a few normal peers, but the +// storage trie has a few boundary elements. 
+func TestSyncBoundaryStorageTrie(t *testing.T) { + t.Parallel() + + var ( + once sync.Once + cancel = make(chan struct{}) + term = func() { + once.Do(func() { + close(cancel) + }) + } + ) + sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(10, 1000, false, true) + + mkSource := func(name string) *testPeer { + source := newTestPeer(name, t, term) + source.accountTrie = sourceAccountTrie + source.accountValues = elems + source.storageTries = storageTries + source.storageValues = storageElems + return source + } + syncer := setupSyncer( + mkSource("peer-a"), + mkSource("peer-b"), + ) + done := checkStall(t, term) + if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { + t.Fatalf("sync failed: %v", err) + } + close(done) + verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) } // TestSyncWithStorageAndOneCappedPeer tests sync using accounts + storage, where one peer is @@ -892,12 +1132,19 @@ func TestSyncNoStorageAndOneCodeCappedPeer(t *testing.T) { func TestSyncWithStorageAndOneCappedPeer(t *testing.T) { t.Parallel() - cancel := make(chan struct{}) - - sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(300, 1000, false) + var ( + once sync.Once + cancel = make(chan struct{}) + term = func() { + once.Do(func() { + close(cancel) + }) + } + ) + sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(300, 1000, false, false) mkSource := func(name string, slow bool) *testPeer { - source := newTestPeer(name, t, cancel) + source := newTestPeer(name, t, term) source.accountTrie = sourceAccountTrie source.accountValues = elems source.storageTries = storageTries @@ -913,11 +1160,12 @@ func TestSyncWithStorageAndOneCappedPeer(t *testing.T) { mkSource("nice-a", false), mkSource("slow", true), ) - done := checkStall(t, cancel) + done := checkStall(t, term) if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { t.Fatalf("sync failed: %v", err) } close(done) + verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) } // TestSyncWithStorageAndCorruptPeer tests sync using accounts + storage, where one peer is @@ -925,12 +1173,19 @@ func TestSyncWithStorageAndOneCappedPeer(t *testing.T) { func TestSyncWithStorageAndCorruptPeer(t *testing.T) { t.Parallel() - cancel := make(chan struct{}) - - sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true) + var ( + once sync.Once + cancel = make(chan struct{}) + term = func() { + once.Do(func() { + close(cancel) + }) + } + ) + sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false) mkSource := func(name string, handler storageHandlerFunc) *testPeer { - source := newTestPeer(name, t, cancel) + source := newTestPeer(name, t, term) source.accountTrie = sourceAccountTrie source.accountValues = elems source.storageTries = storageTries @@ -945,22 +1200,30 @@ func TestSyncWithStorageAndCorruptPeer(t *testing.T) { mkSource("nice-c", defaultStorageRequestHandler), mkSource("corrupt", corruptStorageRequestHandler), ) - done := checkStall(t, cancel) + done := checkStall(t, term) if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { t.Fatalf("sync failed: %v", err) } close(done) + verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) } func TestSyncWithStorageAndNonProvingPeer(t *testing.T) { t.Parallel() - cancel := make(chan struct{}) - - sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true) + var ( + once 
sync.Once + cancel = make(chan struct{}) + term = func() { + once.Do(func() { + close(cancel) + }) + } + ) + sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false) mkSource := func(name string, handler storageHandlerFunc) *testPeer { - source := newTestPeer(name, t, cancel) + source := newTestPeer(name, t, term) source.accountTrie = sourceAccountTrie source.accountValues = elems source.storageTries = storageTries @@ -968,23 +1231,55 @@ func TestSyncWithStorageAndNonProvingPeer(t *testing.T) { source.storageRequestHandler = handler return source } - syncer := setupSyncer( mkSource("nice-a", defaultStorageRequestHandler), mkSource("nice-b", defaultStorageRequestHandler), mkSource("nice-c", defaultStorageRequestHandler), mkSource("corrupt", noProofStorageRequestHandler), ) - done := checkStall(t, cancel) + done := checkStall(t, term) if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { t.Fatalf("sync failed: %v", err) } close(done) + verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) +} + +// TestSyncWithStorageMisbehavingProve tests basic sync using accounts + storage + code, against +// a peer who insists on delivering full storage sets _and_ proofs. This triggered +// an error, where the recipient erroneously clipped the boundary nodes, but +// did not mark the account for healing. +func TestSyncWithStorageMisbehavingProve(t *testing.T) { + t.Parallel() + var ( + once sync.Once + cancel = make(chan struct{}) + term = func() { + once.Do(func() { + close(cancel) + }) + } + ) + sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorageWithUniqueStorage(10, 30, false) + + mkSource := func(name string) *testPeer { + source := newTestPeer(name, t, term) + source.accountTrie = sourceAccountTrie + source.accountValues = elems + source.storageTries = storageTries + source.storageValues = storageElems + source.storageRequestHandler = proofHappyStorageRequestHandler + return source + } + syncer := setupSyncer(mkSource("sourceA")) + if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { + t.Fatalf("sync failed: %v", err) + } + verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) } type kv struct { k, v []byte - t bool } // Some helpers for sorting @@ -1013,14 +1308,14 @@ var ( } ) -// getACodeHash returns a pseudo-random code hash -func getACodeHash(i uint64) []byte { +// getCodeHash returns a pseudo-random code hash +func getCodeHash(i uint64) []byte { h := codehashes[int(i)%len(codehashes)] return common.CopyBytes(h[:]) } -// convenience function to lookup the code from the code hash -func getCode(hash common.Hash) []byte { +// getCodeByHash is a convenience function to look up the code from the code hash +func getCodeByHash(hash common.Hash) []byte { if hash == emptyCode { return nil } @@ -1042,23 +1337,77 @@ func makeAccountTrieNoStorage(n int) (*trie.Trie, entrySlice) { Nonce: i, Balance: big.NewInt(int64(i)), Root: emptyRoot, - CodeHash: getACodeHash(i), + CodeHash: getCodeHash(i), }) key := key32(i) - elem := &kv{key, value, false} + elem := &kv{key, value} accTrie.Update(elem.k, elem.v) entries = append(entries, elem) } sort.Sort(entries) - // Push to disk layer accTrie.Commit(nil) return accTrie, entries } -// makeAccountTrieWithStorage spits out a trie, along with the leafs -func makeAccountTrieWithStorage(accounts, slots int, code bool) (*trie.Trie, entrySlice, - map[common.Hash]*trie.Trie, map[common.Hash]entrySlice) { +// makeBoundaryAccountTrie constructs an account trie. 
Instead of filling +// accounts normally, this function will fill a few accounts which have +// boundary hashes. +func makeBoundaryAccountTrie(n int) (*trie.Trie, entrySlice) { + var ( + entries entrySlice + boundaries []common.Hash + db = trie.NewDatabase(rawdb.NewMemoryDatabase()) + trie, _ = trie.New(common.Hash{}, db) + ) + // Initialize boundaries + var next common.Hash + step := new(big.Int).Sub( + new(big.Int).Div( + new(big.Int).Exp(common.Big2, common.Big256, nil), + big.NewInt(accountConcurrency), + ), common.Big1, + ) + for i := 0; i < accountConcurrency; i++ { + last := common.BigToHash(new(big.Int).Add(next.Big(), step)) + if i == accountConcurrency-1 { + last = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") + } + boundaries = append(boundaries, last) + next = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1)) + } + // Fill boundary accounts + for i := 0; i < len(boundaries); i++ { + value, _ := rlp.EncodeToBytes(state.Account{ + Nonce: uint64(0), + Balance: big.NewInt(int64(i)), + Root: emptyRoot, + CodeHash: getCodeHash(uint64(i)), + }) + elem := &kv{boundaries[i].Bytes(), value} + trie.Update(elem.k, elem.v) + entries = append(entries, elem) + } + // Fill other accounts if required + for i := uint64(1); i <= uint64(n); i++ { + value, _ := rlp.EncodeToBytes(state.Account{ + Nonce: i, + Balance: big.NewInt(int64(i)), + Root: emptyRoot, + CodeHash: getCodeHash(i), + }) + elem := &kv{key32(i), value} + trie.Update(elem.k, elem.v) + entries = append(entries, elem) + } + sort.Sort(entries) + trie.Commit(nil) + return trie, entries +} + +// makeAccountTrieWithStorageWithUniqueStorage creates an account trie where each account +// has a unique storage set. +func makeAccountTrieWithStorageWithUniqueStorage(accounts, slots int, code bool) (*trie.Trie, entrySlice, map[common.Hash]*trie.Trie, map[common.Hash]entrySlice) { var ( db = trie.NewDatabase(rawdb.NewMemoryDatabase()) accTrie, _ = trie.New(common.Hash{}, db) @@ -1066,16 +1415,63 @@ func makeAccountTrieWithStorage(accounts, slots int, code bool) (*trie.Trie, ent storageTries = make(map[common.Hash]*trie.Trie) storageEntries = make(map[common.Hash]entrySlice) ) + // Create n accounts in the trie + for i := uint64(1); i <= uint64(accounts); i++ { + key := key32(i) + codehash := emptyCode[:] + if code { + codehash = getCodeHash(i) + } + // Create a storage trie + stTrie, stEntries := makeStorageTrieWithSeed(uint64(slots), i, db) + stRoot := stTrie.Hash() + stTrie.Commit(nil) + value, _ := rlp.EncodeToBytes(state.Account{ + Nonce: i, + Balance: big.NewInt(int64(i)), + Root: stRoot, + CodeHash: codehash, + }) + elem := &kv{key, value} + accTrie.Update(elem.k, elem.v) + entries = append(entries, elem) + + storageTries[common.BytesToHash(key)] = stTrie + storageEntries[common.BytesToHash(key)] = stEntries + } + sort.Sort(entries) + + accTrie.Commit(nil) + return accTrie, entries, storageTries, storageEntries +} +// makeAccountTrieWithStorage spits out a trie, along with the leaves +func makeAccountTrieWithStorage(accounts, slots int, code, boundary bool) (*trie.Trie, entrySlice, map[common.Hash]*trie.Trie, map[common.Hash]entrySlice) { + var ( + db = trie.NewDatabase(rawdb.NewMemoryDatabase()) + accTrie, _ = trie.New(common.Hash{}, db) + entries entrySlice + storageTries = make(map[common.Hash]*trie.Trie) + storageEntries = make(map[common.Hash]entrySlice) + ) // Make a storage trie which we reuse for the whole lot - stTrie, stEntries := makeStorageTrie(slots, db) + var ( + stTrie 
*trie.Trie + stEntries entrySlice + ) + if boundary { + stTrie, stEntries = makeBoundaryStorageTrie(slots, db) + } else { + stTrie, stEntries = makeStorageTrieWithSeed(uint64(slots), 0, db) + } stRoot := stTrie.Hash() + // Create n accounts in the trie for i := uint64(1); i <= uint64(accounts); i++ { key := key32(i) codehash := emptyCode[:] if code { - codehash = getACodeHash(i) + codehash = getCodeHash(i) } value, _ := rlp.EncodeToBytes(state.Account{ Nonce: i, @@ -1083,7 +1479,7 @@ func makeAccountTrieWithStorage(accounts, slots int, code bool) (*trie.Trie, ent Root: stRoot, CodeHash: codehash, }) - elem := &kv{key, value, false} + elem := &kv{key, value} accTrie.Update(elem.k, elem.v) entries = append(entries, elem) // we reuse the same one for all accounts @@ -1096,23 +1492,116 @@ func makeAccountTrieWithStorage(accounts, slots int, code bool) (*trie.Trie, ent return accTrie, entries, storageTries, storageEntries } -// makeStorageTrie fills a storage trie with n items, returning the -// not-yet-committed trie and the sorted entries -func makeStorageTrie(n int, db *trie.Database) (*trie.Trie, entrySlice) { +// makeStorageTrieWithSeed fills a storage trie with n items, returning the +// not-yet-committed trie and the sorted entries. The seeds can be used to ensure +// that tries are unique. +func makeStorageTrieWithSeed(n, seed uint64, db *trie.Database) (*trie.Trie, entrySlice) { trie, _ := trie.New(common.Hash{}, db) var entries entrySlice - for i := uint64(1); i <= uint64(n); i++ { - // store 'i' at slot 'i' - slotValue := key32(i) + for i := uint64(1); i <= n; i++ { + // store 'i'+seed at slot 'i' + slotValue := key32(i + seed) rlpSlotValue, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(slotValue[:])) slotKey := key32(i) key := crypto.Keccak256Hash(slotKey[:]) - elem := &kv{key[:], rlpSlotValue, false} + elem := &kv{key[:], rlpSlotValue} + trie.Update(elem.k, elem.v) + entries = append(entries, elem) + } + sort.Sort(entries) + trie.Commit(nil) + return trie, entries +} + +// makeBoundaryStorageTrie constructs a storage trie. Instead of filling +// storage slots normally, this function will fill a few slots which have +// boundary hashes. 
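Both boundary helpers share the same arithmetic: carve the 256-bit hash space into `accountConcurrency` equal chunks and place an entry exactly at the end of each chunk, clamping the final boundary to 0xff...ff. A stdlib-only sketch of that computation, with 16 standing in for accountConcurrency (the storage variant itself follows):

```go
package main

import (
	"bytes"
	"fmt"
	"math/big"
)

func main() {
	const chunks = 16 // stands in for accountConcurrency

	// step = 2^256/chunks - 1, the same arithmetic as the helpers above.
	space := new(big.Int).Exp(big.NewInt(2), big.NewInt(256), nil)
	step := new(big.Int).Sub(new(big.Int).Div(space, big.NewInt(chunks)), big.NewInt(1))

	next := new(big.Int)
	for i := 0; i < chunks; i++ {
		last := new(big.Int).Add(next, step)
		if i == chunks-1 {
			// Integer division may leave a remainder; clamp the last boundary.
			last.SetBytes(bytes.Repeat([]byte{0xff}, 32))
		}
		fmt.Printf("chunk %2d ends at %064x\n", i, last)
		next.Add(last, big.NewInt(1))
	}
}
```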
+func makeBoundaryStorageTrie(n int, db *trie.Database) (*trie.Trie, entrySlice) { + var ( + entries entrySlice + boundaries []common.Hash + trie, _ = trie.New(common.Hash{}, db) + ) + // Initialize boundaries + var next common.Hash + step := new(big.Int).Sub( + new(big.Int).Div( + new(big.Int).Exp(common.Big2, common.Big256, nil), + big.NewInt(accountConcurrency), + ), common.Big1, + ) + for i := 0; i < accountConcurrency; i++ { + last := common.BigToHash(new(big.Int).Add(next.Big(), step)) + if i == accountConcurrency-1 { + last = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") + } + boundaries = append(boundaries, last) + next = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1)) + } + // Fill boundary slots + for i := 0; i < len(boundaries); i++ { + key := boundaries[i] + val := []byte{0xde, 0xad, 0xbe, 0xef} + + elem := &kv{key[:], val} + trie.Update(elem.k, elem.v) + entries = append(entries, elem) + } + // Fill other slots if required + for i := uint64(1); i <= uint64(n); i++ { + slotKey := key32(i) + key := crypto.Keccak256Hash(slotKey[:]) + + slotValue := key32(i) + rlpSlotValue, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(slotValue[:])) + + elem := &kv{key[:], rlpSlotValue} trie.Update(elem.k, elem.v) entries = append(entries, elem) } sort.Sort(entries) + trie.Commit(nil) return trie, entries } + +func verifyTrie(db ethdb.KeyValueStore, root common.Hash, t *testing.T) { + t.Helper() + triedb := trie.NewDatabase(db) + accTrie, err := trie.New(root, triedb) + if err != nil { + t.Fatal(err) + } + accounts, slots := 0, 0 + accIt := trie.NewIterator(accTrie.NodeIterator(nil)) + for accIt.Next() { + var acc struct { + Nonce uint64 + Balance *big.Int + Root common.Hash + CodeHash []byte + } + if err := rlp.DecodeBytes(accIt.Value, &acc); err != nil { + log.Crit("Invalid account encountered during snapshot creation", "err", err) + } + accounts++ + if acc.Root != emptyRoot { + storeTrie, err := trie.NewSecure(acc.Root, triedb) + if err != nil { + t.Fatal(err) + } + storeIt := trie.NewIterator(storeTrie.NodeIterator(nil)) + for storeIt.Next() { + slots++ + } + if err := storeIt.Err; err != nil { + t.Fatal(err) + } + } + } + if err := accIt.Err; err != nil { + t.Fatal(err) + } + t.Logf("accounts: %d, slots: %d", accounts, slots) +} diff --git a/eth/state_accessor.go b/eth/state_accessor.go index dd40278acd..285c2cccab 100644 --- a/eth/state_accessor.go +++ b/eth/state_accessor.go @@ -31,39 +31,68 @@ import ( ) // stateAtBlock retrieves the state database associated with a certain block. -// If no state is locally available for the given block, a number of blocks are -// attempted to be reexecuted to generate the desired state. -func (eth *Ethereum) stateAtBlock(block *types.Block, reexec uint64) (statedb *state.StateDB, release func(), err error) { - // If we have the state fully available, use that - statedb, err = eth.blockchain.StateAt(block.Root()) - if err == nil { - return statedb, func() {}, nil +// If no state is locally available for the given block, a number of blocks +// are attempted to be reexecuted to generate the desired state. An optional +// base layer statedb can be passed; if so, it is regarded as the statedb of +// the parent block. 
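The rewritten stateAtBlock below takes two new knobs: checkLive (whether the live database may satisfy the request directly) and base (an optional parent statedb to build on, replacing the deleted statesInRange batch API). A toy, self-contained analog of the walk-back-then-replay scheme it implements, with integer "states" and block n's execution modeled as adding n:

```go
package main

import (
	"errors"
	"fmt"
)

// stateAt is a toy analog of stateAtBlock: walk backwards up to reexec
// blocks until a persisted state is found, then replay forward to target.
func stateAt(target, reexec int, persisted map[int]int) (int, error) {
	start := target
	for i := 0; ; i++ {
		if state, ok := persisted[start]; ok {
			for n := start + 1; n <= target; n++ {
				state += n // "process" block n on top of the state
			}
			return state, nil
		}
		if start == 0 {
			return 0, errors.New("genesis state is missing")
		}
		if i == reexec {
			return 0, errors.New("required historical state unavailable")
		}
		start--
	}
}

func main() {
	persisted := map[int]int{5: 15} // state after block 5 = 1+2+3+4+5
	state, err := stateAt(8, 64, persisted)
	fmt.Println(state, err) // 36 <nil>: blocks 6, 7 and 8 re-executed
}
```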
+func (eth *Ethereum) stateAtBlock(block *types.Block, reexec uint64, base *state.StateDB, checkLive bool) (statedb *state.StateDB, err error) { + var ( + current *types.Block + database state.Database + report = true + origin = block.NumberU64() + ) + // Check the live database first: if we have the state fully available, use it. + if checkLive { + statedb, err = eth.blockchain.StateAt(block.Root()) + if err == nil { + return statedb, nil + } } - // Otherwise try to reexec blocks until we find a state or reach our limit - origin := block.NumberU64() - database := state.NewDatabaseWithConfig(eth.chainDb, &trie.Config{Cache: 16, Preimages: true}) + if base != nil { + // The optional base statedb is given, mark the start point as parent block + statedb, database, report = base, base.Database(), false + current = eth.blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1) + } else { + // Otherwise try to reexec blocks until we find a state or reach our limit + current = block - for i := uint64(0); i < reexec; i++ { - if block.NumberU64() == 0 { - return nil, nil, errors.New("genesis state is missing") - } - parent := eth.blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1) - if parent == nil { - return nil, nil, fmt.Errorf("missing block %v %d", block.ParentHash(), block.NumberU64()-1) + // Create an ephemeral trie.Database for isolating the live one. Otherwise + // the internal junk created by tracing will be persisted to disk. + database = state.NewDatabaseWithConfig(eth.chainDb, &trie.Config{Cache: 16}) + + // If we didn't check the dirty database, do check the clean one, otherwise + // we would rewind past a persisted block (specific corner case is chain + // tracing from the genesis). + if !checkLive { + statedb, err = state.New(current.Root(), database, nil) + if err == nil { + return statedb, nil + } } - block = parent + // Database does not have the state for the given block, try to regenerate + for i := uint64(0); i < reexec; i++ { + if current.NumberU64() == 0 { + return nil, errors.New("genesis state is missing") + } + parent := eth.blockchain.GetBlock(current.ParentHash(), current.NumberU64()-1) + if parent == nil { + return nil, fmt.Errorf("missing block %v %d", current.ParentHash(), current.NumberU64()-1) + } + current = parent - statedb, err = state.New(block.Root(), database, nil) - if err == nil { - break + statedb, err = state.New(current.Root(), database, nil) + if err == nil { + break + } } - } - if err != nil { - switch err.(type) { - case *trie.MissingNodeError: - return nil, nil, fmt.Errorf("required historical state unavailable (reexec=%d)", reexec) - default: - return nil, nil, err + if err != nil { + switch err.(type) { + case *trie.MissingNodeError: + return nil, fmt.Errorf("required historical state unavailable (reexec=%d)", reexec) + default: + return nil, err + } } } // State was available at historical point, regenerate @@ -72,138 +101,62 @@ func (eth *Ethereum) stateAtBlock(block *types.Block, reexec uint64) (statedb *s logged time.Time parent common.Hash ) - defer func() { - if err != nil && parent != (common.Hash{}) { - database.TrieDB().Dereference(parent) - } - }() - for block.NumberU64() < origin { + for current.NumberU64() < origin { // Print progress logs if long enough time elapsed - if time.Since(logged) > 8*time.Second { - log.Info("Regenerating historical state", "block", block.NumberU64()+1, "target", origin, "remaining", origin-block.NumberU64()-1, "elapsed", time.Since(start)) + if time.Since(logged) > 8*time.Second && report 
{ + log.Info("Regenerating historical state", "block", current.NumberU64()+1, "target", origin, "remaining", origin-current.NumberU64()-1, "elapsed", time.Since(start)) logged = time.Now() } // Retrieve the next block to regenerate and process it - if block = eth.blockchain.GetBlockByNumber(block.NumberU64() + 1); block == nil { - return nil, nil, fmt.Errorf("block #%d not found", block.NumberU64()+1) + next := current.NumberU64() + 1 + if current = eth.blockchain.GetBlockByNumber(next); current == nil { + return nil, fmt.Errorf("block #%d not found", next) } - _, _, _, err := eth.blockchain.Processor().Process(block, statedb, vm.Config{}) + _, _, _, err := eth.blockchain.Processor().Process(current, statedb, vm.Config{}) if err != nil { - return nil, nil, fmt.Errorf("processing block %d failed: %v", block.NumberU64(), err) + return nil, fmt.Errorf("processing block %d failed: %v", current.NumberU64(), err) } // Finalize the state so any modifications are written to the trie root, err := statedb.Commit(eth.blockchain.Config().IsEnabled(eth.blockchain.Config().GetEIP161dTransition, block.Number())) if err != nil { - return nil, nil, err + return nil, err } statedb, err = state.New(root, database, nil) if err != nil { - return nil, nil, fmt.Errorf("state reset after block %d failed: %v", block.NumberU64(), err) - } - database.TrieDB().Reference(root, common.Hash{}) - if parent != (common.Hash{}) { - database.TrieDB().Dereference(parent) - } - parent = root - } - nodes, imgs := database.TrieDB().Size() - log.Info("Historical state regenerated", "block", block.NumberU64(), "elapsed", time.Since(start), "nodes", nodes, "preimages", imgs) - return statedb, func() { database.TrieDB().Dereference(parent) }, nil -} - -// statesInRange retrieves a batch of state databases associated with the specific -// block ranges. If no state is locally available for the given range, a number of -// blocks are attempted to be reexecuted to generate the ancestor state. -func (eth *Ethereum) statesInRange(fromBlock, toBlock *types.Block, reexec uint64) (states []*state.StateDB, release func(), err error) { - statedb, err := eth.blockchain.StateAt(fromBlock.Root()) - if err != nil { - statedb, _, err = eth.stateAtBlock(fromBlock, reexec) - } - if err != nil { - return nil, nil, err - } - states = append(states, statedb.Copy()) - - var ( - logged time.Time - parent common.Hash - start = time.Now() - refs = []common.Hash{fromBlock.Root()} - database = state.NewDatabaseWithConfig(eth.chainDb, &trie.Config{Cache: 16, Preimages: true}) - ) - // Release all resources(including the states referenced by `stateAtBlock`) - // if error is returned. 
-	defer func() {
-		if err != nil {
-			for _, ref := range refs {
-				database.TrieDB().Dereference(ref)
-			}
-		}
-	}()
-	for i := fromBlock.NumberU64() + 1; i <= toBlock.NumberU64(); i++ {
-		// Print progress logs if long enough time elapsed
-		if time.Since(logged) > 8*time.Second {
-			logged = time.Now()
-			log.Info("Regenerating historical state", "block", i, "target", fromBlock.NumberU64(), "remaining", toBlock.NumberU64()-i, "elapsed", time.Since(start))
-		}
-		// Retrieve the next block to regenerate and process it
-		block := eth.blockchain.GetBlockByNumber(i)
-		if block == nil {
-			return nil, nil, fmt.Errorf("block #%d not found", i)
-		}
-		_, _, _, err := eth.blockchain.Processor().Process(block, statedb, vm.Config{})
-		if err != nil {
-			return nil, nil, fmt.Errorf("processing block %d failed: %v", block.NumberU64(), err)
-		}
-		// Finalize the state so any modifications are written to the trie
-		root, err := statedb.Commit(eth.blockchain.Config().IsEnabled(eth.blockchain.Config().GetEIP161dTransition, block.Number()))
-		if err != nil {
-			return nil, nil, err
+			return nil, fmt.Errorf("state reset after block %d failed: %v", current.NumberU64(), err)
 		}
-		statedb, err := eth.blockchain.StateAt(root)
-		if err != nil {
-			return nil, nil, fmt.Errorf("state reset after block %d failed: %v", block.NumberU64(), err)
-		}
-		states = append(states, statedb.Copy())
-
-		// Reference the trie twice, once for us, once for the tracer
 		database.TrieDB().Reference(root, common.Hash{})
-		database.TrieDB().Reference(root, common.Hash{})
-		refs = append(refs, root)
-
-		// Dereference all past tries we ourselves are done working with
 		if parent != (common.Hash{}) {
 			database.TrieDB().Dereference(parent)
 		}
 		parent = root
 	}
-	// release is handler to release all states referenced, including
-	// the one referenced in `stateAtBlock`.
-	release = func() {
-		for _, ref := range refs {
-			database.TrieDB().Dereference(ref)
-		}
+	if report {
+		nodes, imgs := database.TrieDB().Size()
+		log.Info("Historical state regenerated", "block", current.NumberU64(), "elapsed", time.Since(start), "nodes", nodes, "preimages", imgs)
 	}
-	return states, release, nil
+	return statedb, nil
 }
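The reexec parameter bounds how far stateAtBlock is willing to rewind in search of a persisted root before declaring the historical state unavailable. A minimal, runnable sketch of that rewind loop; the hasState lookup and the plain uint64 block numbers are illustrative stand-ins for geth's trie database and block types:

package main

import (
	"errors"
	"fmt"
)

// findBase walks backwards from block `from`, at most `reexec` steps, and
// returns the most recent ancestor whose state root is still available.
// hasState stands in for state.New succeeding against the trie database.
func findBase(from, reexec uint64, hasState func(uint64) bool) (uint64, error) {
	current := from
	for i := uint64(0); i < reexec; i++ {
		if current == 0 {
			return 0, errors.New("genesis state is missing")
		}
		current-- // step to the parent block
		if hasState(current) {
			return current, nil
		}
	}
	return 0, fmt.Errorf("required historical state unavailable (reexec=%d)", reexec)
}

func main() {
	// In this toy chain, state is persisted only every 64 blocks.
	persisted := func(n uint64) bool { return n%64 == 0 }
	base, err := findBase(1000, 128, persisted)
	fmt.Println(base, err) // 960 <nil>: blocks 961..1000 are then re-executed
}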
 
 // stateAtTransaction returns the execution environment of a certain transaction.
-func (eth *Ethereum) stateAtTransaction(block *types.Block, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, func(), error) {
+func (eth *Ethereum) stateAtTransaction(block *types.Block, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, error) {
 	// Short circuit if it's genesis block.
 	if block.NumberU64() == 0 {
-		return nil, vm.BlockContext{}, nil, nil, errors.New("no transaction in genesis")
+		return nil, vm.BlockContext{}, nil, errors.New("no transaction in genesis")
 	}
 	// Create the parent state database
 	parent := eth.blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1)
 	if parent == nil {
-		return nil, vm.BlockContext{}, nil, nil, fmt.Errorf("parent %#x not found", block.ParentHash())
+		return nil, vm.BlockContext{}, nil, fmt.Errorf("parent %#x not found", block.ParentHash())
 	}
-	statedb, release, err := eth.stateAtBlock(parent, reexec)
+	// Look up the statedb of the parent block from the live database;
+	// otherwise regenerate it on the fly.
+	statedb, err := eth.stateAtBlock(parent, reexec, nil, true)
 	if err != nil {
-		return nil, vm.BlockContext{}, nil, nil, err
+		return nil, vm.BlockContext{}, nil, err
 	}
 	if txIndex == 0 && len(block.Transactions()) == 0 {
-		return nil, vm.BlockContext{}, statedb, release, nil
+		return nil, vm.BlockContext{}, statedb, nil
 	}
 	// Recompute transactions up to the target index.
 	signer := types.MakeSigner(eth.blockchain.Config(), block.Number())
@@ -213,19 +166,18 @@ func (eth *Ethereum) stateAtTransaction(block *types.Block, txIndex int, reexec
 		txContext := core.NewEVMTxContext(msg)
 		context := core.NewEVMBlockContext(block.Header(), eth.blockchain, nil)
 		if idx == txIndex {
-			return msg, context, statedb, release, nil
+			return msg, context, statedb, nil
 		}
 		// Not yet the searched for transaction, execute on top of the current state
 		vmenv := vm.NewEVM(context, txContext, statedb, eth.blockchain.Config(), vm.Config{})
+		statedb.Prepare(tx.Hash(), block.Hash(), idx)
 		if _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(tx.Gas())); err != nil {
-			release()
-			return nil, vm.BlockContext{}, nil, nil, fmt.Errorf("transaction %#x failed: %v", tx.Hash(), err)
+			return nil, vm.BlockContext{}, nil, fmt.Errorf("transaction %#x failed: %v", tx.Hash(), err)
 		}
 		// Ensure any modifications are committed to the state
 		// Only delete empty objects if EIP158/161 (a.k.a Spurious Dragon) is in effect
 		// statedb.Finalise(vmenv.ChainConfig().IsEnabled(vmenv.ChainConfig().GetEIP161dTransition, block.Number()))
 	}
-	release()
-	return nil, vm.BlockContext{}, nil, nil, fmt.Errorf("transaction index %d out of range for block %#x", txIndex, block.Hash())
+	return nil, vm.BlockContext{}, nil, fmt.Errorf("transaction index %d out of range for block %#x", txIndex, block.Hash())
 }
diff --git a/eth/sync.go b/eth/sync.go
index 0b2094438e..51952264f3 100644
--- a/eth/sync.go
+++ b/eth/sync.go
@@ -348,8 +348,8 @@ func (cs *chainSyncer) modeAndLocalHead() (downloader.SyncMode, *big.Int) {
 		}
 	}
 	// Nope, we're really full syncing
-	head := cs.handler.chain.CurrentHeader()
-	td := cs.handler.chain.GetTd(head.Hash(), head.Number.Uint64())
+	head := cs.handler.chain.CurrentBlock()
+	td := cs.handler.chain.GetTd(head.Hash(), head.NumberU64())
 	return downloader.FullSync, td
 }
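With the release callbacks gone, resource lifetime is steered through the two new StateAtBlock parameters instead: base reuses a caller-held state as the regeneration start point, and checkLive decides whether the live (dirty) database is consulted first. A rough sketch of the calling modes, using stand-in types rather than geth's:

package main

import "fmt"

type stateDB struct{ block uint64 }

// stateAtBlock mirrors the branch order introduced in this diff; the bodies
// are placeholders, only the decision structure matters:
//  1. checkLive: consult the live (dirty) database first.
//  2. base != nil: re-execute a single block on top of the caller's state.
//  3. otherwise: rewind up to reexec blocks and regenerate from a clean root.
func stateAtBlock(number, reexec uint64, base *stateDB, checkLive bool) (*stateDB, string) {
	if checkLive {
		return &stateDB{number}, "live database"
	}
	if base != nil {
		return &stateDB{number}, "one-block reexec on base"
	}
	return &stateDB{number}, "full regeneration"
}

func main() {
	// One-shot callers (traceBlock, TraceCall, TraceCallMany) pass (nil, true)...
	s, how := stateAtBlock(100, 128, nil, true)
	fmt.Println(s.block, how)
	// ...while traceChain threads the previous state through as base with
	// checkLive=false, so tracing junk never reaches the live database.
	for n := uint64(101); n <= 103; n++ {
		s, how = stateAtBlock(n, 128, s, false)
		fmt.Println(s.block, how)
	}
}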
diff --git a/eth/tracers/api.go b/eth/tracers/api.go
index 1d2d71075c..3e7c8d93ed 100644
--- a/eth/tracers/api.go
+++ b/eth/tracers/api.go
@@ -69,9 +69,8 @@ type Backend interface {
 	ChainConfig() ctypes.ChainConfigurator
 	Engine() consensus.Engine
 	ChainDb() ethdb.Database
-	StateAtBlock(ctx context.Context, block *types.Block, reexec uint64) (*state.StateDB, func(), error)
-	StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, func(), error)
-	StatesInRange(ctx context.Context, fromBlock *types.Block, toBlock *types.Block, reexec uint64) ([]*state.StateDB, func(), error)
+	StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, checkLive bool) (*state.StateDB, error)
+	StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, error)
 }
 
 // API is the collection of tracing APIs exposed over the private debugging endpoint.
@@ -172,6 +171,13 @@ type StdTraceConfig struct {
 	TxHash common.Hash
 }
 
+// txTraceContext is the contextual info about a transaction before it gets run.
+type txTraceContext struct {
+	index int         // Index of the transaction within the block
+	hash  common.Hash // Hash of the transaction
+	block common.Hash // Hash of the block containing the transaction
+}
+
 // txTraceResult is the result of a single transaction trace.
 type txTraceResult struct {
 	Result interface{} `json:"result,omitempty"` // Trace results produced by the tracer
@@ -183,6 +189,7 @@ type txTraceResult struct {
 type blockTraceTask struct {
 	statedb *state.StateDB   // Intermediate state prepped for tracing
 	block   *types.Block     // Block to trace the transactions from
+	rootref common.Hash      // Trie root reference held for this task
 	results []*txTraceResult // Trace results produced by the task
 }
 
@@ -197,9 +204,8 @@ type blockTraceResult struct {
 // txTraceTask represents a single transaction trace task when an entire block
 // is being traced.
 type txTraceTask struct {
-	statedb          *state.StateDB         // Intermediate state prepped for tracing
-	index            int                    // Transaction offset in the block
-	taskExtraContext map[string]interface{} // Extra context passed to the JS tracer on init
+	statedb *state.StateDB // Intermediate state prepped for tracing
+	index   int            // Transaction offset in the block
 }
 
 // TraceChain returns the structured logs created during the execution of EVM
@@ -230,33 +236,22 @@ func (api *API) traceChain(ctx context.Context, start, end *types.Block, config
 	}
 	sub := notifier.CreateSubscription()
 
-	// Shift the border to a block ahead in order to get the states
-	// before these blocks.
-	endBlock, err := api.blockByNumberAndHash(ctx, rpc.BlockNumber(end.NumberU64()-1), end.ParentHash())
-	if err != nil {
-		return nil, err
-	}
 	// Prepare all the states for tracing. Note this procedure can take a very
 	// long time; a timeout mechanism is necessary.
 	reexec := defaultTraceReexec
 	if config != nil && config.Reexec != nil {
 		reexec = *config.Reexec
 	}
-	states, release, err := api.backend.StatesInRange(ctx, start, endBlock, reexec)
-	if err != nil {
-		return nil, err
-	}
-	defer release() // Release all the resources in the last step.
-
 	blocks := int(end.NumberU64() - start.NumberU64())
 	threads := runtime.NumCPU()
 	if threads > blocks {
 		threads = blocks
 	}
 	var (
-		pend    = new(sync.WaitGroup)
-		tasks   = make(chan *blockTraceTask, threads)
-		results = make(chan *blockTraceTask, threads)
+		pend     = new(sync.WaitGroup)
+		tasks    = make(chan *blockTraceTask, threads)
+		results  = make(chan *blockTraceTask, threads)
+		localctx = context.Background()
 	)
 	for th := 0; th < threads; th++ {
 		pend.Add(1)
@@ -266,18 +261,16 @@ func (api *API) traceChain(ctx context.Context, start, end *types.Block, config
 			// Fetch and execute the next block trace tasks
 			for task := range tasks {
 				signer := types.MakeSigner(api.backend.ChainConfig(), task.block.Number())
-				blockCtx := core.NewEVMBlockContext(task.block.Header(), api.chainContext(ctx), nil)
+				blockCtx := core.NewEVMBlockContext(task.block.Header(), api.chainContext(localctx), nil)
 				// Trace all the transactions contained within
 				for i, tx := range task.block.Transactions() {
-					taskExtraContext := map[string]interface{}{
-						"blockNumber":         task.block.NumberU64(),
-						"blockHash":           task.block.Hash().Hex(),
-						"transactionHash":     tx.Hash().Hex(),
-						"transactionPosition": uint64(i),
-					}
-
 					msg, _ := tx.AsMessage(signer)
-					res, err := api.traceTx(ctx, msg, blockCtx, task.statedb, taskExtraContext, config)
+					txctx := &txTraceContext{
+						index: i,
+						hash:  tx.Hash(),
+						block: task.block.Hash(),
+					}
+					res, err := api.traceTx(localctx, msg, txctx, blockCtx, task.statedb, nil, config)
 					if err != nil {
 						task.results[i] = &txTraceResult{Error: err.Error()}
 						log.Warn("Tracing failed", "hash", tx.Hash(), "block", task.block.NumberU64(), "err", err)
@@ -301,10 +294,12 @@ func (api *API) traceChain(ctx context.Context, start, end *types.Block, config
 
 	go func() {
 		var (
-			logged time.Time
-			number uint64
-			traced uint64
-			failed error
+			logged  time.Time
+			number  uint64
+			traced  uint64
+			failed  error
+			parent  common.Hash
+			statedb *state.StateDB
 		)
 		// Ensure everything is properly cleaned up on any exit path
 		defer func() {
@@ -322,7 +317,7 @@ func (api *API) traceChain(ctx context.Context, start, end *types.Block, config
 			close(results)
 		}()
 		// Feed all the blocks both into the tracer, as well as fast process concurrently
-		for number = start.NumberU64() + 1; number <= end.NumberU64(); number++ {
+		for number = start.NumberU64(); number < end.NumberU64(); number++ {
 			// Stop tracing if interruption was requested
 			select {
 			case <-notifier.Closed():
@@ -334,16 +329,39 @@ func (api *API) traceChain(ctx context.Context, start, end *types.Block, config
 				logged = time.Now()
 				log.Info("Tracing chain segment", "start", start.NumberU64(), "end", end.NumberU64(), "current", number, "transactions", traced, "elapsed", time.Since(begin))
 			}
-			// Retrieve the next block to trace
-			block, err := api.blockByNumber(ctx, rpc.BlockNumber(number))
+			// Retrieve the parent state to trace on top
+			block, err := api.blockByNumber(localctx, rpc.BlockNumber(number))
+			if err != nil {
+				failed = err
+				break
+			}
+			// Prepare the statedb for tracing. Don't use the live database for
+			// tracing to avoid persisting state junk into the database.
+			statedb, err = api.backend.StateAtBlock(localctx, block, reexec, statedb, false)
+			if err != nil {
+				failed = err
+				break
+			}
+			if statedb.Database().TrieDB() != nil {
+				// Hold the reference for the tracer; it will be released at the final stage
+				statedb.Database().TrieDB().Reference(block.Root(), common.Hash{})
+
+				// Release the parent state because it's already held by the tracer
+				if parent != (common.Hash{}) {
+					statedb.Database().TrieDB().Dereference(parent)
+				}
+			}
+			parent = block.Root()
+
+			next, err := api.blockByNumber(localctx, rpc.BlockNumber(number+1))
 			if err != nil {
 				failed = err
 				break
 			}
 			// Send the block over to the concurrent tracers (if not in the fast-forward phase)
-			txs := block.Transactions()
+			txs := next.Transactions()
 			select {
-			case tasks <- &blockTraceTask{statedb: states[int(number-start.NumberU64()-1)], block: block, results: make([]*txTraceResult, len(txs))}:
+			case tasks <- &blockTraceTask{statedb: statedb.Copy(), block: next, rootref: block.Root(), results: make([]*txTraceResult, len(txs))}:
 			case <-notifier.Closed():
 				return
 			}
@@ -366,6 +384,10 @@ func (api *API) traceChain(ctx context.Context, start, end *types.Block, config
 			}
 			done[uint64(result.Block)] = result
 
+			// Dereference any parent tries held in memory by this task
+			if res.statedb.Database().TrieDB() != nil {
+				res.statedb.Database().TrieDB().Dereference(res.rootref)
+			}
 			// Stream completed traces to the user, aborting on the first error
 			for result, ok := done[next]; ok; result, ok = done[next] {
 				if len(result.Traces) > 0 || next == end.NumberU64() {
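The rootref bookkeeping above replaces the old release callbacks with explicit trie reference counting: the feeder goroutine pins each block's root before handing the task out, and the collector unpins it once the task's traces are drained, so only a small window of tries stays in memory. A self-contained toy of that window; refcountDB is a stand-in for trie.Database, not a geth type:

package main

import "fmt"

type refcountDB struct{ refs map[string]int }

func (db *refcountDB) Reference(root string) { db.refs[root]++ }

func (db *refcountDB) Dereference(root string) {
	if db.refs[root]--; db.refs[root] <= 0 {
		delete(db.refs, root) // trie nodes for this root may now be dropped
	}
}

func main() {
	db := &refcountDB{refs: map[string]int{}}
	var parent string
	for n := 1; n <= 4; n++ {
		root := fmt.Sprintf("root-%d", n)
		// Pin the new root for the pending trace task...
		db.Reference(root)
		// ...and release the previous one, keeping the window at one root.
		if parent != "" {
			db.Dereference(parent)
		}
		parent = root
	}
	fmt.Println("pinned roots:", db.refs) // only root-4 remains pinned
}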
@@ -469,12 +491,10 @@ func (api *API) traceBlock(ctx context.Context, block *types.Block, config *Trac
 	if config != nil && config.Reexec != nil {
 		reexec = *config.Reexec
 	}
-	statedb, release, err := api.backend.StateAtBlock(ctx, parent, reexec)
+	statedb, err := api.backend.StateAtBlock(ctx, parent, reexec, nil, true)
 	if err != nil {
 		return nil, err
 	}
-	defer release()
-
 	// Execute all the transactions contained within the block concurrently
 	var (
 		signer = types.MakeSigner(api.backend.ChainConfig(), block.Number())
@@ -489,6 +509,7 @@ func (api *API) traceBlock(ctx context.Context, block *types.Block, config *Trac
 		threads = len(txs)
 	}
 	blockCtx := core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil)
+	blockHash := block.Hash()
 	for th := 0; th < threads; th++ {
 		pend.Add(1)
 		go func() {
@@ -496,7 +517,12 @@ func (api *API) traceBlock(ctx context.Context, block *types.Block, config *Trac
 			// Fetch and execute the next transaction trace tasks
 			for task := range jobs {
 				msg, _ := txs[task.index].AsMessage(signer)
-				res, err := api.traceTx(ctx, msg, blockCtx, task.statedb, task.taskExtraContext, config)
+				txctx := &txTraceContext{
+					index: task.index,
+					hash:  txs[task.index].Hash(),
+					block: blockHash,
+				}
+				res, err := api.traceTx(ctx, msg, txctx, blockCtx, task.statedb, nil, config)
 				if err != nil {
 					results[task.index] = &txTraceResult{Error: err.Error()}
 					continue
@@ -508,21 +534,13 @@ func (api *API) traceBlock(ctx context.Context, block *types.Block, config *Trac
 	// Feed the transactions into the tracers and return
 	var failed error
 	for i, tx := range txs {
-		taskExtraContext := map[string]interface{}{
-			"blockNumber":         block.NumberU64(),
-			"blockHash":           block.Hash().Hex(),
-			"transactionHash":     tx.Hash().Hex(),
-			"transactionPosition": uint64(i),
-		}
-
-		// Send the trace task over for execution
-		jobs <- &txTraceTask{statedb: statedb.Copy(), index: i, taskExtraContext: taskExtraContext}
+		jobs <- &txTraceTask{statedb: statedb.Copy(), index: i}
 
 		// Generate the next state snapshot fast without tracing
 		msg, _ := tx.AsMessage(signer)
-		txContext := core.NewEVMTxContext(msg)
-
-		vmenv := vm.NewEVM(blockCtx, txContext, statedb, api.backend.ChainConfig(), vm.Config{})
+		statedb.Prepare(tx.Hash(), block.Hash(), i)
+		vmenv := vm.NewEVM(blockCtx, core.NewEVMTxContext(msg), statedb, api.backend.ChainConfig(), vm.Config{})
 		if _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(msg.Gas())); err != nil {
 			failed = err
 			break
@@ -562,12 +580,10 @@ func (api *API) standardTraceBlockToFile(ctx context.Context, block *types.Block
 	if config != nil && config.Reexec != nil {
 		reexec = *config.Reexec
 	}
-	statedb, release, err := api.backend.StateAtBlock(ctx, parent, reexec)
+	statedb, err := api.backend.StateAtBlock(ctx, parent, reexec, nil, true)
 	if err != nil {
 		return nil, err
 	}
-	defer release()
-
 	// Retrieve the tracing configurations, or use default values
 	var (
 		logConfig vm.LogConfig
@@ -649,6 +665,7 @@ func (api *API) standardTraceBlockToFile(ctx context.Context, block *types.Block
 		}
 		// Execute the transaction and flush any traces to disk
 		vmenv := vm.NewEVM(vmctx, txContext, statedb, chainConfig, vmConf)
+		statedb.Prepare(tx.Hash(), block.Hash(), i)
 		_, err = core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(msg.Gas()))
 		if writer != nil {
 			writer.Flush()
@@ -686,7 +703,7 @@ func containsTx(block *types.Block, hash common.Hash) bool {
 // TraceTransaction returns the structured logs created during the execution of EVM
 // and returns them as a JSON object.
 func (api *API) TraceTransaction(ctx context.Context, hash common.Hash, config *TraceConfig) (interface{}, error) {
-	tx, blockHash, blockNumber, index, err := api.backend.GetTransaction(ctx, hash)
+	_, blockHash, blockNumber, index, err := api.backend.GetTransaction(ctx, hash)
 	if err != nil {
 		return nil, err
 	}
@@ -702,20 +719,16 @@ func (api *API) TraceTransaction(ctx context.Context, hash common.Hash, config *
 	if err != nil {
 		return nil, err
 	}
-	msg, vmctx, statedb, release, err := api.backend.StateAtTransaction(ctx, block, int(index), reexec)
+	msg, vmctx, statedb, err := api.backend.StateAtTransaction(ctx, block, int(index), reexec)
 	if err != nil {
 		return nil, err
 	}
-	defer release()
-
-	taskExtraContext := map[string]interface{}{
-		"blockNumber":         block.NumberU64(),
-		"blockHash":           blockHash.Hex(),
-		"transactionHash":     tx.Hash().Hex(),
-		"transactionPosition": index,
+	txctx := &txTraceContext{
+		index: int(index),
+		hash:  hash,
+		block: blockHash,
 	}
-
-	return api.traceTx(ctx, msg, vmctx, statedb, taskExtraContext, config)
+	return api.traceTx(ctx, msg, txctx, vmctx, statedb, nil, config)
 }
 
 // TraceCall lets you trace a given eth_call. It collects the structured logs
@@ -741,12 +754,10 @@ func (api *API) TraceCall(ctx context.Context, args ethapi.CallArgs, blockNrOrHa
 	if config != nil && config.Reexec != nil {
 		reexec = *config.Reexec
 	}
-	statedb, release, err := api.backend.StateAtBlock(ctx, block, reexec)
+	statedb, err := api.backend.StateAtBlock(ctx, block, reexec, nil, true)
 	if err != nil {
 		return nil, err
 	}
-	defer release()
-
 	// Execute the trace
 	msg := args.ToMessage(api.backend.RPCGasCap())
 	vmctx := core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil)
@@ -789,7 +800,7 @@ func (api *API) TraceCall(ctx context.Context, args ethapi.CallArgs, blockNrOrHa
 		"hasFromSufficientBalanceForGasCost": hasFromSufficientBalanceForGasCost,
 	}
 
-	return api.traceTx(ctx, msg, vmctx, statedb, taskExtraContext, config)
+	return api.traceTx(ctx, msg, new(txTraceContext), vmctx, statedb, taskExtraContext, config)
 }
 
 // TraceCallMany lets you trace a given eth_call. It collects the structured logs created during the execution of EVM
@@ -814,11 +825,10 @@ func (api *API) TraceCallMany(ctx context.Context, txs []ethapi.CallArgs, blockN
 	if config != nil && config.Reexec != nil {
 		reexec = *config.Reexec
 	}
-	statedb, release, err := api.backend.StateAtBlock(ctx, block, reexec)
+	statedb, err := api.backend.StateAtBlock(ctx, block, reexec, nil, true)
 	if err != nil {
 		return nil, err
 	}
-	defer release()
 
 	var results = make([]interface{}, len(txs))
 	for idx, args := range txs {
@@ -864,7 +874,7 @@ func (api *API) TraceCallMany(ctx context.Context, txs []ethapi.CallArgs, blockN
 			"hasFromSufficientBalanceForGasCost": hasFromSufficientBalanceForGasCost,
 		}
 
-		res, err := api.traceTx(ctx, msg, vmctx, statedb, taskExtraContext, config)
+		res, err := api.traceTx(ctx, msg, new(txTraceContext), vmctx, statedb, taskExtraContext, config)
 		if err != nil {
 			results[idx] = &txTraceResult{Error: err.Error()}
 			continue
@@ -883,7 +893,7 @@ func (api *API) TraceCallMany(ctx context.Context, txs []ethapi.CallArgs, blockN
 // traceTx configures a new tracer according to the provided configuration, and
 // executes the given message in the provided environment. The return value will
 // be tracer dependent.
-func (api *API) traceTx(ctx context.Context, message core.Message, vmctx vm.BlockContext, statedb *state.StateDB, extraContext map[string]interface{}, config *TraceConfig) (interface{}, error) {
+func (api *API) traceTx(ctx context.Context, message core.Message, txctx *txTraceContext, vmctx vm.BlockContext, statedb *state.StateDB, extraContext map[string]interface{}, config *TraceConfig) (interface{}, error) {
 	// Assemble the structured logger or the JavaScript tracer
 	var (
 		tracer vm.Tracer
@@ -907,7 +917,9 @@ func (api *API) traceTx(ctx context.Context, message core.Message, vmctx vm.Bloc
 		deadlineCtx, cancel := context.WithTimeout(ctx, timeout)
 		go func() {
 			<-deadlineCtx.Done()
-			tracer.(*Tracer).Stop(errors.New("execution timeout"))
+			if deadlineCtx.Err() == context.DeadlineExceeded {
+				tracer.(*Tracer).Stop(errors.New("execution timeout"))
+			}
 		}()
 		defer cancel()
 
@@ -917,16 +929,26 @@ func (api *API) traceTx(ctx context.Context, message core.Message, vmctx vm.Bloc
 	default:
 		tracer = vm.NewStructLogger(config.LogConfig)
 	}
-
 	// Run the transaction with tracing enabled.
 	vmenv := vm.NewEVM(vmctx, txContext, statedb, api.backend.ChainConfig(), vm.Config{Debug: true, Tracer: tracer})
+
+	// Call Prepare to clear out the statedb access list
+	statedb.Prepare(txctx.hash, txctx.block, txctx.index)
+
 	switch tracer := tracer.(type) {
 	case *Tracer:
 		if extraContext == nil {
 			extraContext = map[string]interface{}{}
 		}
+		if txctx.hash != (common.Hash{}) {
+			extraContext["transactionHash"] = txctx.hash.Hex()
+		}
+		if txctx.block != (common.Hash{}) {
+			extraContext["blockHash"] = txctx.block.Hex()
+		}
+		extraContext["transactionPosition"] = uint64(txctx.index)
+
 		// Add useful context for all tracers
 		extraContext["from"] = message.From()
 		if message.To() != nil {
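The guard added around Stop matters because Done() also fires on plain cancellation, which is the normal exit path given the deferred cancel(), so the old code could flag a spurious "execution timeout" on every successful trace. A stdlib-only illustration of the distinction:

package main

import (
	"context"
	"fmt"
	"time"
)

func watch(ctx context.Context) {
	<-ctx.Done()
	if ctx.Err() == context.DeadlineExceeded {
		fmt.Println("stop tracer: execution timeout")
		return
	}
	fmt.Println("context cancelled normally, leave the tracer alone")
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Hour)
	go watch(ctx)
	cancel() // simulates traceTx returning well before the deadline
	time.Sleep(10 * time.Millisecond)
}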
diff --git a/eth/tracers/api_test.go b/eth/tracers/api_test.go
index 03e9e07f16..1704b1fa37 100644
--- a/eth/tracers/api_test.go
+++ b/eth/tracers/api_test.go
@@ -140,25 +140,25 @@ func (b *testBackend) ChainDb() ethdb.Database {
 	return b.chaindb
 }
 
-func (b *testBackend) StateAtBlock(ctx context.Context, block *types.Block, reexec uint64) (*state.StateDB, func(), error) {
+func (b *testBackend) StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, checkLive bool) (*state.StateDB, error) {
 	statedb, err := b.chain.StateAt(block.Root())
 	if err != nil {
-		return nil, nil, errStateNotFound
+		return nil, errStateNotFound
 	}
-	return statedb, func() {}, nil
+	return statedb, nil
 }
 
-func (b *testBackend) StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, func(), error) {
+func (b *testBackend) StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, error) {
 	parent := b.chain.GetBlock(block.ParentHash(), block.NumberU64()-1)
 	if parent == nil {
-		return nil, vm.BlockContext{}, nil, nil, errBlockNotFound
+		return nil, vm.BlockContext{}, nil, errBlockNotFound
 	}
 	statedb, err := b.chain.StateAt(parent.Root())
 	if err != nil {
-		return nil, vm.BlockContext{}, nil, nil, errStateNotFound
+		return nil, vm.BlockContext{}, nil, errStateNotFound
 	}
 	if txIndex == 0 && len(block.Transactions()) == 0 {
-		return nil, vm.BlockContext{}, statedb, func() {}, nil
+		return nil, vm.BlockContext{}, statedb, nil
 	}
 	// Recompute transactions up to the target index.
 	signer := types.MakeSigner(b.chainConfig, block.Number())
@@ -167,31 +167,15 @@ func (b *testBackend) StateAtTransaction(ctx context.Context, block *types.Block
 		txContext := core.NewEVMTxContext(msg)
 		context := core.NewEVMBlockContext(block.Header(), b.chain, nil)
 		if idx == txIndex {
-			return msg, context, statedb, func() {}, nil
+			return msg, context, statedb, nil
 		}
 		vmenv := vm.NewEVM(context, txContext, statedb, b.chainConfig, vm.Config{})
 		if _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(tx.Gas())); err != nil {
-			return nil, vm.BlockContext{}, nil, nil, fmt.Errorf("transaction %#x failed: %v", tx.Hash(), err)
+			return nil, vm.BlockContext{}, nil, fmt.Errorf("transaction %#x failed: %v", tx.Hash(), err)
 		}
 		statedb.Finalise(vmenv.ChainConfig().IsEnabled(vmenv.ChainConfig().GetEIP161dTransition, block.Number()))
 	}
-	return nil, vm.BlockContext{}, nil, nil, fmt.Errorf("transaction index %d out of range for block %#x", txIndex, block.Hash())
-}
-
-func (b *testBackend) StatesInRange(ctx context.Context, fromBlock *types.Block, toBlock *types.Block, reexec uint64) ([]*state.StateDB, func(), error) {
-	var result []*state.StateDB
-	for number := fromBlock.NumberU64(); number <= toBlock.NumberU64(); number += 1 {
-		block := b.chain.GetBlockByNumber(number)
-		if block == nil {
-			return nil, nil, errBlockNotFound
-		}
-		statedb, err := b.chain.StateAt(block.Root())
-		if err != nil {
-			return nil, nil, errStateNotFound
-		}
-		result = append(result, statedb)
-	}
-	return result, func() {}, nil
+	return nil, vm.BlockContext{}, nil, fmt.Errorf("transaction index %d out of range for block %#x", txIndex, block.Hash())
 }
 
 func TestTraceCall(t *testing.T) {
diff --git a/eth/tracers/internal/tracers/assets.go b/eth/tracers/internal/tracers/assets.go
index 4afbb7884f..322f837412 100644
--- a/eth/tracers/internal/tracers/assets.go
+++ b/eth/tracers/internal/tracers/assets.go
@@ -147,7 +147,7 @@ func call_tracerJs() (*asset, error) {
 	return a, nil
 }
 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xdc\x3b\xef\x6f\x1b\x37\xb2\x9f\xad\xbf\x62\xce\x1f\xae\x12\xa2\x48\x72\xd2\xcb\x03\xe4\x73\x0e\xae\xe3\xb4\xc6\xb9\x71\x60\x3b\x2d\x8a\xc0\x78\x47\xed\xce\x4a\x8c\x57\xe4\x1e\xc9\xb5\xac\xa6\xfe\xdf\x1f\x66\x48\xee\x0f\x69\xe5\xb8\xbd\xe0\xbd\xe2\xe5\x4b\xb4\x24\x67\x38\x33\x9c\xdf\xa4\xc7\x63\x38\xd1\xc5\xda\xc8\xf9\xc2\xc1\x8b\xc9\xc1\x7f\xc1\xf5\x02\x61\xae\x9f\xa3\x5b\xa0\xc1\x72\x09\xc7\xa5\x5b\x68\x63\x7b\xe3\x31\x5c\x2f\xa4\x85\x4c\xe6\x08\xd2\x42\x21\x8c\x03\x9d\x81\xdb\x58\x9f\xcb\x99\x11\x66\x3d\xea\x8d\xc7\x1e\xa6\x73\x9a\x30\x64\x06\x11\xac\xce\xdc\x4a\x18\x9c\xc2\x5a\x97\x90\x08\x05\x06\x53\x69\x9d\x91\xb3\xd2\x21\x48\x07\x42\xa5\x63\x6d\x60\xa9\x53\x99\xad\x09\xa5\x74\x50\xaa\x14\x0d\x6f\xed\xd0\x2c\x6d\xa4\xe3\xfb\x77\x1f\xe0\x1c\xad\x45\x03\xdf\xa3\x42\x23\x72\x78\x5f\xce\x72\x99\xc0\xb9\x4c\x50\x59\x04\x61\xa1\xa0\x11\xbb\xc0\x14\x66\x8c\x8e\x00\xdf\x12\x29\x57\x81\x14\x78\xab\x4b\x95\x0a\x27\xb5\x1a\x02\x4a\xa2\x1c\xee\xd0\x58\xa9\x15\xbc\x8c\x5b\x05\x84\x43\xd0\x86\x90\xf4\x85\x23\x06\x0c\xe8\x82\xe0\x06\x20\xd4\x1a\x72\xe1\x6a\xd0\x27\x08\xa4\xe6\x3b\x05\xa9\x78\x9b\x85\x2e\x10\xdc\x42\x38\xe2\x7a\x25\xf3\x1c\x66\x08\xa5\xc5\xac\xcc\x87\x84\x6d\x56\x3a\xf8\xf9\xec\xfa\x87\x8b\x0f\xd7\x70\xfc\xee\x17\xf8\xf9\xf8\xf2\xf2\xf8\xdd\xf5\x2f\x87\xb0\x92\x6e\xa1\x4b\x07\x78\x87\x1e\x95\x5c\x16\xb9\xc4\x14\x56\xc2\x18\xa1\xdc\x1a\x74\x46\x18\x7e\x3c\xbd\x3c\xf9\xe1\xf8\xdd\xf5\xf1\x77\x67\xe7\x67\xd7\xbf\x80\x36\xf0\xf6\xec\xfa\xdd\xe9\xd5\x15\xbc\xbd\xb8\x84\x63\x78\x7f\x7c\x79\x7d\x76\xf2\xe1\xfc\xf8\x12\xde\x7f\xb8\x7c\x7f\x71\x75\x3a\x82\x2b\x24\xaa\x90\xe0\xbf\x2c\xf3\x8c\x4f\xcf\x20\xa4\xe8\x84\xcc\x6d\x94\xc4\x2f\xba\x04\xbb\xd0\x65\x9e\xc2\x42\xdc\x21\x18\x4c\x50\xde\x61\x0a\x02\x12\x5d\xac\x9f\x7c\xa8\x84\x4b\xe4\x5a\xcd\x99\xe7\x9d\x0a\x09\x67\x19\x28\xed\x86\x60\x11\xe1\xef\x0b\xe7\x8a\xe9\x78\xbc\x5a\xad\x46\x73\x55\x8e\xb4\x99\x8f\x73\x8f\xce\x8e\x5f\x8f\x7a\x84\x33\x11\x79\x7e\x6d\x44\x82\x86\x0e\x47\x40\x56\x92\xf8\x73\xbd\x52\xe0\x8c\x50\x56\x24\x74\xd4\xf4\x3b\x61\x65\x14\x0e\xf0\x9e\xbe\x9c\x25\xa5\x05\x83\x85\x36\xf4\x3b\xcf\xa3\x9e\x49\xe5\xd0\x28\x91\x33\x6e\x0b\x4b\x91\x22\xcc\xd6\x20\x9a\x08\x87\x4d\x66\x48\x8d\xfc\x71\x83\x54\x99\x36\x4b\x56\xcb\x51\xef\x73\x6f\x2f\x50\x68\x9d\x48\x6e\x89\x40\xc2\x9f\x94\xc6\xa0\x72\x24\xca\xd2\x58\x79\x87\xbc\x04\xfc\x9a\x20\xcf\xd3\x9f\x7e\x04\xbc\xc7\xa4\xf4\x98\xf6\x2a\x24\x53\xf8\xf8\xf9\xe1\x66\xd8\x63\xd4\x73\x74\x27\x71\xe2\x1c\xd5\xdc\x2d\xa0\xef\x75\x5b\xe4\x03\xda\xae\xb4\x98\xf2\xd1\xd2\xe8\x52\x5a\x26\x0c\x0c\x0a\xab\x95\x1d\x42\xb2\xc0\xe4\x56\xaa\x39\x64\x46\x2f\x99\x17\xa9\x60\xae\x19\xb7\xf4\x84\xfc\xcb\x3a\x2c\xfe\x05\x4b\x74\x0b\x4d\x2a\x60\xc1\x69\x52\x6f\x22\x28\xe0\x16\xf0\xd3\x8f\xa0\x8b\x44\xa7\x38\xea\xed\x6d\xd3\x34\x85\xac\x54\x2c\xb5\xfe\x00\x3e\x1b\x74\xa5\x21\x65\x97\x76\x54\x71\x35\xca\x79\xe5\xe1\x43\x60\x2c\x45\x9b\xa0\x4a\x31\xe5\x83\xbb\xb5\xb0\x5a\xb0\xaa\xc0\x0a\xbf\xb9\x43\xf8\x54\x5a\xd7\x58\xc3\xd4\x0b\x05\xba\x24\x53\x6e\x1e\xbb\x54\xce\x73\x23\xe8\xb7\x42\xc3\x74\x8f\x7a\x7b\x15\xf0\x14\x32\x91\x5b\xa4\x7d\x0b\x61\xa4\x5b\x9f\x1a\xa3\xcd\x8f\xa2\x28\xa4\x9a\x4f\xe1\x73\x6f\x6f\x6f\x3f\xd1\x8a\x35\x06\x12\x83\x5e\x82\xc4\x2b\x58\xa7\x8d\x98\x23\x6d\x4b\xc7\x36\x17\x76\x7f\x0a\xfb\x17\xf5\xd7\x90\x80\x1f\x9f\x9d\x0b\x0b\xa5\x54\xee\xd5\xb7\xa0\xef\xd0\x64\xb9\x5e\x75\x2d\x5b\x8a\xfb\xb0\xa7\xfc\x15\x01\xef\x13\xc4\x14\xd3\xae\x95\x52\xdd\x89\x5c\xa6\xf0\xa9\x5c\x16\x24\x22\x27\x15\x93\x4c\x6b\xbf\x13\x1d\xe3\x0c\x55\xa9\x1a\x18\xbc\x4
3\xe3\x3c\xee\xcb\xf8\x9b\xd7\x84\x83\x4b\x85\x13\x91\xe5\x19\xf9\xe0\x26\x5f\x61\x80\xd7\x7b\x7d\xce\xe5\x52\x92\xaa\x8b\x84\x9c\xf9\xc1\xe4\xc5\xb7\xd0\x3f\x98\xbc\x78\x39\x68\x40\xf1\x4a\x0f\x54\x18\x4c\xf4\xb2\x90\xac\x5b\x82\xfe\x63\xc2\x4b\x99\xbb\xe7\x52\xc5\xa1\x61\x6f\xef\xa1\xfb\xc4\xae\x9c\x30\x4e\xaa\xf9\xcf\x92\xf4\xee\x73\x53\x22\x5e\x43\xa7\x51\x10\x52\x59\x67\xca\xa4\x96\x81\xa7\x97\x83\x56\x3c\x86\xab\x8d\xa1\xf6\xbe\x57\xb7\xb2\x60\xd7\x63\xdf\x6a\xc3\x44\xd8\x29\x7c\xf4\x5b\xda\x32\xcb\x64\x22\xc9\xcc\x67\x22\x17\x2a\xf1\x1e\x96\x75\x33\x43\xb3\xdf\xdb\x63\x1b\x96\xf6\x62\xf6\x09\x13\x77\xba\x2c\xdc\xba\x61\x27\x7a\xf6\x69\xc0\xd4\x13\x50\xff\x4e\x18\xb8\xa7\x60\xe3\x87\x21\x9c\x04\x2b\xee\x21\x3c\xf4\xf6\xf6\xa2\x51\x99\x12\x0f\x03\x8d\xe3\x31\x90\xe9\x92\x17\x90\xea\x4e\xdf\x06\x63\xa5\x23\x5d\x07\x59\x78\x77\x48\x56\x5e\x79\x1b\xb4\xa3\xde\x1e\xc1\x35\x88\xc9\xf5\x7c\x08\xe9\xcc\x13\x44\xb9\x88\x28\x5c\x69\x90\xdd\x1e\x32\xdb\x20\x97\x4b\x4c\xa5\x70\x98\xaf\x7b\x7b\x7b\x44\x2f\x4f\xc0\x11\xe4\x7a\x3e\x9a\xa3\x63\xf1\xf4\x07\x87\xbd\xbd\x3d\x99\x41\xdf\xcf\xfe\xe5\xe8\x88\x85\x9b\x49\x85\xa9\x47\xbf\xc7\x7e\x21\x13\x65\xee\xaa\x7d\x09\x28\x70\x48\x3f\x1f\x3c\x15\x3f\x23\x68\x95\xaf\x21\xa1\x64\x40\xcc\x48\x23\xed\xda\x3a\x5c\x06\xe6\xec\x10\x32\x61\xc9\x21\xc8\x0c\x56\x08\x85\xc1\xe7\xec\xef\x40\xab\x04\x03\x95\x76\x6d\xd9\xf7\x1e\x01\xed\x36\xd2\xc5\xc8\xe9\x77\xe5\x72\x86\xa6\x3f\x80\xbf\xc2\xe4\x3e\x9b\x0c\xe0\xe8\x88\x7f\x44\xda\x03\x4c\xa0\x97\xb0\xe8\x22\x30\xca\xf0\x57\xce\x48\x35\xf7\xbc\x06\x5a\xcf\x32\x10\xa0\x70\x05\x95\x27\x91\x16\x66\x48\x9e\x97\x5d\x0a\xa6\x43\x10\x69\x4a\xae\x95\x03\x44\x15\x0e\xda\x5b\xc2\x5f\xff\x4a\xfe\x9d\x08\xda\x3f\xb9\x3c\x3d\xbe\x3e\xdd\x87\xdf\x7e\x83\xd6\xc8\x8b\xfd\x41\x83\x32\xa9\x2e\xb2\x2c\x10\xe7\x1d\x6d\x81\x78\xdb\x3f\x18\x8c\xee\x44\x5e\xe2\x45\xe6\xc9\x0c\x6b\x4f\x55\x0a\x47\x01\xe6\xd9\x26\xcc\x8b\x16\x0c\x01\x8d\xc7\x70\x6c\x2d\x2e\x67\x39\x6e\xc7\xcd\x10\x58\x39\xc6\x92\xa3\xf4\xfa\x4f\xb6\x9d\x23\x69\x55\xdc\x35\x88\x9f\x29\xde\x73\xeb\x02\xa7\x00\x00\xba\x18\xf2\x00\x79\x76\x1e\x70\xfa\x07\xbc\xe7\x33\x8a\x22\x24\xad\x3a\x4e\x53\x83\xd6\xf6\x07\x03\xbf\x5c\xaa\xa2\x74\xd3\xd6\xf2\x25\x2e\xb5\x59\x8f\x2c\xe5\x0d\x7d\x66\x6d\xe8\x39\x8d\x30\x73\x61\x79\x87\xa8\xa9\xc7\x77\x42\xe6\x62\x96\xe3\xf7\xc2\xf6\xeb\x35\x67\x6a\x5a\xaf\x69\x4f\x9d\x68\xeb\xa6\x71\x8a\x3e\xe2\x1c\xcb\x8b\xc0\xf6\x27\xf7\xfb\xdb\x12\x9d\x0c\x6a\x6d\x39\x78\x35\x20\x90\x87\xc3\xca\x06\xea\xd8\x58\x94\x76\xd1\x67\x95\xab\x67\xeb\xe0\x77\x14\xad\xbe\xc3\x46\x58\xef\xb6\x75\xce\x62\x9e\x71\x0c\x20\xff\x47\xba\x37\x17\x1c\x5c\xd9\x1d\x08\xca\xa2\x6c\x39\xe3\x83\x71\x5a\x7b\x4c\xef\x2e\xae\x4f\xa7\xf0\x4f\x24\x87\xe2\xc8\xdc\xee\xfc\x99\x6f\x10\x23\x33\x9f\x53\x6c\xeb\x6d\x50\xd2\xab\xd3\xf3\xb7\x6f\x4e\xaf\xae\x2f\x3f\x9c\x5c\xef\x37\x14\x35\xc7\xcc\x11\x2b\x9d\x59\x01\x2d\x22\x74\xed\xd9\x8f\x04\xf3\xfc\xe0\xc6\x8f\xc0\x51\x87\x33\xd9\x7b\x1c\x02\x3e\xde\x30\xee\x87\x6d\xa1\xb7\x97\xfa\x23\xf8\x3a\x3a\xea\xb4\x57\xb7\xb0\xdc\xe9\xb8\xe0\x71\xed\x18\x7c\x5d\x55\x4c\x67\xb4\xe2\x3b\x1f\x9a\x1e\xa1\x79\x5b\x43\x77\xb8\xe3\xca\xc5\x85\x4c\x91\x62\x4e\xe2\x93\xa5\x4a\xef\x52\xad\xf0\xf7\x3b\xba\xe3\xf3\xf3\x96\x9b\x3b\x3e\x3f\x3f\xb9\x78\xd3\x72\x7d\x6f\x4e\xcf\x4f\xbf\x3f\xbe\x3e\xdd\x5c\x7b\x75\x7d\x7c\x7d\x76\xc2\xa3\x4d\xaf\xe8\x34\xa9\xda\x2e\xc1\x1f\x6c\x08\xbe\x72\x76\x14\xef\x39\xe8\x71\x28\xf1\x59\x4a\x83\x4f\x3b\x04\xb7\xd0\x54\xbe\x9a\x90\xa1\x66\x42\x25\x31\xd6\xda\xa8\xc4\xd2\xbe\xaf\x73\x9c\xbe\xd3\x83\xc7\x98\xed\x60\xa0\x21\x7a\xaf\xb8\x
1c\x81\xd8\xcb\xf7\x9f\x2e\x0e\xf8\x07\x4c\x60\x0a\x07\x81\xbb\x47\x62\xc5\x0b\x78\x46\xe8\xff\x40\xc4\x78\xd9\x01\xf9\xe7\x8c\x1b\x5b\x36\xf9\xe7\x8c\x27\xba\x74\x17\x59\x36\x85\x4d\x41\x7f\xbb\x25\xe8\x6a\xfd\x39\xaa\xed\xf5\x7f\xdb\x5a\x1f\x62\x4f\xd4\xd1\x2f\x99\x5e\x54\x45\x3a\x08\xc6\xd1\xa1\x36\x5e\x4d\xb8\x70\x1d\xc5\x35\xc1\xf9\xf0\x67\xcb\xc8\xfc\xd6\xac\x19\x29\x9d\xbb\x2c\x50\xa5\xd0\xe7\x04\x8f\x76\xfd\x2d\x6e\x4d\x85\xa0\x0a\x7b\xbe\x86\xc9\x20\x82\x5d\x5f\xbc\xb9\x98\x52\x99\x91\x92\xa3\xa1\xaa\x8a\x8b\x42\x85\xf7\x2e\x26\xbb\xd2\x82\x15\x99\xcf\x07\xe3\x0e\x1e\x51\xb2\x10\x6a\xee\x2d\x94\xd9\xaf\xd1\x07\x3e\x3d\x17\x84\xf5\x08\x66\x72\x7e\xa6\x5c\xbf\x1a\x79\x06\x2f\x5e\x4e\x26\x81\x5b\x36\xc8\x07\xc0\xdc\x22\x34\x04\xd9\x34\xe3\x80\x72\x53\x2e\x93\xfd\x60\xd1\x5f\x3b\x01\xe8\xac\x98\xa9\x2e\x6e\xd7\xc4\x43\xaa\x2a\x8c\xc4\x3b\x04\xe9\xbe\xb1\x8c\x13\x44\x9e\xeb\x15\x45\x88\x11\xfc\x8c\x1e\xa3\x42\x64\xf7\x1d\x9a\x28\xc4\x65\xb3\x79\x50\x79\x75\xc1\xbd\x0e\x83\xb0\x14\x6b\x98\x21\x55\x13\xb7\x6b\x3e\x98\x74\xad\xc4\x52\x26\xd6\xe3\xe3\x06\x8a\xc1\xb9\x30\x8c\xd6\xe0\xbf\x4b\xb4\x0e\x53\x76\x00\x22\x71\xa5\xc8\xf3\x35\xcc\xe5\x1d\x2a\x86\xee\x93\xb4\xe3\xf9\x0d\xe1\xd5\xcb\xf1\xab\x6f\xc1\x94\x39\x0e\x46\xbd\x46\x96\x50\xb1\x1a\xe4\x4d\x13\xc1\xa2\xde\x60\xe1\x16\xfd\x01\xbc\xde\x91\x6e\x34\x95\x3b\x78\x99\x8d\xdc\xa0\x13\x0c\x9e\xc3\x81\x4f\x27\x78\xb3\x5a\x63\xba\xf2\x92\xa6\x42\x35\x7d\xc0\xb6\x16\x7d\x6e\x6a\x78\xff\x56\x18\x91\x8b\x19\x0e\xa6\xdc\xa2\x64\xf2\x56\x22\xf4\xa8\xe8\x48\xa1\xc8\x85\x54\x20\x92\x44\x97\xca\xd1\xb1\xc5\x76\x53\xbe\xa6\xf8\xfb\x8d\x8b\xf8\xb8\x9b\x27\x92\x04\xad\x8d\xe1\x98\xcf\x9c\x88\x12\x4b\x82\xa6\x12\x59\xa6\xd8\x38\x53\xf2\xc9\x9a\x43\x60\x58\xb1\x92\x79\x1e\x11\x2e\xb5\xa5\x4d\x66\x08\x2b\xa3\x29\xcb\x94\x54\xf5\x4a\x52\x3b\x3a\x2b\x0b\x5a\x81\x80\x5c\x73\xc9\xcf\x9e\x15\x84\x99\xdb\x91\x8f\xab\x6c\xb2\xda\x80\xd2\xab\x51\x3b\x27\x6b\x6a\xba\x2f\x79\x83\x7e\x77\x67\x98\x97\xa7\x3f\x9d\x5e\x56\xb9\xe5\x93\x4f\x6e\x14\x0b\xd6\xae\x5e\x48\x15\xb7\xf8\x10\x7e\x95\x7a\x2e\x6c\xb2\x30\x03\xef\x71\x58\x40\xba\x74\xc4\x11\xdb\x82\xef\x24\x08\x4b\xcc\x53\xd8\x11\x52\xf9\xce\x9f\xdf\xa3\x10\xd6\xc6\xae\x15\xcb\x36\x26\xe8\x29\xde\x61\xae\x0b\x34\xdb\xb6\xbc\x8b\xd7\xeb\x0f\x97\xef\xf6\x77\xeb\xf8\xd1\x13\x74\xdc\x47\x95\x6d\x0f\x3e\xd9\x0c\xf9\x71\xf5\x39\xaa\x27\x94\x94\xbf\x43\xf4\x41\x76\x47\xbb\xc2\xac\xa7\x70\x18\x29\x7d\x16\x88\x18\x0c\xea\x24\x68\x5b\x5a\x4f\x94\x04\x51\x10\xa4\x31\x1e\xc3\x7b\x5d\x70\x2e\x45\xc7\x92\x0b\xeb\x6a\xbd\x9f\xa3\xef\x94\x34\xb5\xc3\x96\xb9\xb3\xbd\xc7\x5c\xc5\xa8\xd0\x45\x4c\x7b\x2a\xaf\x40\xd9\xca\x66\x0d\xdf\x35\xf1\xa2\x0a\x16\xde\x93\xbb\xa6\xc5\x0b\xf0\x8b\x1a\x7e\xbb\xa5\x4b\xc2\xa7\x38\x4c\x7b\x90\x2f\x45\xc1\x5e\xd3\xf9\x7c\xb0\x6c\x54\x21\x2a\x6f\x06\xb6\xe7\x95\x0c\xd9\x35\xd1\x77\x9c\x3b\x53\xf0\x1c\xe2\x07\x65\x28\x83\x8d\x4a\xc1\x6b\x40\x8a\x39\x3a\x84\x1a\xea\x10\x36\x86\x08\x36\xc4\x7e\x92\xa1\x41\xd7\xa5\x87\xb5\x57\xfd\x8b\x41\x37\xc2\x7f\x97\x22\xb7\xfd\xc9\xa0\xed\x4d\x9d\xe6\xb4\xeb\x68\xab\xb0\x22\x98\x76\x29\x75\xd8\x00\xdb\x50\x3e\x5f\x18\x9d\xe8\x14\x1f\xc5\x10\x3d\x75\x1d\xea\x19\x59\x70\x22\x9d\x2e\xdf\x77\x8a\x4e\xdb\x7d\xb1\x13\x91\xe7\x8d\xde\x58\x95\x7f\x9d\xee\x6c\x90\x35\x6b\xe1\x9d\x7d\xc8\x91\x54\x29\xde\x5f\x64\x11\xd3\x00\x5e\xc3\xf3\x83\x1a\x43\xb3\x88\x88\x26\x14\x05\x12\x1d\x61\x00\x0d\x6b\x5a\xe1\xa8\x6a\x09\x78\x5f\xe8\x5d\xe1\x0a\xe3\x45\x11\x77\x7b\xd9\x12\x3c\x90\x50\xeb\xa5\x36\xd8\xb5\xc9\x7e\x95\xfc\x67\x42\xe6\xa5\xc1\xfd\x43\xe8\x08\x76\xb6\x34\x99\x48\x58\xc5\x2d\
x02\xb7\x07\x2d\x58\xbd\xc4\x85\x5e\xf5\x3a\x38\x7a\xd8\x1d\x47\xb7\x0d\xa9\x6e\xef\xb7\xf3\x20\xbe\xa3\x12\x16\x4a\x2b\xe6\xd8\x30\xa4\xed\x18\xdf\x7d\x4e\x4f\x32\xb3\x2d\x53\x82\x67\xd0\x30\xc1\xa6\x05\x76\x99\xd8\xc3\xff\xae\xa1\x55\x5c\x47\xab\x69\x32\x5e\xf9\xb1\xc6\x24\x31\x5d\xab\x5d\x97\xc1\x05\x0e\x2f\xf9\xf8\xde\x08\x27\xfa\x95\x7d\x3e\xfc\x7f\xb4\xb1\xae\x1e\x40\xf4\x33\xc1\x8f\x0d\x06\x3e\xc6\xd7\x04\x36\x6f\x91\x1a\x3b\xb4\x4d\xa9\xe3\xee\x84\x8d\xe9\x3d\x73\xc0\x35\xb4\x70\x72\x96\x47\x43\xdc\x30\xe9\xc7\xac\x3f\x52\xff\x67\xf7\x02\xd1\xee\xb7\xac\xc2\xa7\x0e\x6d\xb3\xf0\x59\x44\x9d\x43\x7c\xd9\xa4\x1b\x59\xfb\x37\x93\xfb\x6f\xb6\xad\xb9\xc3\x44\x1f\x62\xee\x78\xa6\x3e\x61\xe2\x6a\xe7\xc3\x35\x18\x7d\x15\x06\xef\xa4\x2e\x29\x41\xc6\x27\xf7\x43\xc3\x02\xfe\xef\x35\x4c\xe0\x1f\xe0\x3b\x96\x30\xe5\x1f\x8f\xf5\x4c\x7f\x6f\xc7\xf4\xc9\xfd\xd2\x56\xb7\xb4\xaa\x57\x1f\xea\x0b\x29\x3e\xb2\xe6\x8d\x14\x57\xf3\x24\x03\x5f\xe9\x35\xb2\x2b\x9d\xf1\x8d\xae\x2f\xdd\x33\xff\x22\x61\x8f\xe1\x1f\xb9\x99\x0a\xbe\xdd\xe9\x82\x8a\x91\x90\xbc\xe5\x94\xa3\xaf\xab\x64\x7e\xe8\xcb\x20\x58\x08\x95\x86\x06\x94\x48\x53\xe9\x2f\xcd\x03\x85\x62\x2e\xa4\xea\x75\x0a\xf0\x8b\x15\x44\x97\xe2\x6c\xd5\xe5\xcd\x3c\x33\xb4\x0a\xd9\x6c\x09\x71\xef\x09\xf9\xe4\x86\xfd\x6c\x5e\xb2\xf5\x9e\xe6\x09\xbf\xe4\x06\xff\x53\x1f\xb8\xd9\xa9\xdc\xe5\x60\xd8\x44\xf8\x91\x93\xb2\xe5\x92\xdb\x0e\x20\x62\xdb\xcc\x17\xa4\x2a\x85\x24\x47\xa1\xfc\x93\x1d\xcc\x9c\xbe\x43\x63\x7b\x4f\x30\xda\x3f\x62\xb3\x1b\x91\x3b\x7e\xf6\xda\x0e\x70\x3c\x86\xcb\x98\x2b\xd0\x06\xed\x96\x49\x5d\xdf\xb5\x1e\x1e\xc4\xbb\xcb\xaf\xda\x47\xf9\xfa\x8d\x94\xdd\x62\x7b\x3c\x23\xe1\xa3\xec\x08\xd5\xcd\x00\x46\xc1\xed\xd1\xf6\x48\x63\xef\xaa\x33\x46\x0a\xf4\xd4\x34\xe7\xe9\xae\xdf\xeb\xdd\xdb\x5c\x38\x17\x1c\x51\xc3\x10\xbd\x87\x96\x8e\x9f\xd1\xa1\x72\xbd\xa7\xb9\x66\x2e\x3e\x83\x5b\xde\x34\xa4\xff\xbb\xcb\xab\xba\x7d\xb8\xe5\x8c\xce\xab\x42\x37\x30\xef\xb4\x1e\x42\x8e\x82\x7b\x81\xf1\x91\x5b\xbc\xa6\x79\xac\x37\x19\xfd\xbc\x2f\x8d\xb7\x1c\x3d\xdf\x28\x2e\x30\xde\x88\xf8\x16\xd4\x0c\x51\x81\x74\x68\x04\x29\x2b\x99\x75\x78\x97\x45\x54\x5a\x46\xc7\xe7\x22\xc9\x3d\x07\xc4\xe1\x91\x14\x59\x8e\x54\xf3\x51\x6f\xcf\x8f\x37\x22\x43\xe2\xee\xeb\xc8\xe0\x33\x5e\x86\x0c\x37\x06\xb3\x5c\x27\xb7\x53\x00\x48\xdc\xfd\x88\x3f\xb8\x63\x5e\xdd\x23\xd0\x30\x7d\xf0\xe8\xc6\x65\x02\xcd\xd1\x90\x6f\xb2\x6f\x5c\x1d\x30\x60\xb8\x3e\xa8\xee\xdc\x82\x01\xd1\xdc\x76\xeb\x9b\x97\x56\x97\x06\x1b\x2e\xca\xdd\x6f\x7b\xa8\x08\x40\xce\x69\xda\x0d\x40\x53\x1d\x40\x1b\xd7\x19\xb4\x98\x87\xfc\xac\xcf\xcb\xa7\xcd\x59\x3f\x14\x18\x95\xcb\x86\x6c\xe4\x92\x65\xc3\xf7\xd5\xfc\xdc\x83\xdc\xd8\x89\xbb\x6f\x09\xf8\x07\x61\x17\xd3\x5a\xc4\xf4\x39\xac\x26\xfd\x33\x8b\xc6\xb4\x1f\xf0\x7b\xd5\x4f\xb8\x6a\x1c\x1b\x83\x9b\x0b\xdf\x6b\xcb\x41\x7c\x6b\x71\x9c\xa8\xe8\x25\x67\xe9\xf3\x8e\x56\x77\xd1\xe0\xd2\x37\xea\xd8\x8d\xab\x14\x32\x69\xac\x23\xd5\x5c\x92\x0d\x24\xcd\x67\x7c\x42\x01\x2e\x0b\xb7\x06\xcd\xcf\x77\x3c\xd2\xd4\xe8\x22\xe8\x6a\x04\x1c\xf2\xab\x1d\xc3\xef\x60\x75\x4c\x39\x30\x9d\x93\x1b\xb2\x68\x6b\xdb\xc2\xa2\x3f\x80\x5c\xeb\x62\xe4\x71\xe1\xbd\x58\x16\xcd\xb5\xd3\xc6\x3d\xbe\x92\xbe\x99\x43\xde\xf1\xd5\xe4\x6f\xe2\xd5\x64\x32\xf9\xdb\xcb\x57\x93\xc9\x01\xfd\xa2\xff\xb3\x49\x96\x4d\x26\xfb\x43\xb0\x28\x4c\xb2\xe0\x7d\xd0\xba\x54\x38\xd1\xd9\x25\x27\x97\xdc\x9d\xd9\xbc\x86\x83\x6a\xb2\xf5\x5a\x69\xd3\xa1\x4d\x6e\x06\x9d\x3d\xd7\x91\x5d\xc8\xcc\xd5\xcf\x61\x3a\x7c\xe1\x24\x3a\xb5\xee\xdc\x89\x0c\xb7\xf2\x7a\x3b\x40\x1f\xc7\xfe\x58\x66\xc6\xd8\x63\x52\xb2\x03\xf4\xb0\xd7\x2e\x43\xdd\xfd\xd3\x51\x56\x8b
\x9b\x24\xb6\xd6\xb4\x90\xf0\x6d\xf0\xd6\x74\x57\x53\x9a\xca\xed\xb0\xb0\x2e\xb8\xb9\xde\x0e\x84\x84\x80\xd7\x5a\x13\x89\x68\xbe\xc2\x64\xd7\x2a\x7f\xc5\xb0\xed\xb0\x32\xe6\xa6\x4b\x8f\x8b\xc0\xa0\x7f\xb2\xc4\x9d\x13\xf2\xe8\xde\x04\xa0\xb4\x52\xcd\x1b\xae\x3a\x45\x2b\x0d\x55\xa2\x12\xf3\x34\xd8\x40\xa6\x0d\x7c\xb2\x5a\xf9\xd7\x69\x68\x24\xa1\xf4\x8f\x65\xfd\xbb\x75\x7e\xc2\xab\x64\x82\x6e\x0d\x19\x0a\x7e\x66\xe6\x34\x77\xcc\x61\x89\x42\x49\x35\xcf\x4a\xca\x63\x18\x1f\x1b\x6d\x68\xc2\x52\x98\xd0\x50\x5a\x34\x16\x56\x0b\x1d\x92\x7c\xae\x2b\x0b\x83\x64\x8d\xc3\x70\x09\x26\x6d\x91\x8b\x35\x48\x47\x05\x45\xe0\xaa\x19\x39\xb8\x09\x14\x45\x30\xf4\x2f\x87\x43\x81\x5e\x87\x13\xb2\xa4\xc3\xde\x7f\xd2\xd4\xe5\x77\x94\x51\xe3\x58\xa2\x97\xcc\x4b\x0c\xa6\x21\xad\x2b\x8b\x54\x38\x04\x91\xb9\x90\x43\xfa\x55\x7c\x0f\xc3\x17\x0c\x22\xcb\x30\x71\xd6\x3f\x86\x23\xf1\x1b\xad\x1d\xd0\xae\x55\x2a\xe5\x49\xa8\x48\xdb\xd4\xe6\x16\x95\x5d\x0f\x74\x5a\x48\xae\x3e\x9c\x9d\x9c\xbd\xf1\x58\x5a\x4c\xd8\x52\x26\x32\xdd\xe0\xa2\x9d\x31\xb7\x78\xae\x78\xf9\xba\x1c\x77\x9c\x48\xf3\xc5\x48\x7b\x6a\xeb\xa5\xc4\x86\x30\x76\xdc\xdb\x56\x02\xa5\x99\x2a\x0b\xe3\xdc\xb7\xa9\x2e\x7c\x35\xdb\xf8\xfc\xed\xb7\x90\xb4\xf1\xd3\x43\x4d\x56\x1c\xe3\xa5\xcf\x3c\x2a\xe4\x23\xa7\xcf\xf5\x0a\xcd\x89\xb0\x18\x6e\xf3\x7d\x30\x9b\xb2\xe6\x8d\xc2\x23\xf4\xda\xdd\x84\xf1\x60\xc1\x34\xce\xce\x23\xa0\xe4\xdf\x31\x60\x56\xf4\x4c\x5b\xd4\xf1\xb4\x2d\x67\x3c\x66\xa7\x30\xd9\x1d\x60\xa3\x71\xec\x8a\xb2\xdb\xf1\xbb\x0b\x62\x47\x3a\x40\xf4\xf2\x08\x89\xab\x82\xdb\xcc\x10\x1a\xf9\x45\x7b\x4d\x9d\x1a\x70\xbe\xe2\x25\x1a\xb3\x95\x58\x9c\x78\xd9\x3f\xe2\xc3\x37\x2a\xdf\xe6\x0b\xe3\xd1\x42\xd8\x8b\x95\x7a\x6f\x74\x81\xc6\xad\x5b\xb8\xaa\x46\x69\x6b\x83\xa0\xf0\xdb\xa8\x3e\x36\x97\xdd\xb4\xee\x4c\xc2\x8c\x3f\xcf\xc3\xad\x3e\x76\xf5\x2c\xd8\xc7\xf7\x7f\xe2\xda\xa7\x12\x9d\xdb\x34\xdf\x44\xb7\x3a\xb9\x4f\x58\xbf\xc5\x6d\xdc\x8f\x5b\x94\x4d\xfa\xab\x56\x40\x63\x49\xbb\x21\xfa\x34\xa1\x34\x77\xff\x58\xe1\xba\x89\x8d\xc8\x9d\xf2\x69\x37\xfd\xe3\x69\x57\x82\xba\xf5\x22\xf2\x70\xcd\x53\x5e\x17\xa8\xb3\x30\xfe\xf1\x16\xd7\x37\xa1\x5e\xe5\xa8\x56\x99\x7d\x85\x47\x71\x51\xfd\xdf\x2d\x74\x0c\xd6\x92\x6c\x63\xfc\x63\x0d\x71\xb3\xa3\x6b\xdd\x66\x6a\x0b\x6a\xe7\x9d\xc6\xc6\x4e\xdd\xd8\xb7\x71\xb7\x1d\x56\xec\x35\xd9\xd8\xe8\xa8\xf2\xa9\xe8\x49\xbb\xf3\xb2\x80\x70\xbf\x72\x19\xfb\x37\x01\x83\x6d\x14\xc3\xd5\x16\x21\x50\x53\xdd\xea\x21\x6f\x0e\x7b\x5f\xdc\xa3\x92\xba\x3c\x9a\x1c\x82\xfc\x7b\x0b\x3b\xc8\x67\xcf\x5a\x6f\x35\x16\x32\x4f\x4f\x7c\xd7\x8c\x17\x7e\x94\x37\xf5\xbb\xa2\x37\x98\xe3\x5c\x38\xe4\x94\xa5\xa4\xdc\x9b\x22\x87\x7f\x80\xc3\x8d\x97\xaa\xc6\xf7\x44\xf5\x2b\x74\x8f\x45\x8b\xed\x35\xad\x90\x41\x89\xb3\xd7\xaf\x7a\x65\x78\xf3\x43\x6b\x2b\x76\xeb\x9e\xfe\xd6\x3a\x7f\x26\xfc\x71\xd8\x6c\x6e\xd7\xf2\x0c\xbf\x46\x89\x56\x89\x70\xfd\x76\x4a\x57\xe1\xdb\x95\xd2\x44\xb0\x8f\xf2\x66\xd0\xb8\x50\x6f\xe4\x88\x01\x7f\xcc\x05\x9b\xb9\xca\x46\xda\xe4\xb9\x08\x60\x9f\x9b\x41\x2b\x18\x51\x2c\xa4\xc3\x3f\x66\x8d\x06\x87\xb0\xf1\x6f\x3c\x86\x2b\x2a\x4c\x4c\x38\xdd\x50\x48\x37\xe1\x78\x70\x13\x70\x3c\x86\x9f\x68\x9c\xc1\xaa\xa2\xba\x09\x36\x17\x76\x6b\x37\x02\xfb\x5e\x84\xf7\x5f\x4a\xba\x2d\x1a\xb9\x4a\xee\xd8\xeb\x4c\x49\x57\xe7\xb0\x8d\x0b\xf6\xf0\x77\x42\x3f\xf2\x5b\xd8\xdd\x71\x9d\x91\x9c\xb0\x40\xe1\x3a\xe4\x2f\x0f\xad\xa8\xfe\x39\x72\xe2\xab\x7d\x68\x76\x26\x87\x91\x70\xfe\x33\x2f\xbf\xb1\x4e\x83\xa0\x1a\x57\x6d\x43\x4f\xec\x49\xa4\x4e\xc4\xf0\x1f\xe8\xd2\xc3\x9a\xa3\x63\x6b\xe5\x9c\xca\x90\xb0\xa8\xa1\x0f\xfe\xf4\xab\x7c\xec\x8f\x9f\xfd\xce\x63\x6f\x9f\x7a\xd5\x59\xd9\xa2\xb
3\x05\x71\x89\x89\x2c\x64\x34\xdd\x86\xaa\xec\xd4\x92\xf1\x18\xae\xc3\xdf\xe1\x60\xda\xad\x2f\x3b\x55\xa5\xa5\x29\xa1\x9b\xf2\x88\x92\xb0\x8e\x50\x69\x16\x2a\x6f\x9f\x3f\x5e\xfb\x36\xd3\x4e\xad\x08\x7f\x80\xca\x3e\x45\xd7\xd7\xd5\x5f\xd4\x8e\x2f\x28\x47\xd5\xe0\xd9\xd6\x8d\x0b\x5f\x40\xce\xd6\x0e\xb7\x8e\xbc\x95\xd7\xff\xce\x53\xaf\x55\xad\xe3\xe8\xfd\x6b\xcf\xa8\x65\x7b\x06\xb3\x52\xa5\xc7\xdd\xca\xc9\x07\x4d\xf3\x4d\xc5\xdc\x0b\x7f\x5a\xd5\x46\x1f\x8f\x7c\x3c\x86\xf0\xbe\x7d\x5b\x70\xaa\x64\x71\x12\x8b\xbd\x87\xde\xff\x04\x00\x00\xff\xff\x21\xf8\xaf\x37\x60\x3d\x00\x00") +var _call_tracer_parityJs = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xdc\x3b\xef\x6f\x1b\x37\xb2\x9f\xad\xbf\x62\xce\x1f\xae\x12\xa2\x48\x72\xd2\xcb\x03\xe4\x73\x0e\xae\xe3\xb4\xc6\xb9\x71\x60\x3b\x2d\x8a\xc0\x78\x47\xed\xce\x4a\x8c\x57\xe4\x1e\xc9\xb5\xac\xa6\xfe\xdf\x1f\x66\x48\xee\x0f\x69\xe5\xb8\xbd\xe0\xbd\xe2\xe5\x4b\xb4\x24\x67\x38\x33\x9c\xdf\xa4\xc7\x63\x38\xd1\xc5\xda\xc8\xf9\xc2\xc1\x8b\xc9\xc1\x7f\xc1\xf5\x02\x61\xae\x9f\xa3\x5b\xa0\xc1\x72\x09\xc7\xa5\x5b\x68\x63\x7b\xe3\x31\x5c\x2f\xa4\x85\x4c\xe6\x08\xd2\x42\x21\x8c\x03\x9d\x81\xdb\x58\x9f\xcb\x99\x11\x66\x3d\xea\x8d\xc7\x1e\xa6\x73\x9a\x30\x64\x06\x11\xac\xce\xdc\x4a\x18\x9c\xc2\x5a\x97\x90\x08\x05\x06\x53\x69\x9d\x91\xb3\xd2\x21\x48\x07\x42\xa5\x63\x6d\x60\xa9\x53\x99\xad\x09\xa5\x74\x50\xaa\x14\x0d\x6f\xed\xd0\x2c\x6d\xa4\xe3\xfb\x77\x1f\xe0\x1c\xad\x45\x03\xdf\xa3\x42\x23\x72\x78\x5f\xce\x72\x99\xc0\xb9\x4c\x50\x59\x04\x61\xa1\xa0\x11\xbb\xc0\x14\x66\x8c\x8e\x00\xdf\x12\x29\x57\x81\x14\x78\xab\x4b\x95\x0a\x27\xb5\x1a\x02\x4a\xa2\x1c\xee\xd0\x58\xa9\x15\xbc\x8c\x5b\x05\x84\x43\xd0\x86\x90\xf4\x85\x23\x06\x0c\xe8\x82\xe0\x06\x20\xd4\x1a\x72\xe1\x6a\xd0\x27\x08\xa4\xe6\x3b\x05\xa9\x78\x9b\x85\x2e\x10\xdc\x42\x38\xe2\x7a\x25\xf3\x1c\x66\x08\xa5\xc5\xac\xcc\x87\x84\x6d\x56\x3a\xf8\xf9\xec\xfa\x87\x8b\x0f\xd7\x70\xfc\xee\x17\xf8\xf9\xf8\xf2\xf2\xf8\xdd\xf5\x2f\x87\xb0\x92\x6e\xa1\x4b\x07\x78\x87\x1e\x95\x5c\x16\xb9\xc4\x14\x56\xc2\x18\xa1\xdc\x1a\x74\x46\x18\x7e\x3c\xbd\x3c\xf9\xe1\xf8\xdd\xf5\xf1\x77\x67\xe7\x67\xd7\xbf\x80\x36\xf0\xf6\xec\xfa\xdd\xe9\xd5\x15\xbc\xbd\xb8\x84\x63\x78\x7f\x7c\x79\x7d\x76\xf2\xe1\xfc\xf8\x12\xde\x7f\xb8\x7c\x7f\x71\x75\x3a\x82\x2b\x24\xaa\x90\xe0\xbf\x2c\xf3\x8c\x4f\xcf\x20\xa4\xe8\x84\xcc\x6d\x94\xc4\x2f\xba\x04\xbb\xd0\x65\x9e\xc2\x42\xdc\x21\x18\x4c\x50\xde\x61\x0a\x02\x12\x5d\xac\x9f\x7c\xa8\x84\x4b\xe4\x5a\xcd\x99\xe7\x9d\x0a\x09\x67\x19\x28\xed\x86\x60\x11\xe1\xef\x0b\xe7\x8a\xe9\x78\xbc\x5a\xad\x46\x73\x55\x8e\xb4\x99\x8f\x73\x8f\xce\x8e\x5f\x8f\x7a\x84\x33\x11\x79\x7e\x6d\x44\x82\x86\x0e\x47\x40\x56\x92\xf8\x73\xbd\x52\xe0\x8c\x50\x56\x24\x74\xd4\xf4\x3b\x61\x65\x14\x0e\xf0\x9e\xbe\x9c\x25\xa5\x05\x83\x85\x36\xf4\x3b\xcf\xa3\x9e\x49\xe5\xd0\x28\x91\x33\x6e\x0b\x4b\x91\x22\xcc\xd6\x20\x9a\x08\x87\x4d\x66\x48\x8d\xfc\x71\x83\x54\x99\x36\x4b\x56\xcb\x51\xef\x73\x6f\x2f\x50\x68\x9d\x48\x6e\x89\x40\xc2\x9f\x94\xc6\xa0\x72\x24\xca\xd2\x58\x79\x87\xbc\x04\xfc\x9a\x20\xcf\xd3\x9f\x7e\x04\xbc\xc7\xa4\xf4\x98\xf6\x2a\x24\x53\xf8\xf8\xf9\xe1\x66\xd8\x63\xd4\x73\x74\x27\x71\xe2\x1c\xd5\xdc\x2d\xa0\xef\x75\x5b\xe4\x03\xda\xae\xb4\x98\xf2\xd1\xd2\xe8\x52\x5a\x26\x0c\x0c\x0a\xab\x95\x1d\x42\xb2\xc0\xe4\x56\xaa\x39\x64\x46\x2f\x99\x17\xa9\x60\xae\x19\xb7\xf4\x84\xfc\xcb\x3a\x2c\xfe\x05\x4b\x74\x0b\x4d\x2a\x60\xc1\x69\x52\x6f\x22\x28\xe0\x16\xf0\xd3\x8f\xa0\x8b\x44\xa7\x38\xea\xed\x6d\xd3\x34\x85\xac\x54\x2c\xb5\xfe\x00\x3e\x1b\x74\xa5\x21\x65\x97\x76\x54\x71\x35\xca\x79\xe5\xe1\x43\x60\x2c\x45\x9b\xa0\x4a\x31\xe5\x83\xbb\xb5\x
b0\x5a\xb0\xaa\xc0\x0a\xbf\xb9\x43\xf8\x54\x5a\xd7\x58\xc3\xd4\x0b\x05\xba\x24\x53\x6e\x1e\xbb\x54\xce\x73\x23\xe8\xb7\x42\xc3\x74\x8f\x7a\x7b\x15\xf0\x14\x32\x91\x5b\xa4\x7d\x0b\x61\xa4\x5b\x9f\x1a\xa3\xcd\x8f\xa2\x28\xa4\x9a\x4f\xe1\x73\x6f\x6f\x6f\x3f\xd1\x8a\x35\x06\x12\x83\x5e\x82\xc4\x2b\x58\xa7\x8d\x98\x23\x6d\x4b\xc7\x36\x17\x76\x7f\x0a\xfb\x17\xf5\xd7\x90\x80\x1f\x9f\x9d\x0b\x0b\xa5\x54\xee\xd5\xb7\xa0\xef\xd0\x64\xb9\x5e\x75\x2d\x5b\x8a\xfb\xb0\xa7\xfc\x15\x01\xef\x13\xc4\x14\xd3\xae\x95\x52\xdd\x89\x5c\xa6\xf0\xa9\x5c\x16\x24\x22\x27\x15\x93\x4c\x6b\xbf\x13\x1d\xe3\x0c\x55\xa9\x1a\x18\xbc\x43\xe3\x3c\xee\xcb\xf8\x9b\xd7\x84\x83\x4b\x85\x13\x91\xe5\x19\xf9\xe0\x26\x5f\x61\x80\xd7\x7b\x7d\xce\xe5\x52\x92\xaa\x8b\x84\x9c\xf9\xc1\xe4\xc5\xb7\xd0\x3f\x98\xbc\x78\x39\x68\x40\xf1\x4a\x0f\x54\x18\x4c\xf4\xb2\x90\xac\x5b\x82\xfe\x63\xc2\x4b\x99\xbb\xe7\x52\xc5\xa1\x61\x6f\xef\xa1\xfb\xc4\xae\x9c\x30\x4e\xaa\xf9\xcf\x92\xf4\xee\x73\x53\x22\x5e\x43\xa7\x51\x10\x52\x59\x67\xca\xa4\x96\x81\xa7\x97\x83\x56\x3c\x86\xab\x8d\xa1\xf6\xbe\x57\xb7\xb2\x60\xd7\x63\xdf\x6a\xc3\x44\xd8\x29\x7c\xf4\x5b\xda\x32\xcb\x64\x22\xc9\xcc\x67\x22\x17\x2a\xf1\x1e\x96\x75\x33\x43\xb3\xdf\xdb\x63\x1b\x96\xf6\x62\xf6\x09\x13\x77\xba\x2c\xdc\xba\x61\x27\x7a\xf6\x69\xc0\xd4\x13\x50\xff\x4e\x18\xb8\xa7\x60\xe3\x87\x21\x9c\x04\x2b\xee\x21\x3c\xf4\xf6\xf6\xa2\x51\x99\x12\x0f\x03\x8d\xe3\x31\x90\xe9\x92\x17\x90\xea\x4e\xdf\x06\x63\xa5\x23\x5d\x07\x59\x78\x77\x48\x56\x5e\x79\x1b\xb4\xa3\xde\x1e\xc1\x35\x88\xc9\xf5\x7c\x08\xe9\xcc\x13\x44\xb9\x88\x28\x5c\x69\x90\xdd\x1e\x32\xdb\x20\x97\x4b\x4c\xa5\x70\x98\xaf\x7b\x7b\x7b\x44\x2f\x4f\xc0\x11\xe4\x7a\x3e\x9a\xa3\x63\xf1\xf4\x07\x87\xbd\xbd\x3d\x99\x41\xdf\xcf\xfe\xe5\xe8\x88\x85\x9b\x49\x85\xa9\x47\xbf\xc7\x7e\x21\x13\x65\xee\xaa\x7d\x09\x28\x70\x48\x3f\x1f\x3c\x15\x3f\x23\x68\x95\xaf\x21\xa1\x64\x40\xcc\x48\x23\xed\xda\x3a\x5c\x06\xe6\xec\x10\x32\x61\xc9\x21\xc8\x0c\x56\x08\x85\xc1\xe7\xec\xef\x40\xab\x04\x03\x95\x76\x6d\xd9\xf7\x1e\x01\xed\x36\xd2\xc5\xc8\xe9\x77\xe5\x72\x86\xa6\x3f\x80\xbf\xc2\xe4\x3e\x9b\x0c\xe0\xe8\x88\x7f\x44\xda\x03\x4c\xa0\x97\xb0\xe8\x22\x30\xca\xf0\x57\xce\x48\x35\xf7\xbc\x06\x5a\xcf\x32\x10\xa0\x70\x05\x95\x27\x91\x16\x66\x48\x9e\x97\x5d\x0a\xa6\x43\x10\x69\x4a\xae\x95\x03\x44\x15\x0e\xda\x5b\xc2\x5f\xff\x4a\xfe\x9d\x08\xda\x3f\xb9\x3c\x3d\xbe\x3e\xdd\x87\xdf\x7e\x83\xd6\xc8\x8b\xfd\x41\x83\x32\xa9\x2e\xb2\x2c\x10\xe7\x1d\x6d\x81\x78\xdb\x3f\x18\x8c\xee\x44\x5e\xe2\x45\xe6\xc9\x0c\x6b\x4f\x55\x0a\x47\x01\xe6\xd9\x26\xcc\x8b\x16\x0c\x01\x8d\xc7\x70\x6c\x2d\x2e\x67\x39\x6e\xc7\xcd\x10\x58\x39\xc6\x92\xa3\xf4\xfa\x4f\xb6\x9d\x23\x69\x55\xdc\x35\x88\x9f\x29\xde\x73\xeb\x02\xa7\x00\x00\xba\x18\xf2\x00\x79\x76\x1e\x70\xfa\x07\xbc\xe7\x33\x8a\x22\x24\xad\x3a\x4e\x53\x83\xd6\xf6\x07\x03\xbf\x5c\xaa\xa2\x74\xd3\xd6\xf2\x25\x2e\xb5\x59\x8f\x2c\xe5\x0d\x7d\x66\x6d\xe8\x39\x8d\x30\x73\x61\x79\x87\xa8\xa9\xc7\x77\x42\xe6\x62\x96\xe3\xf7\xc2\xf6\xeb\x35\x67\x6a\x5a\xaf\x69\x4f\x9d\x68\xeb\xa6\x71\x8a\x3e\xe2\x1c\xcb\x8b\xc0\xf6\x27\xf7\xfb\xdb\x12\x9d\x0c\x6a\x6d\x39\x78\x35\x20\x90\x87\xc3\xca\x06\xea\xd8\x58\x94\x76\xd1\x67\x95\xab\x67\xeb\xe0\x77\x14\xad\xbe\xc3\x46\x58\xef\xb6\x75\xce\x62\x9e\x71\x0c\x20\xff\x47\xba\x37\x17\x1c\x5c\xd9\x1d\x08\xca\xa2\x6c\x39\xe3\x83\x71\x5a\x7b\x4c\xef\x2e\xae\x4f\xa7\xf0\x4f\x24\x87\xe2\xc8\xdc\xee\xfc\x99\x6f\x10\x23\x33\x9f\x53\x6c\xeb\x6d\x50\xd2\xab\xd3\xf3\xb7\x6f\x4e\xaf\xae\x2f\x3f\x9c\x5c\xef\x37\x14\x35\xc7\xcc\x11\x2b\x9d\x59\x01\x2d\x22\x74\xed\xd9\x8f\x04\xf3\xfc\xe0\xc6\x8f\xc0\x51\x87\x33\xd9\x7b\x1c\x02\x3e\
xde\x30\xee\x87\x6d\xa1\xb7\x97\xfa\x23\xf8\x3a\x3a\xea\xb4\x57\xb7\xb0\xdc\xe9\xb8\xe0\x71\xed\x18\x7c\x5d\x55\x4c\x67\xb4\xe2\x3b\x1f\x9a\x1e\xa1\x79\x5b\x43\x77\xb8\xe3\xca\xc5\x85\x4c\x91\x62\x4e\xe2\x93\xa5\x4a\xef\x52\xad\xf0\xf7\x3b\xba\xe3\xf3\xf3\x96\x9b\x3b\x3e\x3f\x3f\xb9\x78\xd3\x72\x7d\x6f\x4e\xcf\x4f\xbf\x3f\xbe\x3e\xdd\x5c\x7b\x75\x7d\x7c\x7d\x76\xc2\xa3\x4d\xaf\xe8\x34\xa9\xda\x2e\xc1\x1f\x6c\x08\xbe\x72\x76\x14\xef\x39\xe8\x71\x28\xf1\x59\x4a\x83\x4f\x3b\x04\xb7\xd0\x54\xbe\x9a\x90\xa1\x66\x42\x25\x31\xd6\xda\xa8\xc4\xd2\xbe\xaf\x73\x9c\xbe\xd3\x83\xc7\x98\xed\x60\xa0\x21\x7a\xaf\xb8\x1c\x81\xd8\xcb\xf7\x9f\x2e\x0e\xf8\x07\x4c\x60\x0a\x07\x81\xbb\x47\x62\xc5\x0b\x78\x46\xe8\xff\x40\xc4\x78\xd9\x01\xf9\xe7\x8c\x1b\x5b\x36\xf9\xe7\x8c\x27\xba\x74\x17\x59\x36\x85\x4d\x41\x7f\xbb\x25\xe8\x6a\xfd\x39\xaa\xed\xf5\x7f\xdb\x5a\x1f\x62\x4f\xd4\xd1\x2f\x99\x5e\x54\x45\x3a\x08\xc6\xd1\xa1\x36\x5e\x4d\xb8\x70\x1d\xc5\x35\xc1\xf9\xf0\x67\xcb\xc8\xfc\xd6\xac\x19\x29\x9d\xbb\x2c\x50\xa5\xd0\xe7\x04\x8f\x76\xfd\x2d\x6e\x4d\x85\xa0\x0a\x7b\xbe\x86\xc9\x20\x82\x5d\x5f\xbc\xb9\x98\x52\x99\x91\x92\xa3\xa1\xaa\x8a\x8b\x42\x85\xf7\x2e\x26\xbb\xd2\x82\x15\x99\xcf\x07\xe3\x0e\x1e\x51\xb2\x10\x6a\xee\x2d\x94\xd9\xaf\xd1\x07\x3e\x3d\x17\x84\xf5\x08\x66\x72\x7e\xa6\x5c\xbf\x1a\x79\x06\x2f\x5e\x4e\x26\x81\x5b\x36\xc8\x07\xc0\xdc\x22\x34\x04\xd9\x34\xe3\x80\x72\x53\x2e\x93\xfd\x60\xd1\x5f\x3b\x01\xe8\xac\x98\xa9\x2e\x6e\xd7\xc4\x43\xaa\x2a\x8c\xc4\x3b\x04\xe9\xbe\xb1\x8c\x13\x44\x9e\xeb\x15\x45\x88\x11\xfc\x8c\x1e\xa3\x42\x64\xf7\x1d\x9a\x28\xc4\x65\xb3\x79\x50\x79\x75\xc1\xbd\x0e\x83\xb0\x14\x6b\x98\x21\x55\x13\xb7\x6b\x3e\x98\x74\xad\xc4\x52\x26\xd6\xe3\xe3\x06\x8a\xc1\xb9\x30\x8c\xd6\xe0\xbf\x4b\xb4\x0e\x53\x76\x00\x22\x71\xa5\xc8\xf3\x35\xcc\xe5\x1d\x2a\x86\xee\x93\xb4\xe3\xf9\x0d\xe1\xd5\xcb\xf1\xab\x6f\xc1\x94\x39\x0e\x46\xbd\x46\x96\x50\xb1\x1a\xe4\x4d\x13\xc1\xa2\xde\x60\xe1\x16\xfd\x01\xbc\xde\x91\x6e\x34\x95\x3b\x78\x99\x8d\xdc\xa0\x13\x0c\x9e\xc3\x81\x4f\x27\x78\xb3\x5a\x63\xba\xf2\x92\xa6\x42\x35\x7d\xc0\xb6\x16\x7d\x6e\x6a\x78\xff\x56\x18\x91\x8b\x19\x0e\xa6\xdc\xa2\x64\xf2\x56\x22\xf4\xa8\xe8\x48\xa1\xc8\x85\x54\x20\x92\x44\x97\xca\xd1\xb1\xc5\x76\x53\xbe\xa6\xf8\xfb\x8d\x8b\xf8\xb8\x9b\x27\x92\x04\xad\x8d\xe1\x98\xcf\x9c\x88\x12\x4b\x82\xa6\x12\x59\xa6\xd8\x38\x53\xf2\xc9\x9a\x43\x60\x58\xb1\x92\x79\x1e\x11\x2e\xb5\xa5\x4d\x66\x08\x2b\xa3\x29\xcb\x94\x54\xf5\x4a\x52\x3b\x3a\x2b\x0b\x5a\x81\x80\x5c\x73\xc9\xcf\x9e\x15\x84\x99\xdb\x91\x8f\xab\x6c\xb2\xda\x80\xd2\xab\x51\x3b\x27\x6b\x6a\xba\x2f\x79\x83\x7e\x77\x67\x98\x97\xa7\x3f\x9d\x5e\x56\xb9\xe5\x93\x4f\x6e\x14\x0b\xd6\xae\x5e\x48\x15\xb7\xf8\x10\x7e\x95\x7a\x2e\x6c\xb2\x30\x03\xef\x71\x58\x40\xba\x74\xc4\x11\xdb\x82\xef\x24\x08\x4b\xcc\x53\xd8\x11\x52\xf9\xce\x9f\xdf\xa3\x10\xd6\xc6\xae\x15\xcb\x36\x26\xe8\x29\xde\x61\xae\x0b\x34\xdb\xb6\xbc\x8b\xd7\xeb\x0f\x97\xef\xf6\x77\xeb\xf8\xd1\x13\x74\xdc\x47\x95\x6d\x0f\x3e\xd9\x0c\xf9\x71\xf5\x39\xaa\x27\x94\x94\xbf\x43\xf4\x41\x76\x47\xbb\xc2\xac\xa7\x70\x18\x29\x7d\x16\x88\x18\x0c\xea\x24\x68\x5b\x5a\x4f\x94\x04\x51\x10\xa4\x31\x1e\xc3\x7b\x5d\x70\x2e\x45\xc7\x92\x0b\xeb\x6a\xbd\x9f\xa3\xef\x94\x34\xb5\xc3\x96\xb9\xb3\xbd\xc7\x5c\xc5\xa8\xd0\x45\x4c\x7b\x2a\xaf\x40\xd9\xca\x66\x0d\xdf\x35\xf1\xa2\x0a\x16\xde\x93\xbb\xa6\xc5\x0b\xf0\x8b\x1a\x7e\xbb\xa5\x4b\xc2\xa7\x38\x4c\x7b\x90\x2f\x45\xc1\x5e\xd3\xf9\x7c\xb0\x6c\x54\x21\x2a\x6f\x06\xb6\xe7\x95\x0c\xd9\x35\xd1\x77\x9c\x3b\x53\xf0\x1c\xe2\x07\x65\x28\x83\x8d\x4a\xc1\x6b\x40\x8a\x39\x3a\x84\x1a\xea\x10\x36\x86\x08\x36\xc4\x7e\x92\xa1\x41\xd7
\xa5\x87\xb5\x57\xfd\x8b\x41\x37\xc2\x7f\x97\x22\xb7\xfd\xc9\xa0\xed\x4d\x9d\xe6\xb4\xeb\x68\xab\xb0\x22\x98\x76\x29\x75\xd8\x00\xdb\x50\x3e\x5f\x18\x9d\xe8\x14\x1f\xc5\x10\x3d\x75\x1d\xea\x19\x59\x70\x22\x9d\x2e\xdf\x77\x8a\x4e\xdb\x7d\xb1\x13\x91\xe7\x8d\xde\x58\x95\x7f\x9d\xee\x6c\x90\x35\x6b\xe1\x9d\x7d\xc8\x91\x54\x29\xde\x5f\x64\x11\xd3\x00\x5e\xc3\xf3\x83\x1a\x43\xb3\x88\x88\x26\x14\x05\x12\x1d\x61\x00\x0d\x6b\x5a\xe1\xa8\x6a\x09\x78\x5f\xe8\x5d\xe1\x0a\xe3\x45\x11\x77\x7b\xd9\x12\x3c\x90\x50\xeb\xa5\x36\xd8\xb5\xc9\x7e\x95\xfc\x67\x42\xe6\xa5\xc1\xfd\x43\xe8\x08\x76\xb6\x34\x99\x48\x58\xc5\x2d\x02\xb7\x07\x2d\x58\xbd\xc4\x85\x5e\xf5\x3a\x38\x7a\xd8\x1d\x47\xb7\x0d\xa9\x6e\xef\xb7\xf3\x20\xbe\xa3\x12\x16\x4a\x2b\xe6\xd8\x30\xa4\xed\x18\xdf\x7d\x4e\x4f\x32\xb3\x2d\x53\x82\x67\xd0\x30\xc1\xa6\x05\x76\x99\xd8\xc3\xff\xae\xa1\x55\x5c\x47\xab\x69\x32\x5e\xf9\xb1\xc6\x24\x31\x5d\xab\x5d\x97\xc1\x05\x0e\x2f\xf9\xf8\xde\x08\x27\xfa\x95\x7d\x3e\xfc\x7f\xb4\xb1\xae\x1e\x40\xf4\x33\xc1\x8f\x0d\x06\x3e\xc6\xd7\x04\x36\x6f\x91\x1a\x3b\xb4\x4d\xa9\xe3\xee\x84\x8d\xe9\x3d\x73\xc0\x35\xb4\x70\x72\x96\x47\x43\xdc\x30\xe9\xc7\xac\x3f\x52\xff\x67\xf7\x02\xd1\xee\xb7\xac\xc2\xa7\x0e\x6d\xb3\xf0\x59\x44\x9d\x43\x7c\xd9\xa4\x1b\x59\xfb\x37\x93\xfb\x6f\xb6\xad\xb9\xc3\x44\x1f\x62\xee\x78\xa6\x3e\x61\xe2\x6a\xe7\xc3\x35\x18\x7d\x15\x06\xef\xa4\x2e\x29\x41\xc6\x27\xf7\x43\xc3\x02\xfe\xef\x35\x4c\xe0\x1f\xe0\x3b\x96\x30\xe5\x1f\x8f\xf5\x4c\x7f\x6f\xc7\xf4\xc9\xfd\xd2\x56\xb7\xb4\xaa\x57\x1f\xea\x0b\x29\x3e\xb2\xe6\x8d\x14\x57\xf3\x24\x03\x5f\xe9\x35\xb2\x2b\x9d\xf1\x8d\xae\x2f\xdd\x33\xff\x22\x61\x8f\xe1\x1f\xb9\x99\x0a\xbe\xdd\xe9\x82\x8a\x91\x90\xbc\xe5\x94\xa3\xaf\xab\x64\x7e\xe8\xcb\x20\x58\x08\x95\x86\x06\x94\x48\x53\xe9\x2f\xcd\x03\x85\x62\x2e\xa4\xea\x75\x0a\xf0\x8b\x15\x44\x97\xe2\x6c\xd5\xe5\xcd\x3c\x33\xb4\x0a\xd9\x6c\x09\x71\xef\x09\xf9\xe4\x86\xfd\x6c\x5e\xb2\xf5\x9e\xe6\x09\xbf\xe4\x06\xff\x53\x1f\xb8\xd9\xa9\xdc\xe5\x60\xd8\x44\xf8\x91\x93\xb2\xe5\x92\xdb\x0e\x20\x62\xdb\xcc\x17\xa4\x2a\x85\x24\x47\xa1\xfc\x93\x1d\xcc\x9c\xbe\x43\x63\x7b\x4f\x30\xda\x3f\x62\xb3\x1b\x91\x3b\x7e\xf6\xda\x0e\x70\x3c\x86\xcb\x98\x2b\xd0\x06\xed\x96\x49\x5d\xdf\xb5\x1e\x1e\xc4\xbb\xcb\xaf\xda\x47\xf9\xfa\x8d\x94\xdd\x62\x7b\x3c\x23\xe1\xa3\xec\x08\xd5\xcd\x00\x46\xc1\xed\xd1\xf6\x48\x63\xef\xaa\x33\x46\x0a\xf4\xd4\x34\xe7\xe9\xae\xdf\xeb\xdd\xdb\x5c\x38\x17\x1c\x51\xc3\x10\xbd\x87\x96\x8e\x9f\xd1\xa1\x72\xbd\xa7\xb9\x66\x2e\x3e\x83\x5b\xde\x34\xa4\xff\xbb\xcb\xab\xba\x7d\xb8\xe5\x8c\xce\xab\x42\x37\x30\xef\xb4\x1e\x42\x8e\x82\x7b\x81\xf1\x91\x5b\xbc\xa6\x79\xac\x37\x19\xfd\xbc\x2f\x8d\xb7\x1c\x3d\xdf\x28\x2e\x30\xde\x88\xf8\x16\xd4\x0c\x51\x81\x74\x68\x04\x29\x2b\x99\x75\x78\x97\x45\x54\x5a\x46\xc7\xe7\x22\xc9\x3d\x07\xc4\xe1\x91\x14\x59\x8e\x54\xf3\x51\x6f\xcf\x8f\x37\x22\x43\xe2\xee\xeb\xc8\xe0\x33\x5e\x86\x0c\x37\x06\xb3\x5c\x27\xb7\x53\x00\x48\xdc\xfd\x88\x3f\xb8\x63\x5e\xdd\x23\xd0\x30\x7d\xf0\xe8\xc6\x65\x02\xcd\xd1\x90\x6f\xb2\x6f\x5c\x1d\x30\x60\xb8\x3e\xa8\xee\xdc\x82\x01\xd1\xdc\x76\xeb\x9b\x97\x56\x97\x06\x1b\x2e\xca\xdd\x6f\x7b\xa8\x08\x40\xce\x69\xda\x0d\x40\x53\x1d\x40\x1b\xd7\x19\xb4\x98\x87\xfc\xac\xcf\xcb\xa7\xcd\x59\x3f\x14\x18\x95\xcb\x86\x6c\xe4\x92\x65\xc3\xf7\xd5\xfc\xdc\x83\xdc\xd8\x89\xbb\x6f\x09\xf8\x07\x61\x17\xd3\x5a\xc4\xf4\x39\xac\x26\xfd\x33\x8b\xe9\xe6\x09\xd4\x8f\xb7\x6a\xe8\x8d\xc1\xcd\x85\xef\xb5\xe5\xf0\xbd\xb5\x38\x4e\x54\x94\x92\x9b\xf4\x19\x47\xab\xaf\x68\x70\xe9\x5b\x74\xec\xc0\x55\x0a\x99\x34\xd6\x91\x52\x2e\x49\xfb\x93\xe6\x03\x3e\xa1\x00\x97\x85\x5b\x83\xe6\x87\x3b\x1e\x69\x6
a\x74\x11\xb4\x34\x02\x0e\xf9\xbd\x8e\xe1\x17\xb0\x3a\x26\x1b\x98\xce\xc9\x01\x59\xb4\xb5\x55\x61\xd1\x1f\x40\xae\x75\x31\xf2\xb8\xf0\x5e\x2c\x8b\xe6\xda\x69\xe3\x06\x5f\x49\xdf\xc6\x21\xbf\xf8\x6a\xf2\x37\xf1\x6a\x32\x99\xfc\xed\xe5\xab\xc9\xe4\x80\x7e\xd1\xff\xd9\x24\xcb\x26\x93\xfd\x21\x58\x14\x26\x59\xf0\x3e\x68\x5d\x2a\x9c\xe8\xec\x8f\x93\x33\xee\xce\x69\x5e\xc3\x41\x35\xd9\x7a\xa7\xb4\xe9\xca\x26\x37\x83\xce\x6e\xeb\xc8\x2e\x64\xe6\xea\x87\x30\x1d\x5e\x70\x12\xdd\x59\x77\xd6\x44\x26\x5b\xf9\xbb\x1d\xa0\x8f\x63\x7f\x2c\x27\x63\xec\x31\x1d\xd9\x01\x7a\xd8\x6b\x17\xa0\xee\xfe\xe9\x28\xab\xc5\x4d\x12\x5b\x6b\x5a\x48\xf8\x1e\x78\x6b\xba\xab\x1d\x4d\x85\x76\x58\x58\x97\xda\x5c\x69\x07\x42\x42\xa8\x6b\xad\x89\x44\x34\xdf\x5f\xb2\x53\x95\xbf\x62\xd8\x76\x58\x99\x71\xd3\x99\xc7\x45\x60\xd0\x3f\x56\xe2\x9e\x09\xf9\x72\x6f\x02\x50\x5a\xa9\xe6\x0d\x27\x9d\xa2\x95\x86\x6a\x50\x89\x79\x1a\x6c\x20\xd3\x06\x3e\x59\xad\xfc\xbb\x34\x34\x92\x50\xfa\x67\xb2\xfe\xc5\x3a\x3f\xde\x55\x32\x41\xb7\x86\x0c\x05\x3f\x30\x73\x9a\x7b\xe5\xb0\x44\xa1\xa4\x9a\x67\x25\x65\x30\x8c\x8f\x8d\x36\xb4\x5f\x29\x40\x68\x28\x2d\x1a\x0b\xab\x85\x0e\xe9\x3d\x57\x94\x85\x41\xb2\xc6\x61\xb8\xfe\x92\xb6\xc8\xc5\x1a\xa4\xa3\x52\x22\x70\xd5\x8c\x19\xdc\xfe\x89\x22\x18\xfa\x37\xc3\xa1\x34\xaf\x03\x09\x59\xd2\x61\xef\x3f\x69\xe7\xf2\x0b\xca\xa8\x71\x2c\xd1\x4b\xe6\x25\x86\xd1\x90\xd0\x95\x45\x2a\x1c\x82\xc8\x5c\xc8\x1e\xfd\x2a\xbe\x81\xe1\xab\x05\x91\x65\x98\x38\xeb\x9f\xc1\x91\xf8\x8d\xd6\x0e\x68\xd7\x2a\x89\xf2\x24\x54\xa4\x6d\x6a\x73\x8b\xca\xae\xa7\x39\x2d\x24\x57\x1f\xce\x4e\xce\xde\x78\x2c\x2d\x26\x6c\x29\x13\x99\x6e\x70\xd1\xce\x95\x5b\x3c\x57\xbc\x7c\x5d\x8e\x3b\x4e\xa4\xf9\x56\xa4\x3d\xb5\xf5\x46\x62\x43\x18\x3b\x6e\x6c\x2b\x81\xd2\x4c\x95\x7f\x71\xd6\xdb\x54\x17\xbe\x94\x6d\x7c\xfe\xf6\x5b\x48\xd7\xf8\xd1\xa1\x26\x2b\x8e\x91\xd2\xe7\x1c\x15\xf2\x91\xd3\xe7\x7a\x85\xe6\x44\x58\x0c\xf7\xf8\x3e\x98\x4d\x59\xf3\x46\xe1\xf9\x79\xed\x6e\xc2\x78\xb0\x60\x1a\x67\xe7\x11\x50\xf2\xef\x18\x30\x2b\x7a\xa6\x2d\xea\x78\xda\x96\x33\x1e\xb3\x53\x98\xec\x0e\xb0\xd1\x38\x76\x45\xd9\xed\xf8\xdd\x05\xd1\x9d\x08\x54\x2b\x1b\xa3\xc3\x8d\x5c\xa2\xbd\xa6\x4e\x06\x38\x37\xf1\x32\x8c\x99\x49\x2c\x44\xbc\xb4\x1f\xf1\xda\x1b\x55\x6e\xf3\x35\xf1\x68\x21\xec\xc5\x4a\xbd\x37\xba\x40\xe3\xd6\x2d\x5c\x55\x53\xb4\xb5\x41\x50\xf1\x6d\x54\x1f\x9b\xcb\x6e\x5a\xf7\x23\x61\xc6\x9f\xe0\xe1\x56\xcf\xba\x7a\x02\xec\x23\xfa\x3f\x71\xed\x93\x87\xce\x6d\x9a\xef\x9f\x5b\x5d\xdb\x27\xac\xdf\xe2\x36\xee\xc7\xed\xc8\x26\xfd\x55\xd9\xdf\x58\xd2\x6e\x7e\x3e\x4d\x28\xcd\xdd\x3f\x56\xb8\x6e\x62\xd3\x71\xa7\x7c\xda\x0d\xfe\x78\xda\x95\xa0\x6e\xbd\x88\x3c\x5c\xf3\x94\xd7\x05\xea\x2c\x8c\x7f\xbc\xc5\xf5\x4d\xa8\x4d\x39\x8e\x55\x86\x5e\xe1\x51\x5c\x40\xff\x77\x0b\x1d\x83\xb5\x24\xdb\x18\xff\x58\x43\xdc\xec\xe8\x50\xb7\x99\xda\x82\xda\x79\x7f\xb1\xb1\x53\x37\xf6\x6d\xdc\x6d\x17\x15\xfb\x4a\x36\x36\x35\xaa\x0c\x2a\xfa\xce\xee\x4c\x2c\x20\xdc\xaf\x9c\xc4\xfe\x4d\xc0\x60\x1b\x85\x6f\xb5\x45\x08\xcd\x54\xa3\x7a\xc8\x9b\xc3\xde\x17\xf7\xa8\xa4\x2e\x8f\x26\x87\x20\xff\xde\xc2\x0e\xf2\xd9\xb3\xd6\xbb\x8c\x85\xcc\xd3\x13\xdf\x21\xe3\x85\x1f\xe5\x4d\xfd\x86\xe8\x0d\xe6\x38\x17\x0e\x39\x49\x29\x29\xdb\xa6\x58\xe1\x1f\xdb\x70\x93\xa5\xaa\xe7\x3d\x51\xfd\x0a\xdd\x63\xf1\x61\x7b\x4d\x2b\x48\x50\xaa\xec\xf5\xab\x5e\x19\xde\xf7\xd0\xda\x8a\xdd\xba\x7f\xbf\xb5\xce\x9f\x09\x7f\x1c\x36\x1b\xd9\xb5\x3c\xc3\xaf\x51\xa2\x55\x22\x5c\xbf\x9d\xc4\x55\xf8\x76\x25\x31\x11\xec\xa3\xbc\x19\x34\x2e\xcf\x1b\x59\x61\xc0\x1f\xb3\xbf\x66\x76\xb2\x91\x28\x79\x2e\x02\xd8\xe7\x66\x98\x0a\x46\x14\x8b\xe6\xf0\x8f\x59\x
a3\xc1\x21\x6c\xfc\x1b\x8f\xe1\x8a\x4a\x11\x13\x4e\x37\x14\xcd\x4d\x38\x1e\xdc\x04\x1c\x8f\xe1\x27\x1a\x67\xb0\xaa\x80\x6e\x82\xcd\x85\xdd\xda\x8d\xc0\xbe\x17\xe1\xad\x97\x92\x6e\x8b\x46\xae\x88\x3b\xf6\x3a\x53\xd2\xd5\x59\x6b\xe3\x32\x3d\xfc\x4d\xd0\x8f\xfc\xee\x75\x77\x24\x67\x24\x27\x2c\x50\xb8\x0e\x19\xcb\x43\x2b\x8e\x7f\x8e\x9c\xf8\xca\x1e\x9a\x5d\xc8\x61\x24\x9c\xff\xa4\xcb\x6f\xac\xd3\x20\xa8\xc6\xb5\xda\xd0\x13\x7b\x12\xa9\x13\x31\xe0\x07\xba\xf4\xb0\xe6\xe8\xd8\x5a\x39\xa7\xc2\x23\x2c\x6a\xe8\x83\x3f\xfd\x2a\x03\xfb\xe3\x67\xbf\xf3\xd8\xdb\xa7\x5e\x75\x51\xb6\xe8\x6c\x41\x5c\x62\x22\x0b\x19\x4d\xb7\xa1\x2a\x3b\xb5\x64\x3c\x86\xeb\xf0\x37\x37\x98\x76\xeb\xcb\x4e\x55\x69\x69\x4a\xe8\x9c\x3c\xa2\x24\xac\x23\x54\x8c\x85\x5a\xdb\x67\x8c\xd7\xbe\xa5\xb4\x53\x2b\xc2\x1f\x9b\xb2\x4f\xd1\xf5\xd5\xf4\x17\xb5\xe3\x0b\xca\x51\x35\x73\xb6\x75\xe3\xc2\x97\x8c\xb3\xb5\xc3\xad\x23\x6f\x65\xf2\xbf\xf3\xd4\x6b\x55\xeb\x38\x7a\xff\xb2\x33\x6a\xd9\x9e\xc1\xac\x54\xe9\x71\xb7\x72\xf2\x41\xd3\x7c\x53\x31\xf7\xc2\x9f\x51\xb5\xd1\xc7\x23\x1f\x8f\x21\xbc\x65\xdf\x16\x9c\x2a\x59\x9c\xc4\x62\xef\xa1\xf7\x3f\x01\x00\x00\xff\xff\x16\xa0\x42\x98\x4c\x3d\x00\x00") func call_tracer_parityJsBytes() ([]byte, error) { return bindataRead( diff --git a/eth/tracers/internal/tracers/call_tracer_parity.js b/eth/tracers/internal/tracers/call_tracer_parity.js index c32a11e33c..3a81228aaf 100644 --- a/eth/tracers/internal/tracers/call_tracer_parity.js +++ b/eth/tracers/internal/tracers/call_tracer_parity.js @@ -312,7 +312,7 @@ }; var extraCtx = { blockHash: ctx.blockHash, - blockNumber: ctx.blockNumber, + blockNumber: ctx.block, transactionHash: ctx.transactionHash, transactionPosition: ctx.transactionPosition, }; @@ -368,7 +368,7 @@ subtraces: 0, transactionPosition: extraCtx.transactionPosition, transactionHash: extraCtx.transactionHash, - blockNumber: call.block || extraCtx.blockNumber, + blockNumber: extraCtx.blockNumber, blockHash: extraCtx.blockHash, time: call.time, } diff --git a/eth/tracers/tracer.go b/eth/tracers/tracer.go index dc69425c2d..84e0ee8297 100644 --- a/eth/tracers/tracer.go +++ b/eth/tracers/tracer.go @@ -295,8 +295,6 @@ func (cw *contractWrapper) pushObject(vm *duktape.Context) { // Tracer provides an implementation of Tracer that evaluates a Javascript // function for each VM execution step. type Tracer struct { - inited bool // Flag whether the context was already inited from the EVM - vm *duktape.Context // Javascript VM instance tracerObject int // Stack index of the tracer JavaScript object @@ -573,7 +571,7 @@ func wrapError(context string, err error) error { // CapturePreEVM implements the Tracer interface to bootstrap the tracing context, // before EVM init. This is useful for reading initial balance, state, etc. -func (jst *Tracer) CapturePreEVM(env *vm.EVM, inputs map[string]interface{}) error { +func (jst *Tracer) CapturePreEVM(env *vm.EVM, inputs map[string]interface{}) { jst.dbWrapper.db = env.StateDB for key, val := range inputs { @@ -585,15 +583,13 @@ func (jst *Tracer) CapturePreEVM(env *vm.EVM, inputs map[string]interface{}) err _, err := jst.call("init", "ctx", "db") if err != nil { jst.err = wrapError("init", err) - return nil + return } } - - return nil } // CaptureStart implements the Tracer interface to initialize the tracing operation. 
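The JavaScript fix above pairs with the Go change below: the duktape tracer publishes the block number into the shared `ctx` object under the key `block` (set once in `CaptureStart`), so `call_tracer_parity.js` has to read `ctx.block` rather than the never-populated `ctx.blockNumber`. A minimal sketch of that contract, with a plain map standing in for the duktape-backed `ctx` object (hypothetical illustration, not part of the patch):

	package main

	import "fmt"

	func main() {
		// jsCtx stands in for jst.ctx, which the JS tracer sees as the global `ctx`.
		jsCtx := map[string]interface{}{}
		jsCtx["type"] = "CALL"
		jsCtx["block"] = uint64(1234567) // filled in CaptureStart from env.Context.BlockNumber

		// call_tracer_parity.js then copies ctx.block into extraCtx.blockNumber:
		fmt.Println("blockNumber:", jsCtx["block"])
	}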
diff --git a/eth/tracers/tracer.go b/eth/tracers/tracer.go
index dc69425c2d..84e0ee8297 100644
--- a/eth/tracers/tracer.go
+++ b/eth/tracers/tracer.go
@@ -295,8 +295,6 @@ func (cw *contractWrapper) pushObject(vm *duktape.Context) {
 // Tracer provides an implementation of Tracer that evaluates a Javascript
 // function for each VM execution step.
 type Tracer struct {
-	inited bool // Flag whether the context was already inited from the EVM
-
 	vm *duktape.Context // Javascript VM instance
 
 	tracerObject int // Stack index of the tracer JavaScript object
@@ -573,7 +571,7 @@ func wrapError(context string, err error) error {
 
 // CapturePreEVM implements the Tracer interface to bootstrap the tracing context,
 // before EVM init. This is useful for reading initial balance, state, etc.
-func (jst *Tracer) CapturePreEVM(env *vm.EVM, inputs map[string]interface{}) error {
+func (jst *Tracer) CapturePreEVM(env *vm.EVM, inputs map[string]interface{}) {
 	jst.dbWrapper.db = env.StateDB
 
 	for key, val := range inputs {
@@ -585,15 +583,13 @@ func (jst *Tracer) CapturePreEVM(env *vm.EVM, inputs map[string]interface{}) err
 		_, err := jst.call("init", "ctx", "db")
 		if err != nil {
 			jst.err = wrapError("init", err)
-			return nil
+			return
 		}
 	}
-
-	return nil
 }
 
 // CaptureStart implements the Tracer interface to initialize the tracing operation.
-func (jst *Tracer) CaptureStart(from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) error {
+func (jst *Tracer) CaptureStart(env *vm.EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) {
 	jst.ctx["type"] = "CALL"
 	if create {
 		jst.ctx["type"] = "CREATE"
 	}
@@ -604,128 +600,117 @@ func (jst *Tracer) CaptureStart(from common.Address, to common.Address, create b
 	jst.ctx["gas"] = gas
 	jst.ctx["value"] = value
 
-	return nil
+	// Initialize the context
+	jst.ctx["block"] = env.Context.BlockNumber.Uint64()
+	jst.dbWrapper.db = env.StateDB
+	// Compute intrinsic gas
+	hasEIP2 := env.ChainConfig().IsEnabled(env.ChainConfig().GetEIP2Transition, env.Context.BlockNumber)
+	hasEIP2028 := env.ChainConfig().IsEnabled(env.ChainConfig().GetEIP2028Transition, env.Context.BlockNumber)
+	intrinsicGas, err := core.IntrinsicGas(input, nil, jst.ctx["type"] == "CREATE", hasEIP2, hasEIP2028)
+	if err != nil {
+		return
+	}
+	jst.ctx["intrinsicGas"] = intrinsicGas
 }
 
 // CaptureState implements the Tracer interface to trace a single step of VM execution.
-func (jst *Tracer) CaptureState(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, memory *vm.Memory, stack *vm.Stack, rdata []byte, contract *vm.Contract, depth int, err error) error {
-	if jst.err == nil {
-		// Initialize the context if it wasn't done yet
-		if !jst.inited {
-			jst.ctx["block"] = env.Context.BlockNumber.Uint64()
-			// Compute intrinsic gas
-			isHomestead := env.ChainConfig().IsEnabled(env.ChainConfig().GetEIP2Transition, env.Context.BlockNumber)
-			isIstanbul := env.ChainConfig().IsEnabled(env.ChainConfig().GetEIP2028Transition, env.Context.BlockNumber)
-			var input []byte
-			if data, ok := jst.ctx["input"].([]byte); ok {
-				input = data
-			}
-			intrinsicGas, err := core.IntrinsicGas(input, nil, jst.ctx["type"] == "CREATE", isHomestead, isIstanbul)
-			if err != nil {
-				return err
-			}
-			jst.ctx["intrinsicGas"] = intrinsicGas
-			jst.inited = true
-		}
-		// If tracing was interrupted, set the error and stop
-		if atomic.LoadUint32(&jst.interrupt) > 0 {
-			jst.err = jst.reason
-			return nil
-		}
-		jst.opWrapper.op = op
-		jst.stackWrapper.stack = stack
-		jst.memoryWrapper.memory = memory
-		jst.contractWrapper.contract = contract
-		jst.dbWrapper.db = env.StateDB
-
-		*jst.pcValue = uint(pc)
-		*jst.gasValue = uint(gas)
-		*jst.availableGasValue = uint(env.CallGasTemp)
-		*jst.costValue = uint(cost)
-		*jst.depthValue = uint(depth)
-		*jst.returnData = rdata
-		*jst.refundValue = uint(env.StateDB.GetRefund())
-
-		jst.opErrorValue = nil
-		if env.CallErrorTemp != nil {
-			jst.opErrorValue = new(string)
-			*jst.opErrorValue = env.CallErrorTemp.Error()
-
-			env.CallErrorTemp = nil // clean temp error storage, for debug tracing
-		}
-		jst.errorValue = nil
-		if err != nil {
-			jst.errorValue = new(string)
-			*jst.errorValue = err.Error()
-		}
+func (jst *Tracer) CaptureState(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) {
+	if jst.err != nil {
+		return
+	}
+	// If tracing was interrupted, set the error and stop
+	if atomic.LoadUint32(&jst.interrupt) > 0 {
+		jst.err = jst.reason
+		return
+	}
+	jst.opWrapper.op = op
+	jst.stackWrapper.stack = scope.Stack
+	jst.memoryWrapper.memory = scope.Memory
+	jst.contractWrapper.contract = scope.Contract
+
+	*jst.pcValue = uint(pc)
+	*jst.gasValue = uint(gas)
+	*jst.availableGasValue = uint(env.CallGasTemp)
+	*jst.costValue = uint(cost)
+	*jst.depthValue = uint(depth)
+	*jst.returnData = rData
+	*jst.refundValue = uint(env.StateDB.GetRefund())
+
+	jst.opErrorValue = nil
+	if env.CallErrorTemp != nil {
+		jst.opErrorValue = new(string)
+		*jst.opErrorValue = env.CallErrorTemp.Error()
 
-		// Checks wether tracer supports `getCallstackLength` method in order to achieve optimal performance for call_tracer*
-		// in which case it checks if the call to `step` method has to be made, as the duktape prop call is an expensive operation
-		if jst.supportsStepPerfOptimisations {
-			run := false
-
-			if jst.callTracerCallstackLength == nil {
-				jst.vm.PushString("getCallstackLength")
-				code := jst.vm.PcallProp(jst.tracerObject, 0)
-				if code != 0 {
-					jst.vm.Pop()
-					err := jst.vm.SafeToString(-1)
-					jst.err = wrapError("step", errors.New(err))
-					return nil
-				}
-
-				jst.callTracerCallstackLength = new(uint)
-				*jst.callTracerCallstackLength = jst.vm.GetUint(-1)
+		env.CallErrorTemp = nil // clean temp error storage, for debug tracing
+	}
+	jst.errorValue = nil
+	if err != nil {
+		jst.errorValue = new(string)
+		*jst.errorValue = err.Error()
+	}
+
+	// Check whether the tracer supports the `getCallstackLength` method in order to achieve optimal performance for call_tracer*,
+	// in which case it checks if the call to the `step` method has to be made, as the duktape prop call is an expensive operation
+	if jst.supportsStepPerfOptimisations {
+		run := false
+
+		if jst.callTracerCallstackLength == nil {
+			jst.vm.PushString("getCallstackLength")
+			code := jst.vm.PcallProp(jst.tracerObject, 0)
+			if code != 0 {
 				jst.vm.Pop()
+				err := jst.vm.SafeToString(-1)
+				jst.err = wrapError("step", errors.New(err))
+				return
 			}
 
-			if *jst.callTracerCallstackLength-1 == uint(depth) {
-				run = true
-			} else if jst.handleNextOpCode {
-				jst.handleNextOpCode = false
-				run = true
-			} else if op&0xf0 == 0xf0 {
-				jst.handleNextOpCode = true
-				run = true
-			}
+			jst.callTracerCallstackLength = new(uint)
+			*jst.callTracerCallstackLength = jst.vm.GetUint(-1)
+			jst.vm.Pop()
+		}
 
-			if !run {
-				return nil
-			} else {
-				jst.callTracerCallstackLength = nil
-			}
+		if *jst.callTracerCallstackLength-1 == uint(depth) {
+			run = true
+		} else if jst.handleNextOpCode {
+			jst.handleNextOpCode = false
+			run = true
+		} else if op&0xf0 == 0xf0 {
+			jst.handleNextOpCode = true
+			run = true
 		}
 
-		_, err := jst.call("step", "log", "db")
-		if err != nil {
-			jst.err = wrapError("step", err)
+		if !run {
+			return
+		} else {
+			jst.callTracerCallstackLength = nil
 		}
 	}
-	return nil
+
+	if _, err := jst.call("step", "log", "db"); err != nil {
+		jst.err = wrapError("step", err)
+	}
 }
 
 // CaptureFault implements the Tracer interface to trace an execution fault
-// while running an opcode.
-func (jst *Tracer) CaptureFault(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, memory *vm.Memory, stack *vm.Stack, contract *vm.Contract, depth int, err error) error {
-	if jst.err == nil {
-		// clean up op error when CaptureFault is being called directly
-		jst.opErrorValue = nil
-		env.CallErrorTemp = nil // clean temp error storage, for debug tracing
+func (jst *Tracer) CaptureFault(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, depth int, err error) {
+	if jst.err != nil {
+		return
+	}
+	// clean up op error when CaptureFault is being called directly
+	jst.opErrorValue = nil
+	env.CallErrorTemp = nil // clean temp error storage, for debug tracing
 
-		// Apart from the error, everything matches the previous invocation
-		jst.errorValue = new(string)
-		*jst.errorValue = err.Error()
+	// Apart from the error, everything matches the previous invocation
+	jst.errorValue = new(string)
+	*jst.errorValue = err.Error()
 
-		_, err := jst.call("fault", "log", "db")
-		if err != nil {
-			jst.err = wrapError("fault", err)
-		}
+	if _, err := jst.call("fault", "log", "db"); err != nil {
+		jst.err = wrapError("fault", err)
 	}
-	return nil
 }
 
 // CaptureEnd is called after the call finishes to finalize the tracing.
-func (jst *Tracer) CaptureEnd(env *vm.EVM, output []byte, gasUsed uint64, t time.Duration, err error) error {
+func (jst *Tracer) CaptureEnd(env *vm.EVM, output []byte, gasUsed uint64, t time.Duration, err error) {
	jst.dbWrapper.db = env.StateDB
 
 	jst.ctx["output"] = output
@@ -735,7 +720,6 @@ func (jst *Tracer) CaptureEnd(env *vm.EVM, output []byte, gasUsed uint64, t time
 	if err != nil {
 		jst.ctx["error"] = err.Error()
 	}
-	return nil
 }
 
 // addCtxIntoState adds/updates the ctx vars in the duktape context
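Taken together, the tracer.go changes migrate the duktape tracer to the scope-based hook API: per-step state arrives bundled in a *vm.ScopeContext, the hooks no longer return errors (failures are latched in jst.err and surfaced later by GetResult), and one-time context setup moves from the first CaptureState call into CaptureStart. A minimal sketch of a tracer written against the hook shapes implied by these signatures (illustrative only; it assumes the vm.Tracer interface mirrors the four hooks above):

	package tracedemo

	import (
		"math/big"
		"time"

		"github.com/ethereum/go-ethereum/common"
		"github.com/ethereum/go-ethereum/core/vm"
	)

	// countingTracer counts executed opcodes; since the hooks return nothing,
	// any internal failure must be remembered and reported out-of-band.
	type countingTracer struct {
		steps uint64
	}

	func (t *countingTracer) CaptureStart(env *vm.EVM, from, to common.Address, create bool, input []byte, gas uint64, value *big.Int) {
	}

	func (t *countingTracer) CaptureState(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) {
		t.steps++
		_ = scope.Contract.Address() // stack, memory and contract now travel inside scope
	}

	func (t *countingTracer) CaptureFault(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, depth int, err error) {
	}

	func (t *countingTracer) CaptureEnd(env *vm.EVM, output []byte, gasUsed uint64, d time.Duration, err error) {
	}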
diff --git a/eth/tracers/tracer_test.go b/eth/tracers/tracer_test.go
index 16eb4443a5..ac449fadbc 100644
--- a/eth/tracers/tracer_test.go
+++ b/eth/tracers/tracer_test.go
@@ -47,7 +47,8 @@ type dummyStatedb struct {
 	state.StateDB
 }
 
-func (*dummyStatedb) GetRefund() uint64 { return 1337 }
+func (*dummyStatedb) GetRefund() uint64                       { return 1337 }
+func (*dummyStatedb) GetBalance(addr common.Address) *big.Int { return new(big.Int) }
 
 type vmContext struct {
 	blockCtx vm.BlockContext
@@ -67,7 +68,7 @@ func runTrace(tracer *Tracer, vmctx *vmContext) (json.RawMessage, error) {
 	contract := vm.NewContract(account{}, account{}, value, startGas)
 	contract.Code = []byte{byte(vm.PUSH1), 0x1, byte(vm.PUSH1), 0x1, 0x0}
 
-	tracer.CaptureStart(contract.Caller(), contract.Address(), false, []byte{}, startGas, value)
+	tracer.CaptureStart(env, contract.Caller(), contract.Address(), false, []byte{}, startGas, value)
 	ret, err := env.Interpreter().Run(contract, []byte{}, false)
 	tracer.CaptureEnd(env, ret, startGas-contract.Gas, 1, err)
 	if err != nil {
@@ -150,14 +151,55 @@ func TestHaltBetweenSteps(t *testing.T) {
 		t.Fatal(err)
 	}
 	env := vm.NewEVM(vm.BlockContext{BlockNumber: big.NewInt(1)}, vm.TxContext{}, &dummyStatedb{}, params.TestChainConfig, vm.Config{Debug: true, Tracer: tracer})
-	contract := vm.NewContract(&account{}, &account{}, big.NewInt(0), 0)
+	scope := &vm.ScopeContext{
+		Contract: vm.NewContract(&account{}, &account{}, big.NewInt(0), 0),
+	}
 
-	tracer.CaptureState(env, 0, 0, 0, 0, nil, nil, nil, contract, 0, nil)
+	tracer.CaptureState(env, 0, 0, 0, 0, scope, nil, 0, nil)
 	timeout := errors.New("stahp")
 	tracer.Stop(timeout)
-	tracer.CaptureState(env, 0, 0, 0, 0, nil, nil, nil, contract, 0, nil)
+	tracer.CaptureState(env, 0, 0, 0, 0, scope, nil, 0, nil)
 
 	if _, err := tracer.GetResult(); err.Error() != timeout.Error() {
 		t.Errorf("Expected timeout error, got %v", err)
 	}
 }
+
+// TestNoStepExec tests a regular value transfer (no exec), and accessing the statedb
+// in 'result'
+func TestNoStepExec(t *testing.T) {
+	runEmptyTrace := func(tracer *Tracer, vmctx *vmContext) (json.RawMessage, error) {
+		env := vm.NewEVM(vmctx.blockCtx, vmctx.txCtx, &dummyStatedb{}, params.TestChainConfig, vm.Config{Debug: true, Tracer: tracer})
+		startGas := uint64(10000)
+		contract := vm.NewContract(account{}, account{}, big.NewInt(0), startGas)
+		tracer.CaptureStart(env, contract.Caller(), contract.Address(), false, []byte{}, startGas, big.NewInt(0))
+		tracer.CaptureEnd(env, nil, startGas-contract.Gas, 1, nil)
+		return tracer.GetResult()
+	}
+	execTracer := func(code string) []byte {
+		t.Helper()
+		ctx := &vmContext{blockCtx: vm.BlockContext{BlockNumber: big.NewInt(1)}, txCtx: vm.TxContext{GasPrice: big.NewInt(100000)}}
+		tracer, err := New(code, ctx.txCtx)
+		if err != nil {
+			t.Fatal(err)
+		}
+		ret, err := runEmptyTrace(tracer, ctx)
+		if err != nil {
+			t.Fatal(err)
+		}
+		return ret
+	}
+	for i, tt := range []struct {
+		code string
+		want string
+	}{
+		{ // tests that we don't panic on accessing the db methods
+			code: "{depths: [], step: function() {}, fault: function() {},  result: function(ctx, db){ return db.getBalance(ctx.to)} }",
+			want: `"0"`,
+		},
+	} {
+		if have := execTracer(tt.code); tt.want != string(have) {
+			t.Errorf("testcase %d: expected return value to be %s got %s\n\tcode: %v", i, tt.want, string(have), tt.code)
+		}
+	}
+}
diff --git a/ethclient/ethclient_test.go b/ethclient/ethclient_test.go
index 78a840099c..8542ff2fc4 100644
--- a/ethclient/ethclient_test.go
+++ b/ethclient/ethclient_test.go
@@ -295,8 +295,9 @@ func testHeader(t *testing.T, chain []*types.Block, client *rpc.Client) {
 			want: chain[1].Header(),
 		},
 		"future_block": {
-			block: big.NewInt(1000000000),
-			want:  nil,
+			block:   big.NewInt(1000000000),
+			want:    nil,
+			wantErr: ethereum.NotFound,
 		},
 	}
 	for name, tt := range tests {
@@ -306,10 +307,10 @@ func testHeader(t *testing.T, chain []*types.Block, client *rpc.Client) {
 			defer cancel()
 
 			got, err := ec.HeaderByNumber(ctx, tt.block)
-			if tt.wantErr != nil && (err == nil || err.Error() != tt.wantErr.Error()) {
+			if !errors.Is(err, tt.wantErr) {
 				t.Fatalf("HeaderByNumber(%v) error = %q, want %q", tt.block, err, tt.wantErr)
 			}
-			if got != nil && got.Number.Sign() == 0 {
+			if got != nil && got.Number != nil && got.Number.Sign() == 0 {
 				got.Number = big.NewInt(0) // hack to make DeepEqual work
 			}
 			if !reflect.DeepEqual(got, tt.want) {
@@ -886,8 +887,10 @@ var allRPCMethods = []string{
 	"admin_peerEvents",
 	"admin_removePeer",
 	"admin_removeTrustedPeer",
+	"admin_startHTTP",
 	"admin_startRPC",
 	"admin_startWS",
+	"admin_stopHTTP",
 	"admin_stopRPC",
 	"admin_stopWS",
 	"debug_accountRange",
@@ -944,6 +947,7 @@ var allRPCMethods = []string{
 	"eth_call",
 	"eth_chainId",
 	"eth_coinbase",
+	"eth_createAccessList",
 	"eth_estimateGas",
 	"eth_etherbase",
 	"eth_fillTransaction",
@@ -991,7 +995,7 @@ var allRPCMethods = []string{
 	"eth_sendTransaction",
 	"eth_sign",
 	"eth_signTransaction",
-	"eth_submitHashRate",
+	"eth_submitHashrate",
 	"eth_submitWork",
 	"eth_subscribe",
 	"eth_syncing",
@@ -999,9 +1003,8 @@ var allRPCMethods = []string{
 	"eth_unsubscribe",
 	"ethash_getHashrate",
 	"ethash_getWork",
-	"ethash_submitHashRate",
+	"ethash_submitHashrate",
 	"ethash_submitWork",
-	"miner_getHashrate",
 	"miner_setEtherbase",
 	"miner_setExtra",
 	"miner_setGasPrice",
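The ethclient change above makes the future-block expectation explicit: HeaderByNumber reports a missing block as ethereum.NotFound, and the test now matches it with errors.Is instead of comparing error strings. Callers can do the same; a short usage sketch (the endpoint URL is hypothetical):

	package main

	import (
		"context"
		"errors"
		"fmt"
		"log"
		"math/big"

		"github.com/ethereum/go-ethereum"
		"github.com/ethereum/go-ethereum/ethclient"
	)

	func main() {
		client, err := ethclient.Dial("http://localhost:8545") // hypothetical node endpoint
		if err != nil {
			log.Fatal(err)
		}
		// Ask for a block far beyond the current head, as the test's "future_block" case does.
		_, err = client.HeaderByNumber(context.Background(), big.NewInt(1000000000))
		if errors.Is(err, ethereum.NotFound) {
			fmt.Println("no such header yet")
			return
		}
		if err != nil {
			log.Fatal(err)
		}
	}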
diff --git a/ethdb/leveldb/leveldb.go b/ethdb/leveldb/leveldb.go
index 70ac7a91ac..5d19cc3577 100644
--- a/ethdb/leveldb/leveldb.go
+++ b/ethdb/leveldb/leveldb.go
@@ -83,7 +83,7 @@ type Database struct {
 
 // New returns a wrapped LevelDB object. The namespace is the prefix that the
 // metrics reporting should use for surfacing internal stats.
-func New(file string, cache int, handles int, namespace string) (*Database, error) {
+func New(file string, cache int, handles int, namespace string, readonly bool) (*Database, error) {
 	return NewCustom(file, namespace, func(options *opt.Options) {
 		// Ensure we have some minimal caching and file guarantees
 		if cache < minCache {
@@ -96,6 +96,9 @@ func New(file string, cache int, handles int, namespace string) (*Database, erro
 		options.OpenFilesCacheCapacity = handles
 		options.BlockCacheCapacity = cache / 2 * opt.MiB
 		options.WriteBuffer = cache / 4 * opt.MiB // Two of these are used internally
+		if readonly {
+			options.ReadOnly = true
+		}
 	})
 }
 
@@ -458,7 +461,7 @@ func (b *batch) Put(key, value []byte) error {
 // Delete inserts the a key removal into the batch for later committing.
 func (b *batch) Delete(key []byte) error {
 	b.b.Delete(key)
-	b.size++
+	b.size += len(key)
 	return nil
 }
 
diff --git a/ethdb/memorydb/memorydb.go b/ethdb/memorydb/memorydb.go
index 4c5e1a84de..fedc9e326c 100644
--- a/ethdb/memorydb/memorydb.go
+++ b/ethdb/memorydb/memorydb.go
@@ -211,7 +211,7 @@ func (b *batch) Put(key, value []byte) error {
 // Delete inserts the a key removal into the batch for later committing.
 func (b *batch) Delete(key []byte) error {
 	b.writes = append(b.writes, keyvalue{common.CopyBytes(key), nil, true})
-	b.size += 1
+	b.size += len(key)
 	return nil
 }
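Both batch implementations above also change their size accounting: a queued deletion now contributes len(key) bytes instead of a flat 1, so size-based flush heuristics track buffered data rather than entry counts. A sketch of the kind of caller this matters for, flushing on ethdb.IdealBatchSize (the pruning loop itself is hypothetical):

	package prunedemo

	import "github.com/ethereum/go-ethereum/ethdb"

	// pruneKeys deletes keys in batches, flushing whenever the buffered
	// batch grows past the ideal size.
	func pruneKeys(db ethdb.KeyValueStore, keys [][]byte) error {
		batch := db.NewBatch()
		for _, key := range keys {
			if err := batch.Delete(key); err != nil {
				return err
			}
			// ValueSize now grows by len(key) per delete, so this threshold
			// reflects real buffered bytes, not the number of entries.
			if batch.ValueSize() >= ethdb.IdealBatchSize {
				if err := batch.Write(); err != nil {
					return err
				}
				batch.Reset()
			}
		}
		return batch.Write()
	}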
diff --git a/ethstats/ethstats.go b/ethstats/ethstats.go
index f5ffa2bc8d..ef83e5a4eb 100644
--- a/ethstats/ethstats.go
+++ b/ethstats/ethstats.go
@@ -94,6 +94,8 @@ type Service struct {
 	pongCh chan struct{} // Pong notifications are fed into this channel
 	histCh chan []uint64 // History request block numbers are fed into this channel
 
+	headSub event.Subscription
+	txSub   event.Subscription
 }
 
 // connWrapper is a wrapper to prevent concurrent-write or concurrent-read on the
@@ -188,7 +190,12 @@ func New(node *node.Node, backend backend, engine consensus.Engine, url string)
 
 // Start implements node.Lifecycle, starting up the monitoring and reporting daemon.
 func (s *Service) Start() error {
-	go s.loop()
+	// Subscribe to chain events to execute updates on
+	chainHeadCh := make(chan core.ChainHeadEvent, chainHeadChanSize)
+	s.headSub = s.backend.SubscribeChainHeadEvent(chainHeadCh)
+	txEventCh := make(chan core.NewTxsEvent, txChanSize)
+	s.txSub = s.backend.SubscribeNewTxsEvent(txEventCh)
+	go s.loop(chainHeadCh, txEventCh)
 
 	log.Info("Stats daemon started")
 	return nil
@@ -196,22 +203,15 @@ func (s *Service) Start() error {
 
 // Stop implements node.Lifecycle, terminating the monitoring and reporting daemon.
 func (s *Service) Stop() error {
+	s.headSub.Unsubscribe()
+	s.txSub.Unsubscribe()
 	log.Info("Stats daemon stopped")
 	return nil
 }
 
 // loop keeps trying to connect to the netstats server, reporting chain events
 // until termination.
-func (s *Service) loop() {
-	// Subscribe to chain events to execute updates on
-	chainHeadCh := make(chan core.ChainHeadEvent, chainHeadChanSize)
-	headSub := s.backend.SubscribeChainHeadEvent(chainHeadCh)
-	defer headSub.Unsubscribe()
-
-	txEventCh := make(chan core.NewTxsEvent, txChanSize)
-	txSub := s.backend.SubscribeNewTxsEvent(txEventCh)
-	defer txSub.Unsubscribe()
-
+func (s *Service) loop(chainHeadCh chan core.ChainHeadEvent, txEventCh chan core.NewTxsEvent) {
 	// Start a goroutine that exhausts the subscriptions to avoid events piling up
 	var (
 		quitCh = make(chan struct{})
@@ -244,9 +244,9 @@ func (s *Service) loop() {
 			}
 
 		// node stopped
-		case <-txSub.Err():
+		case <-s.txSub.Err():
 			break HandleLoop
-		case <-headSub.Err():
+		case <-s.headSub.Err():
 			break HandleLoop
 		}
 	}
@@ -775,7 +775,7 @@ func (s *Service) reportStats(conn *connWrapper) error {
 	fullBackend, ok := s.backend.(fullNodeBackend)
 	if ok {
 		mining = fullBackend.Miner().Mining()
-		hashrate = int(fullBackend.Miner().HashRate())
+		hashrate = int(fullBackend.Miner().Hashrate())
 
 		sync := fullBackend.Downloader().Progress()
 		syncing = fullBackend.CurrentHeader().Number.Uint64() >= sync.HighestBlock
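The ethstats refactor above moves subscription ownership out of the loop goroutine: Start subscribes and hands the channels to loop, Stop unsubscribes, so shutting the service down deterministically wakes the loop via the subscriptions' Err channels instead of relying on the goroutine's deferred cleanup. The general pattern, sketched with a hypothetical service (names are illustrative):

	package statsdemo

	import (
		"github.com/ethereum/go-ethereum/core"
		"github.com/ethereum/go-ethereum/event"
	)

	type backend interface {
		SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription
	}

	type service struct {
		headSub event.Subscription
	}

	// start owns the subscription for the whole service lifetime.
	func (s *service) start(b backend) {
		headCh := make(chan core.ChainHeadEvent, 10)
		s.headSub = b.SubscribeChainHeadEvent(headCh)
		go s.loop(headCh)
	}

	// stop tears the subscription down; the loop observes Err() and exits.
	func (s *service) stop() {
		s.headSub.Unsubscribe()
	}

	func (s *service) loop(headCh chan core.ChainHeadEvent) {
		for {
			select {
			case head := <-headCh:
				_ = head // report the new chain head
			case <-s.headSub.Err():
				return
			}
		}
	}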
diff --git a/go.mod b/go.mod
index 017807b665..427c411c1b 100644
--- a/go.mod
+++ b/go.mod
@@ -1,16 +1,19 @@
 module github.com/ethereum/go-ethereum
 
-go 1.13
+go 1.15
 
 require (
 	github.com/Azure/azure-storage-blob-go v0.10.0
+	github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d // indirect
 	github.com/VictoriaMetrics/fastcache v1.5.7
 	github.com/alecthomas/jsonschema v0.0.0-20200530073317-71f438968921
-	github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6 // indirect
-	github.com/aws/aws-sdk-go v1.34.21
+	github.com/aws/aws-sdk-go-v2 v1.2.0
+	github.com/aws/aws-sdk-go-v2/config v1.1.1
+	github.com/aws/aws-sdk-go-v2/credentials v1.1.1
+	github.com/aws/aws-sdk-go-v2/service/route53 v1.1.1
 	github.com/btcsuite/btcd v0.21.0-beta
 	github.com/cespare/cp v1.1.1
-	github.com/cloudflare/cloudflare-go v0.13.2
+	github.com/cloudflare/cloudflare-go v0.14.0
 	github.com/consensys/gurvy v0.3.8
 	github.com/davecgh/go-spew v1.1.1
 	github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea
@@ -23,6 +26,7 @@ require (
 	github.com/fatih/color v1.7.0
 	github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5
 	github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff
+	github.com/go-kit/kit v0.10.0 // indirect
 	github.com/go-ole/go-ole v1.2.4 // indirect
 	github.com/go-sourcemap/sourcemap v2.1.3+incompatible // indirect
 	github.com/go-stack/stack v1.8.0
@@ -31,46 +35,51 @@ require (
 	github.com/golang/snappy v0.0.3-0.20201103224600-674baa8c7fc3
 	github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa
 	github.com/google/uuid v1.1.5
+	github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99 // indirect
 	github.com/gorilla/websocket v1.4.2
 	github.com/graph-gophers/graphql-go v0.0.0-20201113091052-beb923fada29
 	github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d
 	github.com/holiman/bloomfilter/v2 v2.0.3
 	github.com/holiman/uint256 v1.1.1
-	github.com/huin/goupnp v1.0.1-0.20200620063722-49508fba0031
+	github.com/huin/goupnp v1.0.1-0.20210310174557-0ca763054c88
 	github.com/iancoleman/strcase v0.0.0-20191112232945-16388991a334
 	github.com/influxdata/influxdb v1.8.3
 	github.com/jackpal/go-nat-pmp v1.0.2
 	github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e
 	github.com/julienschmidt/httprouter v1.3.0
 	github.com/karalabe/usb v0.0.0-20191104083709-911d15fe12a9
-	github.com/ledgerwatch/turbo-geth v0.0.0-20210308133656-9d397082536c
+	github.com/kr/pretty v0.2.0 // indirect
 	github.com/mattn/go-colorable v0.1.7
 	github.com/mattn/go-isatty v0.0.12
 	github.com/mitchellh/go-homedir v1.1.0
 	github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416
-	github.com/olekukonko/tablewriter v0.0.4
+	github.com/olekukonko/tablewriter v0.0.5
 	github.com/open-rpc/meta-schema v0.0.0-20201029221707-1b72ef2ea333
 	github.com/peterh/liner v1.2.0
 	github.com/prometheus/tsdb v0.10.0
 	github.com/rjeczalik/notify v0.9.1
 	github.com/rs/cors v1.7.0
 	github.com/shirou/gopsutil v2.20.5+incompatible
+	github.com/smartystreets/assertions v0.0.0-20190215210624-980c5ac6f3ac // indirect
 	github.com/spf13/cobra v1.1.1
 	github.com/spf13/viper v1.7.0
 	github.com/status-im/keycard-go v0.0.0-20200402102358-957c09536969
 	github.com/stretchr/testify v1.7.0
-	github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca
+	github.com/syndtr/goleveldb v1.0.1-0.20210305035536-64b5b1c73954
 	github.com/tidwall/gjson v1.6.0
 	github.com/tidwall/pretty v1.0.0
 	github.com/tyler-smith/go-bip39 v1.0.2
 	github.com/xeipuuv/gojsonschema v1.2.0
 	golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a
 	golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5
+	golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 // indirect
 	golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c
-	golang.org/x/text v0.3.3
-	golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e
+	golang.org/x/text v0.3.4
+	golang.org/x/time v0.0.0-20201208040808-7e3f01d25324
 	gonum.org/v1/gonum v0.8.1
 	gonum.org/v1/plot v0.8.0
+	google.golang.org/protobuf v1.25.0 // indirect
+	gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect
 	gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce
 	gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6
 	gopkg.in/urfave/cli.v1 v1.20.0
diff --git a/go.sum b/go.sum
index 3260a4eaea..240b144d91 100644
--- a/go.sum
+++ b/go.sum
@@ -1,4 +1,3 @@
-bazil.org/fuse v0.0.0-20180421153158-65cc252bf669/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
 cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
@@ -29,22 +28,27 @@ github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuI
 github.com/Azure/azure-storage-blob-go v0.7.0/go.mod h1:f9YQKtsG1nMisotuTPpO0tjNuEjKRYAcJU8/ydDI++4=
 github.com/Azure/azure-storage-blob-go v0.10.0 h1:evCwGreYo3XLeBV4vSxLbLiYb6e0SzsJiXQVRGsRXxs=
 github.com/Azure/azure-storage-blob-go v0.10.0/go.mod h1:ep1edmW+kNQx4UfWM9heESNmQdijykocJ0YOxmMX8SE=
+github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs=
 github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
 github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
 github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
+github.com/Azure/go-autorest/autorest/adal v0.8.3 h1:O1AGG9Xig71FxdX9HO5pGNyZ7TbSyHaVg+5eJO/jSGw=
 github.com/Azure/go-autorest/autorest/adal v0.8.3/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
 github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
+github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM=
 github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod
h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc= github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= +github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= -github.com/DataDog/datadog-go v0.0.0-20180822151419-281ae9f2d895/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= -github.com/JekaMas/notify v0.9.4/go.mod h1:KYZd45vBSOYP2/9lY38EjZtvKRZMfgWaJk8bvBxhIYk= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/Masterminds/glide v0.13.2/go.mod h1:STyF5vcenH/rUqTEv+/hBXlSTo7KYwg2oc2f4tzPWic= github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= @@ -55,13 +59,7 @@ github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tN github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/RoaringBitmap/roaring v0.4.7/go.mod h1:8khRDP4HmeXns4xIj9oGrKSz7XTQiJx2zgh7AcNke4w= -github.com/RoaringBitmap/roaring v0.4.17/go.mod h1:D3qVegWTmfCaX4Bl5CrBE9hfrSrrXIr8KVNvRsDi1NI= -github.com/RoaringBitmap/roaring v0.4.18/go.mod h1:D3qVegWTmfCaX4Bl5CrBE9hfrSrrXIr8KVNvRsDi1NI= -github.com/RoaringBitmap/roaring v0.4.21/go.mod h1:D0gp8kJQgE1A4LQ5wFLggQEyvDi06Mq5mKs52e1TwOo= -github.com/RoaringBitmap/roaring v0.5.6-0.20201124195327-6ec715d630bc/go.mod h1:WZ83fjBF/7uBHi6QoFyfGL4+xuV4Qn+xFkm4+vSzrhE= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= -github.com/Shopify/sarama v1.26.1/go.mod h1:NbSGBSSndYaIhRcBtY9V0U7AyH+x71bG668AuWys/yU= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d h1:G0m3OIz70MZUWq3EgK3CesDbo8upS2Vm9/P3FtgI+Jk= @@ -72,7 +70,6 @@ github.com/VictoriaMetrics/fastcache v1.5.7/go.mod h1:ptDBkNMQI4RtmVo8VS/XwRY6Ro github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= github.com/afex/hystrix-go 
v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= -github.com/airbrake/gobrake v3.6.1+incompatible/go.mod h1:wM4gu3Cn0W0K7GUuVWnlXZU11AGBXMILnrdOU8Kn00o= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af h1:wVe6/Ea46ZMeNkQjjBW6xcqyQA/j5e0D6GytH95g0gQ= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= github.com/alecthomas/jsonschema v0.0.0-20200530073317-71f438968921 h1:T3+cD5fYvuH36h7EZq+TDpm+d8a6FSD4pQsbmuGGQ8o= @@ -81,97 +78,47 @@ github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuy github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/alexflint/go-arg v1.1.0/go.mod h1:3Rj4baqzWaGGmZA2+bVTV8zQOZEjBQAPBnL5xLT+ftY= -github.com/alexflint/go-arg v1.2.0/go.mod h1:3Rj4baqzWaGGmZA2+bVTV8zQOZEjBQAPBnL5xLT+ftY= -github.com/alexflint/go-scalar v1.0.0/go.mod h1:GpHzbCOZXEKMEcygYQ5n/aa4Aq84zbxjy3MxYW0gjYw= +github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8= github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= -github.com/anacrolix/dht v0.0.0-20180412060941-24cbf25b72a4/go.mod h1:hQfX2BrtuQsLQMYQwsypFAab/GvHg8qxwVi4OJdR1WI= -github.com/anacrolix/dht/v2 v2.0.1/go.mod h1:GbTT8BaEtfqab/LPd5tY41f3GvYeii3mmDUK300Ycyo= -github.com/anacrolix/dht/v2 v2.2.1-0.20191103020011-1dba080fb358/go.mod h1:d7ARx3WpELh9uOEEr0+8wvQeVTOkPse4UU6dKpv4q0E= -github.com/anacrolix/dht/v2 v2.3.2-0.20200103043204-8dce00767ebd/go.mod h1:cgjKyErDnKS6Mej5D1fEqBKg3KwFF2kpFZJp3L6/fGI= -github.com/anacrolix/dht/v2 v2.5.1-0.20200317023935-129f05e9b752/go.mod h1:7RLvyOjm+ZPA7vgFRP+1eRjFzrh27p/nF0VCk5LcjoU= -github.com/anacrolix/envpprof v0.0.0-20180404065416-323002cec2fa/go.mod h1:KgHhUaQMc8cC0+cEflSgCFNFbKwi5h54gqtVn8yhP7c= -github.com/anacrolix/envpprof v1.0.0/go.mod h1:KgHhUaQMc8cC0+cEflSgCFNFbKwi5h54gqtVn8yhP7c= -github.com/anacrolix/envpprof v1.0.1/go.mod h1:My7T5oSqVfEn4MD4Meczkw/f5lSIndGAKu/0SM/rkf4= -github.com/anacrolix/envpprof v1.1.0/go.mod h1:My7T5oSqVfEn4MD4Meczkw/f5lSIndGAKu/0SM/rkf4= -github.com/anacrolix/go-libutp v0.0.0-20180522111405-6baeb806518d/go.mod h1:beQSaSxwH2d9Eeu5ijrEnHei5Qhk+J6cDm1QkWFru4E= -github.com/anacrolix/go-libutp v1.0.2/go.mod h1:uIH0A72V++j0D1nnmTjjZUiH/ujPkFxYWkxQ02+7S0U= -github.com/anacrolix/log v0.0.0-20180412014343-2323884b361d/go.mod h1:sf/7c2aTldL6sRQj/4UKyjgVZBu2+M2z9wf7MmwPiew= -github.com/anacrolix/log v0.3.0/go.mod h1:lWvLTqzAnCWPJA08T2HCstZi0L1y2Wyvm3FJgwU9jwU= -github.com/anacrolix/log v0.3.1-0.20190913000754-831e4ffe0174/go.mod h1:lWvLTqzAnCWPJA08T2HCstZi0L1y2Wyvm3FJgwU9jwU= -github.com/anacrolix/log v0.3.1-0.20191001111012-13cede988bcd/go.mod h1:lWvLTqzAnCWPJA08T2HCstZi0L1y2Wyvm3FJgwU9jwU= -github.com/anacrolix/log v0.4.0/go.mod h1:lWvLTqzAnCWPJA08T2HCstZi0L1y2Wyvm3FJgwU9jwU= -github.com/anacrolix/log v0.5.0/go.mod h1:lWvLTqzAnCWPJA08T2HCstZi0L1y2Wyvm3FJgwU9jwU= -github.com/anacrolix/log v0.6.0/go.mod h1:lWvLTqzAnCWPJA08T2HCstZi0L1y2Wyvm3FJgwU9jwU= -github.com/anacrolix/log v0.7.0/go.mod 
h1:s5yBP/j046fm9odtUTbHOfDUq/zh1W8OkPpJtnX0oQI= -github.com/anacrolix/missinggo v0.0.0-20180522035225-b4a5853e62ff/go.mod h1:b0p+7cn+rWMIphK1gDH2hrDuwGOcbB6V4VXeSsEfHVk= -github.com/anacrolix/missinggo v0.0.0-20180725070939-60ef2fbf63df/go.mod h1:kwGiTUTZ0+p4vAz3VbAI5a30t2YbvemcmspjKwrAz5s= -github.com/anacrolix/missinggo v0.2.1-0.20190310234110-9fbdc9f242a8/go.mod h1:MBJu3Sk/k3ZfGYcS7z18gwfu72Ey/xopPFJJbTi5yIo= -github.com/anacrolix/missinggo v1.1.0/go.mod h1:MBJu3Sk/k3ZfGYcS7z18gwfu72Ey/xopPFJJbTi5yIo= -github.com/anacrolix/missinggo v1.1.2-0.20190815015349-b888af804467/go.mod h1:MBJu3Sk/k3ZfGYcS7z18gwfu72Ey/xopPFJJbTi5yIo= -github.com/anacrolix/missinggo v1.2.1/go.mod h1:J5cMhif8jPmFoC3+Uvob3OXXNIhOUikzMt+uUjeM21Y= -github.com/anacrolix/missinggo/perf v1.0.0/go.mod h1:ljAFWkBuzkO12MQclXzZrosP5urunoLS0Cbvb4V0uMQ= -github.com/anacrolix/missinggo/v2 v2.2.0/go.mod h1:o0jgJoYOyaoYQ4E2ZMISVa9c88BbUBVQQW4QeRkNCGY= -github.com/anacrolix/missinggo/v2 v2.2.1-0.20191103010835-12360f38ced0/go.mod h1:ZzG3/cc3t+5zcYWAgYrJW0MBsSwNwOkTlNquBbP51Bc= -github.com/anacrolix/missinggo/v2 v2.3.0/go.mod h1:ZzG3/cc3t+5zcYWAgYrJW0MBsSwNwOkTlNquBbP51Bc= -github.com/anacrolix/missinggo/v2 v2.3.1/go.mod h1:3XNH0OEmyMUZuvXmYdl+FDfXd0vvSZhvOLy8CFx8tLg= -github.com/anacrolix/missinggo/v2 v2.4.1-0.20200227072623-f02f6484f997/go.mod h1:KY+ij+mWvwGuqSuecLjjPv5LFw5ICUc1UvRems3VAZE= -github.com/anacrolix/mmsg v0.0.0-20180515031531-a4a3ba1fc8bb/go.mod h1:x2/ErsYUmT77kezS63+wzZp8E3byYB0gzirM/WMBLfw= -github.com/anacrolix/mmsg v1.0.0/go.mod h1:x8kRaJY/dCrY9Al0PEcj1mb/uFHwP6GCJ9fLl4thEPc= -github.com/anacrolix/multiless v0.0.0-20191223025854-070b7994e841/go.mod h1:TrCLEZfIDbMVfLoQt5tOoiBS/uq4y8+ojuEVVvTNPX4= -github.com/anacrolix/stm v0.1.0/go.mod h1:ZKz7e7ERWvP0KgL7WXfRjBXHNRhlVRlbBQecqFtPq+A= -github.com/anacrolix/stm v0.1.1-0.20191106051447-e749ba3531cf/go.mod h1:zoVQRvSiGjGoTmbM0vSLIiaKjWtNPeTvXUSdJQA4hsg= -github.com/anacrolix/stm v0.2.0/go.mod h1:zoVQRvSiGjGoTmbM0vSLIiaKjWtNPeTvXUSdJQA4hsg= -github.com/anacrolix/sync v0.0.0-20171108081538-eee974e4f8c1/go.mod h1:+u91KiUuf0lyILI6x3n/XrW7iFROCZCG+TjgK8nW52w= -github.com/anacrolix/sync v0.0.0-20180611022320-3c4cb11f5a01/go.mod h1:+u91KiUuf0lyILI6x3n/XrW7iFROCZCG+TjgK8nW52w= -github.com/anacrolix/sync v0.0.0-20180808010631-44578de4e778/go.mod h1:s735Etp3joe/voe2sdaXLcqDdJSay1O0OPnM0ystjqk= -github.com/anacrolix/sync v0.2.0/go.mod h1:BbecHL6jDSExojhNtgTFSBcdGerzNc64tz3DCOj/I0g= -github.com/anacrolix/tagflag v0.0.0-20180109131632-2146c8d41bf0/go.mod h1:1m2U/K6ZT+JZG0+bdMK6qauP49QT4wE5pmhJXOKKCHw= -github.com/anacrolix/tagflag v0.0.0-20180605133421-f477c8c2f14c/go.mod h1:1m2U/K6ZT+JZG0+bdMK6qauP49QT4wE5pmhJXOKKCHw= -github.com/anacrolix/tagflag v0.0.0-20180803105420-3a8ff5428f76/go.mod h1:1m2U/K6ZT+JZG0+bdMK6qauP49QT4wE5pmhJXOKKCHw= -github.com/anacrolix/tagflag v1.0.0/go.mod h1:1m2U/K6ZT+JZG0+bdMK6qauP49QT4wE5pmhJXOKKCHw= -github.com/anacrolix/tagflag v1.0.1/go.mod h1:gb0fiMQ02qU1djCSqaxGmruMvZGrMwSReidMB0zjdxo= -github.com/anacrolix/torrent v0.0.0-20180622074351-fefeef4ee9eb/go.mod h1:3vcFVxgOASslNXHdivT8spyMRBanMCenHRpe0u5vpBs= -github.com/anacrolix/torrent v1.7.1/go.mod h1:uvOcdpOjjrAq3uMP/u1Ide35f6MJ/o8kMnFG8LV3y6g= -github.com/anacrolix/torrent v1.9.0/go.mod h1:jJJ6lsd2LD1eLHkUwFOhy7I0FcLYH0tHKw2K7ZYMHCs= -github.com/anacrolix/torrent v1.11.0/go.mod h1:FwBai7SyOFlflvfEOaM88ag/jjcBWxTOqD6dVU/lKKA= -github.com/anacrolix/torrent v1.15.2/go.mod h1:sJtcAZtlGaZLo7wCXT/EZV+hATsq0Bg6pVhhzACY0E0= -github.com/anacrolix/upnp v0.1.1/go.mod 
h1:LXsbsp5h+WGN7YR+0A7iVXm5BL1LYryDev1zuJMWYQo= -github.com/anacrolix/utp v0.0.0-20180219060659-9e0e1d1d0572/go.mod h1:MDwc+vsGEq7RMw6lr2GKOEqjWny5hO5OZXRVNaBJ2Dk= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/aristanetworks/fsnotify v1.4.2/go.mod h1:D/rtu7LpjYM8tRJphJ0hUBYpjai8SfX+aSNsWDTq/Ks= -github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3/go.mod h1:KASm+qXFKs/xjSoWn30NrWBBvdTTQq+UjkhjEJHfSFA= github.com/aristanetworks/goarista v0.0.0-20170210015632-ea17b1a17847/go.mod h1:D/tb0zPVXnP7fmsLZjtdUhSsumbK/ij54UXjjVgMGxQ= -github.com/aristanetworks/goarista v0.0.0-20200812190859-4cb0e71f3c0e/go.mod h1:QZe5Yh80Hp1b6JxQdpfSEEe8X7hTyTEZSosSrFf/oJE= -github.com/aristanetworks/splunk-hec-go v0.3.3/go.mod h1:1VHO9r17b0K7WmOlLb9nTk/2YanvOEnLMUgsFrxBROc= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= github.com/aws/aws-sdk-go v1.25.48/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.34.21 h1:M97FXuiJgDHwD4mXhrIZ7RJ4xXV6uZVPvIC2qb+HfYE= -github.com/aws/aws-sdk-go v1.34.21/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= -github.com/benbjohnson/immutable v0.2.0/go.mod h1:uc6OHo6PN2++n98KHLxW8ef4W42ylHiQSENghE1ezxI= +github.com/aws/aws-sdk-go-v2 v1.2.0 h1:BS+UYpbsElC82gB+2E2jiCBg36i8HlubTB/dO/moQ9c= +github.com/aws/aws-sdk-go-v2 v1.2.0/go.mod h1:zEQs02YRBw1DjK0PoJv3ygDYOFTre1ejlJWl8FwAuQo= +github.com/aws/aws-sdk-go-v2/config v1.1.1 h1:ZAoq32boMzcaTW9bcUacBswAmHTbvlvDJICgHFZuECo= +github.com/aws/aws-sdk-go-v2/config v1.1.1/go.mod h1:0XsVy9lBI/BCXm+2Tuvt39YmdHwS5unDQmxZOYe8F5Y= +github.com/aws/aws-sdk-go-v2/credentials v1.1.1 h1:NbvWIM1Mx6sNPTxowHgS2ewXCRp+NGTzUYb/96FZJbY= +github.com/aws/aws-sdk-go-v2/credentials v1.1.1/go.mod h1:mM2iIjwl7LULWtS6JCACyInboHirisUUdkBPoTHMOUo= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.0.2 h1:EtEU7WRaWliitZh2nmuxEXrN0Cb8EgPUFGIoTMeqbzI= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.0.2/go.mod h1:3hGg3PpiEjHnrkrlasTfxFqUsZ2GCk/fMUn4CbKgSkM= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.0.2 h1:4AH9fFjUlVktQMznF+YN33aWNXaR4VgDXyP28qokJC0= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.0.2/go.mod h1:45MfaXZ0cNbeuT0KQ1XJylq8A6+OpVV2E5kvY/Kq+u8= +github.com/aws/aws-sdk-go-v2/service/route53 v1.1.1 h1:cKr6St+CtC3/dl/rEBJvlk7A/IN5D5F02GNkGzfbtVU= +github.com/aws/aws-sdk-go-v2/service/route53 v1.1.1/go.mod 
h1:rLiOUrPLW/Er5kRcQ7NkwbjlijluLsrIbu/iyl35RO4= +github.com/aws/aws-sdk-go-v2/service/sso v1.1.1 h1:37QubsarExl5ZuCBlnRP+7l1tNwZPBSTqpTBrPH98RU= +github.com/aws/aws-sdk-go-v2/service/sso v1.1.1/go.mod h1:SuZJxklHxLAXgLTc1iFXbEWkXs7QRTQpCLGaKIprQW0= +github.com/aws/aws-sdk-go-v2/service/sts v1.1.1 h1:TJoIfnIFubCX0ACVeJ0w46HEH5MwjwYN4iFhuYIhfIY= +github.com/aws/aws-sdk-go-v2/service/sts v1.1.1/go.mod h1:Wi0EBZwiz/K44YliU0EKxqTCJGUfYTWXrrBwkq736bM= +github.com/aws/smithy-go v1.1.0 h1:D6CSsM3gdxaGaqXnPgOBCeL6Mophqzu7KJOu7zW78sU= +github.com/aws/smithy-go v1.1.0/go.mod h1:EzMw8dbp/YJL4A5/sbhGddag+NPT7q084agLbB9LgIw= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= -github.com/blend/go-sdk v1.1.1/go.mod h1:IP1XHXFveOXHRnojRJO7XvqWGqyzevtXND9AdSztAe8= github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c= github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= -github.com/bradfitz/iter v0.0.0-20140124041915-454541ec3da2/go.mod h1:PyRFw1Lt2wKX4ZVSQ2mk+PeDa1rxyObEDlApuIsUKuo= -github.com/bradfitz/iter v0.0.0-20190303215204-33e6a9893b0c/go.mod h1:PyRFw1Lt2wKX4ZVSQ2mk+PeDa1rxyObEDlApuIsUKuo= -github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8/go.mod h1:spo1JLcs67NmW1aVLEgtA8Yy1elc+X8y5SRW1sFW4Og= github.com/btcsuite/btcd v0.0.0-20171128150713-2e60448ffcc6/go.mod h1:Dmm/EzmjnCiweXmzRIAiUWCInVmPgjkzgv5k4tVyXiQ= github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= github.com/btcsuite/btcd v0.21.0-beta h1:At9hIZdJW0s9E/fAz28nrz6AmcNlSVucCH796ZteX1M= @@ -187,7 +134,6 @@ github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34= -github.com/c2h5oh/datasize v0.0.0-20200825124411-48ed595a09d2/go.mod h1:S/7n9copUssQ56c7aAgHqftWO4LTf4xY6CGWt8Bc+3M= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -205,15 +151,14 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cloudflare/cloudflare-go v0.10.2-0.20190916151808-a80f83b9add9/go.mod h1:1MxXX1Ux4x6mqPmjkUgTP1CdXIBXKX7T+Jk9Gxrmx+U= -github.com/cloudflare/cloudflare-go v0.13.2 h1:bhMGoNhAg21DuqJjU9jQepRRft6vYfo6pejT3NN4V6A= -github.com/cloudflare/cloudflare-go v0.13.2/go.mod 
h1:27kfc1apuifUmJhp069y0+hwlKDg4bd8LWlu7oKeZvM= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cloudflare/cloudflare-go v0.14.0 h1:gFqGlGl/5f9UGXAaKapCGUfaTCgRKKnzu2VvzMZlOFA= +github.com/cloudflare/cloudflare-go v0.14.0/go.mod h1:EnwdgGMaFOruiPZRFSgn+TsQ3hQ7C/YWzIGLeu5c304= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/codegangsta/cli v1.20.0/go.mod h1:/qJNoX69yVSKu5o4jLyXAENLRyk1uhi7zkbQ3slBdOA= github.com/consensys/bavard v0.1.8-0.20210105233146-c16790d2aa8b/go.mod h1:Bpd0/3mZuaj6Sj+PqrmIquiOKy397AKGThQPaGzNXAQ= github.com/consensys/goff v0.3.10/go.mod h1:xTldOBEHmFiYS0gPXd3NsaEqZWlnmeWcRLWgD3ba3xc= +github.com/consensys/gurvy v0.3.8 h1:H2hvjvT2OFMgdMn5ZbhXqHt+F8DJ2clZW7Vmc0kFFxc= github.com/consensys/gurvy v0.3.8/go.mod h1:sN75xnsiD593XnhbhvG2PkOy194pZBzqShWF/kwuW/g= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= @@ -235,6 +180,7 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea h1:j4317fAZh7X6GqbFowYdYdI0L9bwxL07jyPZIdepyZ0= github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ= github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218= +github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= @@ -243,33 +189,22 @@ github.com/dlclark/regexp2 v1.4.0 h1:F1rxgk7p4uKjwIQxBs9oAXe5CqrXlCduYEJvrF4u93E github.com/dlclark/regexp2 v1.4.0/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf h1:sh8rkQZavChcmakYiSlqu2425CHyFXLZZnvm7PDpU8M= github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dop251/goja v0.0.0-20200219165308-d1232e640a87/go.mod h1:Mw6PkjjMXWbTj+nnj4s3QPXq1jaT0s5pC0iFD4+BOAA= github.com/dop251/goja v0.0.0-20200721192441-a695b0cdd498 h1:Y9vTBSsV4hSwPSj4bacAU/eSnV3dAxVpepaghAdhGoQ= github.com/dop251/goja v0.0.0-20200721192441-a695b0cdd498/go.mod h1:Mw6PkjjMXWbTj+nnj4s3QPXq1jaT0s5pC0iFD4+BOAA= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/dustin/go-humanize v0.0.0-20180421182945-02af3965c54e/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dvyukov/go-fuzz v0.0.0-20200318091601-be3528f3a813/go.mod 
[go.sum: checksum diff elided — the file is regenerated from go.mod. The visible changes bump github.com/huin/goupnp and github.com/syndtr/goleveldb to March 2021 snapshots, github.com/olekukonko/tablewriter to v0.0.5, github.com/mattn/go-runewidth to v0.0.9, github.com/jmespath/go-jmespath to v0.4.0, github.com/google/go-cmp to v0.5.4, golang.org/x/net to a 2021-02-20 snapshot, golang.org/x/text to v0.3.4, and golang.org/x/time to a 2020-12-08 snapshot; add github.com/urfave/cli/v2 v2.3.0 and golang.org/x/term entries; and drop a large number of stale indirect entries no longer required by go.mod.]
v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/pdf v0.1.1 h1:k1MczvYDUvJBe93bYd7wrZLLUEcLZAuF824/I4e5Xr4= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/graphql/graphql.go b/graphql/graphql.go index c7eb8d4aa0..f7b67e4ccc 100644 --- a/graphql/graphql.go +++ b/graphql/graphql.go @@ -151,6 +151,20 @@ func (l *Log) Data(ctx context.Context) hexutil.Bytes { return l.log.Data } +// AccessTuple represents EIP-2930 +type AccessTuple struct { + address common.Address + storageKeys *[]common.Hash +} + +func (at *AccessTuple) Address(ctx context.Context) common.Address { + return at.address +} + +func (at *AccessTuple) StorageKeys(ctx context.Context) *[]common.Hash { + return at.storageKeys +} + // Transaction represents an Ethereum transaction. // backend and hash are mandatory; all others will be fetched when required. type Transaction struct { @@ -342,6 +356,31 @@ func (t *Transaction) Logs(ctx context.Context) (*[]*Log, error) { return &ret, nil } +func (t *Transaction) Type(ctx context.Context) (*int32, error) { + tx, err := t.resolve(ctx) + if err != nil { + return nil, err + } + txType := int32(tx.Type()) + return &txType, nil +} + +func (t *Transaction) AccessList(ctx context.Context) (*[]*AccessTuple, error) { + tx, err := t.resolve(ctx) + if err != nil || tx == nil { + return nil, err + } + accessList := tx.AccessList() + ret := make([]*AccessTuple, 0, len(accessList)) + for _, al := range accessList { + ret = append(ret, &AccessTuple{ + address: al.Address, + storageKeys: &al.StorageKeys, + }) + } + return &ret, nil +} + func (t *Transaction) R(ctx context.Context) (hexutil.Big, error) { tx, err := t.resolve(ctx) if err != nil || tx == nil { diff --git a/graphql/graphql_test.go b/graphql/graphql_test.go index ca3b0ef0ba..6ba5400651 100644 --- a/graphql/graphql_test.go +++ b/graphql/graphql_test.go @@ -25,8 +25,12 @@ import ( "testing" "time" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/consensus/ethash" "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/eth" "github.com/ethereum/go-ethereum/eth/ethconfig" "github.com/ethereum/go-ethereum/node" @@ -56,7 +60,7 @@ func TestBuildSchema(t *testing.T) { // Tests that a graphQL request is successfully handled when graphql is enabled on the specified endpoint func TestGraphQLBlockSerialization(t *testing.T) { - stack := createNode(t, true) + stack := createNode(t, true, false) defer stack.Close() // start node if err := stack.Start(); err != nil { @@ -158,9 +162,45 @@ func TestGraphQLBlockSerialization(t *testing.T) { } } +func TestGraphQLBlockSerializationEIP2718(t *testing.T) { + stack := createNode(t, true, true) + defer stack.Close() + // start node + if err := stack.Start(); err != nil { + t.Fatalf("could not start node: %v", err) + } + + for i, tt := range []struct { + body string + want string + code int + }{ + { + body: `{"query": "{block {number transactions { from 
{ address } to { address } value hash type accessList { address storageKeys } index}}}"}`, + want: `{"data":{"block":{"number":1,"transactions":[{"from":{"address":"0x71562b71999873db5b286df957af199ec94617f7"},"to":{"address":"0x0000000000000000000000000000000000000dad"},"value":"0x64","hash":"0x4f7b8d718145233dcf7f29e34a969c63dd4de8715c054ea2af022b66c4f4633e","type":0,"accessList":[],"index":0},{"from":{"address":"0x71562b71999873db5b286df957af199ec94617f7"},"to":{"address":"0x0000000000000000000000000000000000000dad"},"value":"0x32","hash":"0x9c6c2c045b618fe87add0e49ba3ca00659076ecae00fd51de3ba5d4ccf9dbf40","type":1,"accessList":[{"address":"0x0000000000000000000000000000000000000dad","storageKeys":["0x0000000000000000000000000000000000000000000000000000000000000000"]}],"index":1}]}}}`, + code: 200, + }, + } { + resp, err := http.Post(fmt.Sprintf("%s/graphql", stack.HTTPEndpoint()), "application/json", strings.NewReader(tt.body)) + if err != nil { + t.Fatalf("could not post: %v", err) + } + bodyBytes, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatalf("could not read from response body: %v", err) + } + if have := string(bodyBytes); have != tt.want { + t.Errorf("testcase %d %s,\nhave:\n%v\nwant:\n%v", i, tt.body, have, tt.want) + } + if tt.code != resp.StatusCode { + t.Errorf("testcase %d %s,\nwrong statuscode, have: %v, want: %v", i, tt.body, resp.StatusCode, tt.code) + } + } +} + // Tests that a graphQL request is not handled successfully when graphql is not enabled on the specified endpoint func TestGraphQLHTTPOnSamePort_GQLRequest_Unsuccessful(t *testing.T) { - stack := createNode(t, false) + stack := createNode(t, false, false) defer stack.Close() if err := stack.Start(); err != nil { t.Fatalf("could not start node: %v", err) @@ -174,7 +214,7 @@ func TestGraphQLHTTPOnSamePort_GQLRequest_Unsuccessful(t *testing.T) { assert.Equal(t, http.StatusNotFound, resp.StatusCode) } -func createNode(t *testing.T, gqlEnabled bool) *node.Node { +func createNode(t *testing.T, gqlEnabled bool, txEnabled bool) *node.Node { stack, err := node.New(&node.Config{ HTTPHost: "127.0.0.1", HTTPPort: 0, @@ -187,7 +227,11 @@ func createNode(t *testing.T, gqlEnabled bool) *node.Node { if !gqlEnabled { return stack } - createGQLService(t, stack) + if !txEnabled { + createGQLService(t, stack) + } else { + createGQLServiceWithTransactions(t, stack) + } return stack } @@ -227,3 +271,87 @@ func createGQLService(t *testing.T, stack *node.Node) { t.Fatalf("could not create graphql service: %v", err) } } + +func createGQLServiceWithTransactions(t *testing.T, stack *node.Node) { + // create backend + key, _ := crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + address := crypto.PubkeyToAddress(key.PublicKey) + funds := big.NewInt(1000000000) + dad := common.HexToAddress("0x0000000000000000000000000000000000000dad") + + ethConf := ðconfig.Config{ + Genesis: &genesisT.Genesis{ + Config: params.AllEthashProtocolChanges, + GasLimit: 11500000, + Difficulty: big.NewInt(1048576), + Alloc: genesisT.GenesisAlloc{ + address: {Balance: funds}, + // The address 0xdad sloads 0x00 and 0x01 + dad: { + Code: []byte{ + byte(vm.PC), + byte(vm.PC), + byte(vm.SLOAD), + byte(vm.SLOAD), + }, + Nonce: 0, + Balance: big.NewInt(0), + }, + }, + }, + Ethash: ethash.Config{ + PowMode: ethash.ModeFake, + }, + NetworkId: 1337, + TrieCleanCache: 5, + TrieCleanCacheJournal: "triecache", + TrieCleanCacheRejournal: 60 * time.Minute, + TrieDirtyCache: 5, + TrieTimeout: 60 * time.Minute, + SnapshotCache: 5, + 
} + + ethBackend, err := eth.New(stack, ethConf) + if err != nil { + t.Fatalf("could not create eth backend: %v", err) + } + signer := types.LatestSigner(ethConf.Genesis.Config) + + legacyTx, _ := types.SignNewTx(key, signer, &types.LegacyTx{ + Nonce: uint64(0), + To: &dad, + Value: big.NewInt(100), + Gas: 50000, + GasPrice: big.NewInt(1), + }) + envelopTx, _ := types.SignNewTx(key, signer, &types.AccessListTx{ + ChainID: ethConf.Genesis.Config.GetChainID(), + Nonce: uint64(1), + To: &dad, + Gas: 30000, + GasPrice: big.NewInt(1), + Value: big.NewInt(50), + AccessList: types.AccessList{{ + Address: dad, + StorageKeys: []common.Hash{{0}}, + }}, + }) + + // Create some blocks and import them + chain, _ := core.GenerateChain(params.AllEthashProtocolChanges, ethBackend.BlockChain().Genesis(), + ethash.NewFaker(), ethBackend.ChainDb(), 1, func(i int, b *core.BlockGen) { + b.SetCoinbase(common.Address{1}) + b.AddTx(legacyTx) + b.AddTx(envelopTx) + }) + + _, err = ethBackend.BlockChain().InsertChain(chain) + if err != nil { + t.Fatalf("could not import blocks: %v", err) + } + // create gql service + err = New(stack, ethBackend.APIBackend, []string{}, []string{}) + if err != nil { + t.Fatalf("could not create graphql service: %v", err) + } +} diff --git a/graphql/schema.go b/graphql/schema.go index 6ea63db636..d792bb9153 100644 --- a/graphql/schema.go +++ b/graphql/schema.go @@ -69,6 +69,12 @@ const schema string = ` transaction: Transaction! } + # EIP-2718 + type AccessTuple { + address: Address! + storageKeys: [Bytes32!] + } + # Transaction is an Ethereum transaction. type Transaction { # Hash is the hash of this transaction. @@ -118,6 +124,9 @@ const schema string = ` r: BigInt! s: BigInt! v: BigInt! + # Envelope transaction support + type: Int + accessList: [AccessTuple!] } # BlockFilterCriteria encapsulates log filter criteria for a filter applied diff --git a/internal/debug/flags.go b/internal/debug/flags.go index 2c92b19de6..bac5234b14 100644 --- a/internal/debug/flags.go +++ b/internal/debug/flags.go @@ -41,22 +41,22 @@ var ( Usage: "Logging verbosity: 0=silent, 1=error, 2=warn, 3=info, 4=debug, 5=detail", Value: 3, } - logjsonFlag = cli.BoolFlag{ - Name: "log.json", - Usage: "Format logs with JSON", - } vmoduleFlag = cli.StringFlag{ Name: "vmodule", Usage: "Per-module verbosity: comma-separated list of <pattern>=<level> (e.g. eth/*=5,p2p=4)", Value: "", } + logjsonFlag = cli.BoolFlag{ + Name: "log.json", + Usage: "Format logs with JSON", + } backtraceAtFlag = cli.StringFlag{ - Name: "backtrace", + Name: "log.backtrace", Usage: "Request a stack trace at a specific logging statement (e.g. 
\"block.go:271\")", Value: "", } debugFlag = cli.BoolFlag{ - Name: "debug", + Name: "log.debug", Usage: "Prepends log messages with call-site location (file and line number)", } pprofFlag = cli.BoolFlag{ @@ -90,20 +90,72 @@ var ( Name: "trace", Usage: "Write execution trace to the given file", } + // (Deprecated April 2020) + legacyPprofPortFlag = cli.IntFlag{ + Name: "pprofport", + Usage: "pprof HTTP server listening port (deprecated, use --pprof.port)", + Value: 6060, + } + legacyPprofAddrFlag = cli.StringFlag{ + Name: "pprofaddr", + Usage: "pprof HTTP server listening interface (deprecated, use --pprof.addr)", + Value: "127.0.0.1", + } + legacyMemprofilerateFlag = cli.IntFlag{ + Name: "memprofilerate", + Usage: "Turn on memory profiling with the given rate (deprecated, use --pprof.memprofilerate)", + Value: runtime.MemProfileRate, + } + legacyBlockprofilerateFlag = cli.IntFlag{ + Name: "blockprofilerate", + Usage: "Turn on block profiling with the given rate (deprecated, use --pprof.blockprofilerate)", + } + legacyCpuprofileFlag = cli.StringFlag{ + Name: "cpuprofile", + Usage: "Write CPU profile to the given file (deprecated, use --pprof.cpuprofile)", + } + legacyBacktraceAtFlag = cli.StringFlag{ + Name: "backtrace", + Usage: "Request a stack trace at a specific logging statement (e.g. \"block.go:271\") (deprecated, use --log.backtrace)", + Value: "", + } + legacyDebugFlag = cli.BoolFlag{ + Name: "debug", + Usage: "Prepends log messages with call-site location (file and line number) (deprecated, use --log.debug)", + } ) // Flags holds all command-line flags required for debugging. var Flags = []cli.Flag{ - verbosityFlag, logjsonFlag, vmoduleFlag, backtraceAtFlag, debugFlag, - pprofFlag, pprofAddrFlag, pprofPortFlag, memprofilerateFlag, - blockprofilerateFlag, cpuprofileFlag, traceFlag, + verbosityFlag, + vmoduleFlag, + logjsonFlag, + backtraceAtFlag, + debugFlag, + pprofFlag, + pprofAddrFlag, + pprofPortFlag, + memprofilerateFlag, + blockprofilerateFlag, + cpuprofileFlag, + traceFlag, } -var ( - glogger *log.GlogHandler -) +// This is the list of deprecated debugging flags. +var DeprecatedFlags = []cli.Flag{ + legacyPprofPortFlag, + legacyPprofAddrFlag, + legacyMemprofilerateFlag, + legacyBlockprofilerateFlag, + legacyCpuprofileFlag, + legacyBacktraceAtFlag, + legacyDebugFlag, +} + +var glogger *log.GlogHandler func init() { + Flags = append(Flags, DeprecatedFlags...) 
glogger = log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false))) glogger.Verbosity(log.LvlInfo) log.Root().SetHandler(glogger) @@ -124,17 +176,54 @@ func Setup(ctx *cli.Context) error { ostream = log.StreamHandler(output, log.TerminalFormat(usecolor)) } glogger.SetHandler(ostream) + // logging - log.PrintOrigins(ctx.GlobalBool(debugFlag.Name)) - glogger.Verbosity(log.Lvl(ctx.GlobalInt(verbosityFlag.Name))) - glogger.Vmodule(ctx.GlobalString(vmoduleFlag.Name)) - glogger.BacktraceAt(ctx.GlobalString(backtraceAtFlag.Name)) + verbosity := ctx.GlobalInt(verbosityFlag.Name) + glogger.Verbosity(log.Lvl(verbosity)) + vmodule := ctx.GlobalString(vmoduleFlag.Name) + glogger.Vmodule(vmodule) + + debug := ctx.GlobalBool(debugFlag.Name) + if ctx.GlobalIsSet(legacyDebugFlag.Name) { + debug = ctx.GlobalBool(legacyDebugFlag.Name) + log.Warn("The flag --debug is deprecated and will be removed in the future, please use --log.debug") + } + if ctx.GlobalIsSet(debugFlag.Name) { + debug = ctx.GlobalBool(debugFlag.Name) + } + log.PrintOrigins(debug) + + backtrace := ctx.GlobalString(backtraceAtFlag.Name) + if b := ctx.GlobalString(legacyBacktraceAtFlag.Name); b != "" { + backtrace = b + log.Warn("The flag --backtrace is deprecated and will be removed in the future, please use --log.backtrace") + } + if b := ctx.GlobalString(backtraceAtFlag.Name); b != "" { + backtrace = b + } + glogger.BacktraceAt(backtrace) + log.Root().SetHandler(glogger) // profiling, tracing - runtime.MemProfileRate = ctx.GlobalInt(memprofilerateFlag.Name) + runtime.MemProfileRate = memprofilerateFlag.Value + if ctx.GlobalIsSet(legacyMemprofilerateFlag.Name) { + runtime.MemProfileRate = ctx.GlobalInt(legacyMemprofilerateFlag.Name) + log.Warn("The flag --memprofilerate is deprecated and will be removed in the future, please use --pprof.memprofilerate") + } + if ctx.GlobalIsSet(memprofilerateFlag.Name) { + runtime.MemProfileRate = ctx.GlobalInt(memprofilerateFlag.Name) + } - Handler.SetBlockProfileRate(ctx.GlobalInt(blockprofilerateFlag.Name)) + blockProfileRate := blockprofilerateFlag.Value + if ctx.GlobalIsSet(legacyBlockprofilerateFlag.Name) { + blockProfileRate = ctx.GlobalInt(legacyBlockprofilerateFlag.Name) + log.Warn("The flag --blockprofilerate is deprecated and will be removed in the future, please use --pprof.blockprofilerate") + } + if ctx.GlobalIsSet(blockprofilerateFlag.Name) { + blockProfileRate = ctx.GlobalInt(blockprofilerateFlag.Name) + } + Handler.SetBlockProfileRate(blockProfileRate) if traceFile := ctx.GlobalString(traceFlag.Name); traceFile != "" { if err := Handler.StartGoTrace(traceFile); err != nil { @@ -146,13 +235,26 @@ func Setup(ctx *cli.Context) error { if err := Handler.StartCPUProfile(cpuFile); err != nil { return err } + } else if legacyCPUFile := ctx.GlobalString(legacyCpuprofileFlag.Name); legacyCPUFile != "" { + log.Warn("The flag --cpuprofile is deprecated and will be removed in the future, please use --pprof.cpuprofile") + if err := Handler.StartCPUProfile(legacyCPUFile); err != nil { + return err + } } // pprof server if ctx.GlobalBool(pprofFlag.Name) { listenHost := ctx.GlobalString(pprofAddrFlag.Name) + if ctx.GlobalIsSet(legacyPprofAddrFlag.Name) { + listenHost = ctx.GlobalString(legacyPprofAddrFlag.Name) + log.Warn("The flag --pprofaddr is deprecated and will be removed in the future, please use --pprof.addr") + } port := ctx.GlobalInt(pprofPortFlag.Name) + if ctx.GlobalIsSet(legacyPprofPortFlag.Name) { + port = ctx.GlobalInt(legacyPprofPortFlag.Name) + log.Warn("The flag 
--pprofport is deprecated and will be removed in the future, please use --pprof.port") + } address := fmt.Sprintf("%s:%d", listenHost, port) // This context value ("metrics.addr") represents the utils.MetricsHTTPFlag.Name. diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index acde3cb69f..1461316c2d 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -532,8 +532,12 @@ func NewPublicBlockChainAPI(b Backend) *PublicBlockChainAPI { } // ChainId returns the chainID value for transaction replay protection. -func (s *PublicBlockChainAPI) ChainId() *hexutil.Big { - return (*hexutil.Big)(s.b.ChainConfig().GetChainID()) +func (api *PublicBlockChainAPI) ChainId() (*hexutil.Big, error) { + // If the current block is at or past the EIP-155 replay-protection fork block, return the chainID from config + if config := api.b.ChainConfig(); config.IsEnabled(config.GetEIP155Transition, api.b.CurrentBlock().Number()) { + return (*hexutil.Big)(api.b.ChainConfig().GetChainID()), nil + } + return nil, fmt.Errorf("chain not synced beyond EIP-155 replay-protection fork block") } // BlockNumber returns the block number of the chain head. @@ -859,7 +863,7 @@ func DoCall(ctx context.Context, b Backend, args CallArgs, blockNrOrHash rpc.Blo // Get a new instance of the EVM. msg := args.ToMessage(globalGasCap) - evm, vmError, err := b.GetEVM(ctx, msg, state, header) + evm, vmError, err := b.GetEVM(ctx, msg, state, header, nil) if err != nil { return nil, err } @@ -1429,6 +1433,110 @@ func newRPCTransactionFromBlockHash(b *types.Block, hash common.Hash) *RPCTransa return nil } +// accessListResult returns an optional access list. +// It's the result of the `eth_createAccessList` RPC call. +// It contains an error if the transaction itself failed. +type accessListResult struct { + Accesslist *types.AccessList `json:"accessList"` + Error string `json:"error,omitempty"` + GasUsed hexutil.Uint64 `json:"gasUsed"` +} + +// CreateAccessList creates an EIP-2930 type AccessList for the given transaction. +// Reexec and BlockNrOrHash can be specified to create the accessList on top of a certain state. +func (s *PublicBlockChainAPI) CreateAccessList(ctx context.Context, args SendTxArgs, blockNrOrHash *rpc.BlockNumberOrHash) (*accessListResult, error) { + bNrOrHash := rpc.BlockNumberOrHashWithNumber(rpc.PendingBlockNumber) + if blockNrOrHash != nil { + bNrOrHash = *blockNrOrHash + } + acl, gasUsed, vmerr, err := AccessList(ctx, s.b, bNrOrHash, args) + if err != nil { + return nil, err + } + result := &accessListResult{Accesslist: &acl, GasUsed: hexutil.Uint64(gasUsed)} + if vmerr != nil { + result.Error = vmerr.Error() + } + return result, nil +} + +// AccessList creates an access list for the given transaction. +// If the access list creation fails, an error is returned. +// If the transaction itself fails, a vmErr is returned. 
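// Illustrative note (not part of the patch): given the json tags on
// accessListResult above, a successful eth_createAccessList response is
// expected to marshal roughly as follows. The address and storage key mirror
// the GraphQL test fixture; the gasUsed value is a made-up placeholder.
//
//	{
//	  "accessList": [{
//	    "address": "0x0000000000000000000000000000000000000dad",
//	    "storageKeys": ["0x0000000000000000000000000000000000000000000000000000000000000000"]
//	  }],
//	  "gasUsed": "0x7530"
//	}
//
// The "error" field is omitted (omitempty) unless the traced transaction
// itself fails, in which case the access list is still returned alongside it.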
+func AccessList(ctx context.Context, b Backend, blockNrOrHash rpc.BlockNumberOrHash, args SendTxArgs) (acl types.AccessList, gasUsed uint64, vmErr error, err error) { + // Retrieve the execution context + db, header, err := b.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash) + if db == nil || err != nil { + return nil, 0, nil, err + } + // If the gas amount is not set, extract this as it will depend on access + // lists and we'll need to reestimate every time + nogas := args.Gas == nil + + // Ensure any missing fields are filled, extract the recipient and input data + if err := args.setDefaults(ctx, b); err != nil { + return nil, 0, nil, err + } + var to common.Address + if args.To != nil { + to = *args.To + } else { + to = crypto.CreateAddress(args.From, uint64(*args.Nonce)) + } + var input []byte + if args.Input != nil { + input = *args.Input + } else if args.Data != nil { + input = *args.Data + } + // Retrieve the precompiles since they don't need to be added to the access list + precompileMap := vm.PrecompiledContractsForConfig(b.ChainConfig(), header.Number) + precompiles := make([]common.Address, 0, len(precompileMap)) + for k := range precompileMap { + precompiles = append(precompiles, k) + } + + // Create an initial tracer + prevTracer := vm.NewAccessListTracer(nil, args.From, to, precompiles) + if args.AccessList != nil { + prevTracer = vm.NewAccessListTracer(*args.AccessList, args.From, to, precompiles) + } + for { + // Retrieve the current access list to expand + accessList := prevTracer.AccessList() + log.Trace("Creating access list", "input", accessList) + + // If no gas amount was specified, each unique access list needs its own + // gas calculation. This is quite expensive, but we need to be accurate + // and it's covered by the sender only anyway. + if nogas { + args.Gas = nil + if err := args.setDefaults(ctx, b); err != nil { + return nil, 0, nil, err // shouldn't happen, just in case + } + } + // Copy the original db so we don't modify it + statedb := db.Copy() + msg := types.NewMessage(args.From, args.To, uint64(*args.Nonce), args.Value.ToInt(), uint64(*args.Gas), args.GasPrice.ToInt(), input, accessList, false) + + // Apply the transaction with the access list tracer + tracer := vm.NewAccessListTracer(accessList, args.From, to, precompiles) + config := vm.Config{Tracer: tracer, Debug: true} + vmenv, _, err := b.GetEVM(ctx, msg, statedb, header, &config) + if err != nil { + return nil, 0, nil, err + } + res, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(msg.Gas())) + if err != nil { + return nil, 0, nil, fmt.Errorf("failed to apply transaction: %v err: %v", args.toTransaction().Hash(), err) + } + if tracer.Equal(prevTracer) { + return accessList, res.UsedGas, res.Err, nil + } + prevTracer = tracer + } +} + // PublicTransactionPoolAPI exposes methods for the RPC interface type PublicTransactionPoolAPI struct { b Backend @@ -1665,7 +1773,6 @@ func (args *SendTxArgs) setDefaults(ctx context.Context, b Backend) error { return errors.New(`contract creation without any data provided`) } } - // Estimate the gas usage if necessary. 
if args.Gas == nil { // For backwards-compatibility reason, we try both input and data @@ -1706,7 +1813,6 @@ func (args *SendTxArgs) toTransaction() *types.Transaction { } else if args.Data != nil { input = *args.Data } - var data types.TxData if args.AccessList == nil { data = &types.LegacyTx{ diff --git a/internal/ethapi/backend.go b/internal/ethapi/backend.go index 160223fd78..e7228d2d7b 100644 --- a/internal/ethapi/backend.go +++ b/internal/ethapi/backend.go @@ -63,7 +63,7 @@ type Backend interface { StateAndHeaderByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*state.StateDB, *types.Header, error) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) GetTd(ctx context.Context, hash common.Hash) *big.Int - GetEVM(ctx context.Context, msg core.Message, state *state.StateDB, header *types.Header) (*vm.EVM, func() error, error) + GetEVM(ctx context.Context, msg core.Message, state *state.StateDB, header *types.Header, vmConfig *vm.Config) (*vm.EVM, func() error, error) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription SubscribeChainSideEvent(ch chan<- core.ChainSideEvent) event.Subscription diff --git a/internal/web3ext/web3ext.go b/internal/web3ext/web3ext.go index 52bd4a7ae2..8623091676 100644 --- a/internal/web3ext/web3ext.go +++ b/internal/web3ext/web3ext.go @@ -139,8 +139,8 @@ web3._extend({ params: 3, }), new web3._extend.Method({ - name: 'submitHashRate', - call: 'ethash_submitHashRate', + name: 'submitHashrate', + call: 'ethash_submitHashrate', params: 2, }), ] @@ -197,12 +197,24 @@ web3._extend({ call: 'admin_sleepBlocks', params: 2 }), + new web3._extend.Method({ + name: 'startHTTP', + call: 'admin_startHTTP', + params: 5, + inputFormatter: [null, null, null, null, null] + }), + new web3._extend.Method({ + name: 'stopHTTP', + call: 'admin_stopHTTP' + }), + // This method is deprecated. new web3._extend.Method({ name: 'startRPC', call: 'admin_startRPC', - params: 4, - inputFormatter: [null, null, null, null] + params: 5, + inputFormatter: [null, null, null, null, null] }), + // This method is deprecated. new web3._extend.Method({ name: 'stopRPC', call: 'admin_stopRPC' @@ -579,6 +591,12 @@ web3._extend({ params: 3, inputFormatter: [web3._extend.formatters.inputAddressFormatter, null, web3._extend.formatters.inputBlockNumberFormatter] }), + new web3._extend.Method({ + name: 'createAccessList', + call: 'eth_createAccessList', + params: 2, + inputFormatter: [null, web3._extend.formatters.inputBlockNumberFormatter], + }), ], properties: [ new web3._extend.Property({ diff --git a/les/api.go b/les/api.go index 6491c4dcc4..782bb31ef2 100644 --- a/les/api.go +++ b/les/api.go @@ -31,7 +31,6 @@ var ( errNoCheckpoint = errors.New("no local checkpoint provided") errNotActivated = errors.New("checkpoint registrar is not activated") errUnknownBenchmarkType = errors.New("unknown benchmark type") - errNoPriority = errors.New("priority too low to raise capacity") ) // PrivateLightServerAPI provides an API to access the LES light server. 
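// Illustrative usage sketch for the eth_createAccessList method registered in
// web3ext above (not part of the patch). The two-parameter shape (transaction
// args, block number) follows the inputFormatter declaration; the endpoint URL
// and the account addresses are assumed placeholder values taken from the test
// fixtures.
package main

import (
	"context"
	"fmt"

	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	// Assumes a local node with the eth API exposed over HTTP.
	client, err := rpc.Dial("http://127.0.0.1:8545")
	if err != nil {
		panic(err)
	}
	tx := map[string]interface{}{
		"from": "0x71562b71999873db5b286df957af199ec94617f7",
		"to":   "0x0000000000000000000000000000000000000dad",
	}
	var result map[string]interface{}
	// eth_createAccessList takes the transaction args and a block number.
	if err := client.CallContext(context.Background(), &result, "eth_createAccessList", tx, "latest"); err != nil {
		panic(err)
	}
	fmt.Println("accessList:", result["accessList"], "gasUsed:", result["gasUsed"])
}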
@@ -44,8 +43,20 @@ type PrivateLightServerAPI struct { func NewPrivateLightServerAPI(server *LesServer) *PrivateLightServerAPI { return &PrivateLightServerAPI{ server: server, - defaultPosFactors: server.clientPool.defaultPosFactors, - defaultNegFactors: server.clientPool.defaultNegFactors, + defaultPosFactors: defaultPosFactors, + defaultNegFactors: defaultNegFactors, + } +} + +// parseNode parses either an enode address or a raw hex node id +func parseNode(node string) (enode.ID, error) { + if id, err := enode.ParseID(node); err == nil { + return id, nil + } + if node, err := enode.Parse(enode.ValidSchemes, node); err == nil { + return node.ID(), nil + } else { + return enode.ID{}, err } } @@ -54,16 +65,34 @@ func (api *PrivateLightServerAPI) ServerInfo() map[string]interface{} { res := make(map[string]interface{}) res["minimumCapacity"] = api.server.minCapacity res["maximumCapacity"] = api.server.maxCapacity - res["totalCapacity"], res["totalConnectedCapacity"], res["priorityConnectedCapacity"] = api.server.clientPool.capacityInfo() + _, res["totalCapacity"] = api.server.clientPool.Limits() + _, res["totalConnectedCapacity"] = api.server.clientPool.Active() + res["priorityConnectedCapacity"] = 0 //TODO connect when token sale module is added return res } // ClientInfo returns information about clients listed in the ids list or matching the given tags -func (api *PrivateLightServerAPI) ClientInfo(ids []enode.ID) map[enode.ID]map[string]interface{} { +func (api *PrivateLightServerAPI) ClientInfo(nodes []string) map[enode.ID]map[string]interface{} { + var ids []enode.ID + for _, node := range nodes { + if id, err := parseNode(node); err == nil { + ids = append(ids, id) + } + } + res := make(map[enode.ID]map[string]interface{}) - api.server.clientPool.forClients(ids, func(client *clientInfo) { - res[client.node.ID()] = api.clientInfo(client) - }) + if len(ids) == 0 { + ids = api.server.peers.ids() + } + for _, id := range ids { + if peer := api.server.peers.peer(id); peer != nil { + res[id] = api.clientInfo(peer, peer.balance) + } else { + api.server.clientPool.BalanceOperation(id, "", func(balance vfs.AtomicBalanceOperator) { + res[id] = api.clientInfo(nil, balance) + }) + } + } return res } @@ -75,31 +104,35 @@ func (api *PrivateLightServerAPI) ClientInfo(ids []enode.ID) map[enode.ID]map[st // assigned to it. 
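// Illustrative sketch (not part of the patch): the two identifier forms the
// parseNode helper above accepts for the reworked string-based admin APIs.
// The node key and endpoint below are made-up placeholders.
//
//	// Raw hex node ID, handled by enode.ParseID:
//	id, _ := parseNode("1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439")
//	// Full enode URL, handled by enode.Parse:
//	id, _ = parseNode("enode://1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439@10.3.58.6:30303")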
func (api *PrivateLightServerAPI) PriorityClientInfo(start, stop enode.ID, maxCount int) map[enode.ID]map[string]interface{} { res := make(map[enode.ID]map[string]interface{}) - ids := api.server.clientPool.bt.GetPosBalanceIDs(start, stop, maxCount+1) + ids := api.server.clientPool.GetPosBalanceIDs(start, stop, maxCount+1) if len(ids) > maxCount { res[ids[maxCount]] = make(map[string]interface{}) ids = ids[:maxCount] } - if len(ids) != 0 { - api.server.clientPool.forClients(ids, func(client *clientInfo) { - res[client.node.ID()] = api.clientInfo(client) - }) + for _, id := range ids { + if peer := api.server.peers.peer(id); peer != nil { + res[id] = api.clientInfo(peer, peer.balance) + } else { + api.server.clientPool.BalanceOperation(id, "", func(balance vfs.AtomicBalanceOperator) { + res[id] = api.clientInfo(nil, balance) + }) + } } return res } // clientInfo creates a client info data structure -func (api *PrivateLightServerAPI) clientInfo(c *clientInfo) map[string]interface{} { +func (api *PrivateLightServerAPI) clientInfo(peer *clientPeer, balance vfs.ReadOnlyBalance) map[string]interface{} { info := make(map[string]interface{}) - pb, nb := c.balance.GetBalance() - info["isConnected"] = c.connected + pb, nb := balance.GetBalance() + info["isConnected"] = peer != nil info["pricing/balance"] = pb info["priority"] = pb != 0 // cb := api.server.clientPool.ndb.getCurrencyBalance(id) // info["pricing/currency"] = cb.amount - if c.connected { - info["connectionTime"] = float64(mclock.Now()-c.connectedAt) / float64(time.Second) - info["capacity"], _ = api.server.clientPool.ns.GetField(c.node, priorityPoolSetup.CapacityField).(uint64) + if peer != nil { + info["connectionTime"] = float64(mclock.Now()-peer.connectedAt) / float64(time.Second) + info["capacity"] = peer.getCapacity() info["pricing/negBalance"] = nb } return info @@ -107,7 +140,7 @@ func (api *PrivateLightServerAPI) clientInfo(c *clientInfo) map[string]interface // setParams either sets the given parameters for a single connected client (if specified) // or the default parameters applicable to clients connected in the future -func (api *PrivateLightServerAPI) setParams(params map[string]interface{}, client *clientInfo, posFactors, negFactors *vfs.PriceFactors) (updateFactors bool, err error) { +func (api *PrivateLightServerAPI) setParams(params map[string]interface{}, client *clientPeer, posFactors, negFactors *vfs.PriceFactors) (updateFactors bool, err error) { defParams := client == nil for name, value := range params { errValue := func() error { @@ -137,9 +170,8 @@ func (api *PrivateLightServerAPI) setParams(params map[string]interface{}, clien setFactor(&negFactors.RequestFactor) case !defParams && name == "capacity": if capacity, ok := value.(float64); ok && uint64(capacity) >= api.server.minCapacity { - _, err = api.server.clientPool.setCapacity(client.node, client.address, uint64(capacity), 0, true) - // Don't have to call factor update explicitly. It's already done - // in setCapacity function. 
+ _, err = api.server.clientPool.SetCapacity(client.Node(), uint64(capacity), 0, false) + // time factor recalculation is performed automatically by the balance tracker } else { err = errValue() } @@ -159,22 +191,26 @@ func (api *PrivateLightServerAPI) setParams(params map[string]interface{}, clien // SetClientParams sets client parameters for all clients listed in the ids list // or all connected clients if the list is empty -func (api *PrivateLightServerAPI) SetClientParams(ids []enode.ID, params map[string]interface{}) error { +func (api *PrivateLightServerAPI) SetClientParams(nodes []string, params map[string]interface{}) error { var err error - api.server.clientPool.forClients(ids, func(client *clientInfo) { - if client.connected { - posFactors, negFactors := client.balance.GetPriceFactors() - update, e := api.setParams(params, client, &posFactors, &negFactors) + for _, node := range nodes { + var id enode.ID + if id, err = parseNode(node); err != nil { + return err + } + if peer := api.server.peers.peer(id); peer != nil { + posFactors, negFactors := peer.balance.GetPriceFactors() + update, e := api.setParams(params, peer, &posFactors, &negFactors) if update { - client.balance.SetPriceFactors(posFactors, negFactors) + peer.balance.SetPriceFactors(posFactors, negFactors) } if e != nil { err = e } } else { - err = fmt.Errorf("client %064x is not connected", client.node.ID()) + err = fmt.Errorf("client %064x is not connected", id) } - }) + } return err } @@ -182,7 +218,7 @@ func (api *PrivateLightServerAPI) SetClientParams(ids []enode.ID, params map[str func (api *PrivateLightServerAPI) SetDefaultParams(params map[string]interface{}) error { update, err := api.setParams(params, nil, &api.defaultPosFactors, &api.defaultNegFactors) if update { - api.server.clientPool.setDefaultFactors(api.defaultPosFactors, api.defaultNegFactors) + api.server.clientPool.SetDefaultFactors(api.defaultPosFactors, api.defaultNegFactors) } return err } @@ -195,15 +231,19 @@ func (api *PrivateLightServerAPI) SetConnectedBias(bias time.Duration) error { if bias < time.Duration(0) { return fmt.Errorf("bias illegal: %v less than 0", bias) } - api.server.clientPool.setConnectedBias(bias) + api.server.clientPool.SetConnectedBias(bias) return nil } // AddBalance adds the given amount to the balance of a client if possible and returns // the balance before and after the operation -func (api *PrivateLightServerAPI) AddBalance(id enode.ID, amount int64) (balance [2]uint64, err error) { - api.server.clientPool.forClients([]enode.ID{id}, func(c *clientInfo) { - balance[0], balance[1], err = c.balance.AddBalance(amount) +func (api *PrivateLightServerAPI) AddBalance(node string, amount int64) (balance [2]uint64, err error) { + var id enode.ID + if id, err = parseNode(node); err != nil { + return + } + api.server.clientPool.BalanceOperation(id, "", func(nb vfs.AtomicBalanceOperator) { + balance[0], balance[1], err = nb.AddBalance(amount) }) return } @@ -297,16 +337,20 @@ func NewPrivateDebugAPI(server *LesServer) *PrivateDebugAPI { } // FreezeClient forces a temporary client freeze which normally happens when the server is overloaded -func (api *PrivateDebugAPI) FreezeClient(id enode.ID) error { - var err error - api.server.clientPool.forClients([]enode.ID{id}, func(c *clientInfo) { - if c.connected { - c.peer.freeze() - } else { - err = fmt.Errorf("client %064x is not connected", id[:]) - } - }) - return err +func (api *PrivateDebugAPI) FreezeClient(node string) error { + var ( + id enode.ID + err error + ) + if id, err = 
parseNode(node); err != nil { + return err + } + if peer := api.server.peers.peer(id); peer != nil { + peer.freeze() + return nil + } else { + return fmt.Errorf("client %064x is not connected", id[:]) + } } // PrivateLightAPI provides an API to access the LES light server or light client. diff --git a/les/api_backend.go b/les/api_backend.go index f22a159db4..e1bef55936 100644 --- a/les/api_backend.go +++ b/les/api_backend.go @@ -172,10 +172,13 @@ func (b *LesApiBackend) GetTd(ctx context.Context, hash common.Hash) *big.Int { return nil } -func (b *LesApiBackend) GetEVM(ctx context.Context, msg core.Message, state *state.StateDB, header *types.Header) (*vm.EVM, func() error, error) { +func (b *LesApiBackend) GetEVM(ctx context.Context, msg core.Message, state *state.StateDB, header *types.Header, vmConfig *vm.Config) (*vm.EVM, func() error, error) { + if vmConfig == nil { + vmConfig = new(vm.Config) + } txContext := core.NewEVMTxContext(msg) context := core.NewEVMBlockContext(header, b.eth.blockchain, nil) - return vm.NewEVM(context, txContext, state, b.eth.chainConfig, vm.Config{}), state.Error, nil + return vm.NewEVM(context, txContext, state, b.eth.chainConfig, *vmConfig), state.Error, nil } func (b *LesApiBackend) SendTx(ctx context.Context, signedTx *types.Transaction) error { @@ -299,14 +302,10 @@ func (b *LesApiBackend) CurrentHeader() *types.Header { return b.eth.blockchain.CurrentHeader() } -func (b *LesApiBackend) StateAtBlock(ctx context.Context, block *types.Block, reexec uint64) (*state.StateDB, func(), error) { +func (b *LesApiBackend) StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, checkLive bool) (*state.StateDB, error) { return b.eth.stateAtBlock(ctx, block, reexec) } -func (b *LesApiBackend) StatesInRange(ctx context.Context, fromBlock *types.Block, toBlock *types.Block, reexec uint64) ([]*state.StateDB, func(), error) { - return b.eth.statesInRange(ctx, fromBlock, toBlock, reexec) -} - -func (b *LesApiBackend) StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, func(), error) { +func (b *LesApiBackend) StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, error) { return b.eth.stateAtTransaction(ctx, block, txIndex, reexec) } diff --git a/les/client.go b/les/client.go index 23bddf76b8..ceaeaacc69 100644 --- a/les/client.go +++ b/les/client.go @@ -76,17 +76,18 @@ type LightEthereum struct { accountManager *accounts.Manager netRPCService *ethapi.PublicNetAPI - p2pServer *p2p.Server - p2pConfig *p2p.Config + p2pServer *p2p.Server + p2pConfig *p2p.Config + udpEnabled bool } // New creates an instance of the light client. 
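// Illustrative sketch (not part of the patch): the optional vm.Config pattern
// introduced by the GetEVM signature change above. Passing nil preserves the
// previous zero-value behaviour, while access list creation injects a tracer.
// Both call shapes appear verbatim elsewhere in this patch:
//
//	// Plain call sites (e.g. DoCall) keep the default configuration:
//	evm, vmError, err := b.GetEVM(ctx, msg, state, header, nil)
//
//	// AccessList supplies a config carrying an AccessListTracer:
//	config := vm.Config{Tracer: tracer, Debug: true}
//	vmenv, _, err := b.GetEVM(ctx, msg, statedb, header, &config)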
func New(stack *node.Node, config *ethconfig.Config) (*LightEthereum, error) { - chainDb, err := stack.OpenDatabase("lightchaindata", config.DatabaseCache, config.DatabaseHandles, "eth/db/chaindata/") + chainDb, err := stack.OpenDatabase("lightchaindata", config.DatabaseCache, config.DatabaseHandles, "eth/db/chaindata/", false) if err != nil { return nil, err } - lesDb, err := stack.OpenDatabase("les.client", 0, 0, "eth/db/les.client") + lesDb, err := stack.OpenDatabase("les.client", 0, 0, "eth/db/lesclient/", false) if err != nil { return nil, err } @@ -116,10 +117,11 @@ func New(stack *node.Node, config *ethconfig.Config) (*LightEthereum, error) { bloomIndexer: core.NewBloomIndexer(chainDb, vars.BloomBitsBlocksClient, vars.HelperTrieConfirmations), p2pServer: stack.Server(), p2pConfig: &stack.Config().P2P, + udpEnabled: stack.Config().P2P.DiscoveryV5, } var prenegQuery vfc.QueryFunc - if leth.p2pServer.DiscV5 != nil { + if leth.udpEnabled { prenegQuery = leth.prenegQuery } leth.serverPool, leth.serverPoolIterator = vfc.NewServerPool(lesDb, []byte("serverpool:"), time.Second, prenegQuery, &mclock.System{}, config.UltraLightServers, requestList) @@ -205,7 +207,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*LightEthereum, error) { // VfluxRequest sends a batch of requests to the given node through discv5 UDP TalkRequest and returns the responses func (s *LightEthereum) VfluxRequest(n *enode.Node, reqs vflux.Requests) vflux.Replies { - if s.p2pServer.DiscV5 == nil { + if !s.udpEnabled { return nil } reqsEnc, _ := rlp.EncodeToBytes(&reqs) @@ -222,7 +224,7 @@ func (s *LightEthereum) VfluxRequest(n *enode.Node, reqs vflux.Requests) vflux.R func (s *LightEthereum) vfxVersion(n *enode.Node) uint { if n.Seq() == 0 { var err error - if s.p2pServer.DiscV5 == nil { + if !s.udpEnabled { return 0 } if n, err = s.p2pServer.DiscV5.RequestENR(n); n != nil && err == nil && n.Seq() != 0 { @@ -353,7 +355,11 @@ func (s *LightEthereum) Protocols() []p2p.Protocol { func (s *LightEthereum) Start() error { log.Warn("Light client mode is an experimental feature") - discovery, err := s.setupDiscovery(s.p2pConfig) + if s.udpEnabled && s.p2pServer.DiscV5 == nil { + s.udpEnabled = false + log.Error("Discovery v5 is not initialized") + } + discovery, err := s.setupDiscovery() if err != nil { return err } diff --git a/les/clientpool.go b/les/clientpool.go deleted file mode 100644 index 1aa63a281e..0000000000 --- a/les/clientpool.go +++ /dev/null @@ -1,439 +0,0 @@ -// Copyright 2019 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 
- -package les - -import ( - "fmt" - "sync" - "time" - - "github.com/ethereum/go-ethereum/common/mclock" - "github.com/ethereum/go-ethereum/ethdb" - "github.com/ethereum/go-ethereum/les/utils" - "github.com/ethereum/go-ethereum/les/vflux" - vfs "github.com/ethereum/go-ethereum/les/vflux/server" - "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/p2p/enode" - "github.com/ethereum/go-ethereum/p2p/enr" - "github.com/ethereum/go-ethereum/p2p/nodestate" - "github.com/ethereum/go-ethereum/rlp" -) - -const ( - defaultNegExpTC = 3600 // default time constant (in seconds) for exponentially reducing negative balance - - // defaultConnectedBias is applied to already connected clients So that - // already connected client won't be kicked out very soon and we - // can ensure all connected clients can have enough time to request - // or sync some data. - // - // todo(rjl493456442) make it configurable. It can be the option of - // free trial time! - defaultConnectedBias = time.Minute * 3 - inactiveTimeout = time.Second * 10 -) - -// clientPool implements a client database that assigns a priority to each client -// based on a positive and negative balance. Positive balance is externally assigned -// to prioritized clients and is decreased with connection time and processed -// requests (unless the price factors are zero). If the positive balance is zero -// then negative balance is accumulated. -// -// Balance tracking and priority calculation for connected clients is done by -// balanceTracker. activeQueue ensures that clients with the lowest positive or -// highest negative balance get evicted when the total capacity allowance is full -// and new clients with a better balance want to connect. -// -// Already connected nodes receive a small bias in their favor in order to avoid -// accepting and instantly kicking out clients. In theory, we try to ensure that -// each client can have several minutes of connection time. -// -// Balances of disconnected clients are stored in nodeDB including positive balance -// and negative banalce. Boeth positive balance and negative balance will decrease -// exponentially. If the balance is low enough, then the record will be dropped. -type clientPool struct { - vfs.BalanceTrackerSetup - vfs.PriorityPoolSetup - lock sync.Mutex - clock mclock.Clock - closed bool - removePeer func(enode.ID) - ns *nodestate.NodeStateMachine - pp *vfs.PriorityPool - bt *vfs.BalanceTracker - - defaultPosFactors, defaultNegFactors vfs.PriceFactors - posExpTC, negExpTC uint64 - minCap uint64 // The minimal capacity value allowed for any client - connectedBias time.Duration - capLimit uint64 -} - -// clientPoolPeer represents a client peer in the pool. -// Positive balances are assigned to node key while negative balances are assigned -// to freeClientId. Currently network IP address without port is used because -// clients have a limited access to IP addresses while new node keys can be easily -// generated so it would be useless to assign a negative value to them. -type clientPoolPeer interface { - Node() *enode.Node - freeClientId() string - updateCapacity(uint64) - freeze() - allowInactive() bool -} - -// clientInfo defines all information required by clientpool. 
-type clientInfo struct { - node *enode.Node - address string - peer clientPoolPeer - connected, priority bool - connectedAt mclock.AbsTime - balance *vfs.NodeBalance -} - -// newClientPool creates a new client pool -func newClientPool(ns *nodestate.NodeStateMachine, lesDb ethdb.Database, minCap uint64, connectedBias time.Duration, clock mclock.Clock, removePeer func(enode.ID)) *clientPool { - pool := &clientPool{ - ns: ns, - BalanceTrackerSetup: balanceTrackerSetup, - PriorityPoolSetup: priorityPoolSetup, - clock: clock, - minCap: minCap, - connectedBias: connectedBias, - removePeer: removePeer, - } - pool.bt = vfs.NewBalanceTracker(ns, balanceTrackerSetup, lesDb, clock, &utils.Expirer{}, &utils.Expirer{}) - pool.pp = vfs.NewPriorityPool(ns, priorityPoolSetup, clock, minCap, connectedBias, 4) - - // set default expiration constants used by tests - // Note: server overwrites this if token sale is active - pool.bt.SetExpirationTCs(0, defaultNegExpTC) - - ns.SubscribeState(pool.InactiveFlag.Or(pool.PriorityFlag), func(node *enode.Node, oldState, newState nodestate.Flags) { - if newState.Equals(pool.InactiveFlag) { - ns.AddTimeout(node, pool.InactiveFlag, inactiveTimeout) - } - if oldState.Equals(pool.InactiveFlag) && newState.Equals(pool.InactiveFlag.Or(pool.PriorityFlag)) { - ns.SetStateSub(node, pool.InactiveFlag, nodestate.Flags{}, 0) // remove timeout - } - }) - - ns.SubscribeState(pool.ActiveFlag.Or(pool.PriorityFlag), func(node *enode.Node, oldState, newState nodestate.Flags) { - c, _ := ns.GetField(node, clientInfoField).(*clientInfo) - if c == nil { - return - } - c.priority = newState.HasAll(pool.PriorityFlag) - if newState.Equals(pool.ActiveFlag) { - cap, _ := ns.GetField(node, pool.CapacityField).(uint64) - if cap > minCap { - pool.pp.RequestCapacity(node, minCap, 0, true) - } - } - }) - - ns.SubscribeState(pool.InactiveFlag.Or(pool.ActiveFlag), func(node *enode.Node, oldState, newState nodestate.Flags) { - if oldState.IsEmpty() { - clientConnectedMeter.Mark(1) - log.Debug("Client connected", "id", node.ID()) - } - if oldState.Equals(pool.InactiveFlag) && newState.Equals(pool.ActiveFlag) { - clientActivatedMeter.Mark(1) - log.Debug("Client activated", "id", node.ID()) - } - if oldState.Equals(pool.ActiveFlag) && newState.Equals(pool.InactiveFlag) { - clientDeactivatedMeter.Mark(1) - log.Debug("Client deactivated", "id", node.ID()) - c, _ := ns.GetField(node, clientInfoField).(*clientInfo) - if c == nil || !c.peer.allowInactive() { - pool.removePeer(node.ID()) - } - } - if newState.IsEmpty() { - clientDisconnectedMeter.Mark(1) - log.Debug("Client disconnected", "id", node.ID()) - pool.removePeer(node.ID()) - } - }) - - var totalConnected uint64 - ns.SubscribeField(pool.CapacityField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) { - oldCap, _ := oldValue.(uint64) - newCap, _ := newValue.(uint64) - totalConnected += newCap - oldCap - totalConnectedGauge.Update(int64(totalConnected)) - c, _ := ns.GetField(node, clientInfoField).(*clientInfo) - if c != nil { - c.peer.updateCapacity(newCap) - } - }) - return pool -} - -// stop shuts the client pool down -func (f *clientPool) stop() { - f.lock.Lock() - f.closed = true - f.lock.Unlock() - f.ns.ForEach(nodestate.Flags{}, nodestate.Flags{}, func(node *enode.Node, state nodestate.Flags) { - // enforces saving all balances in BalanceTracker - f.disconnectNode(node) - }) - f.bt.Stop() -} - -// connect should be called after a successful handshake. 
If the connection was -// rejected, there is no need to call disconnect. -func (f *clientPool) connect(peer clientPoolPeer) (uint64, error) { - f.lock.Lock() - defer f.lock.Unlock() - - // Short circuit if clientPool is already closed. - if f.closed { - return 0, fmt.Errorf("Client pool is already closed") - } - // Dedup connected peers. - node, freeID := peer.Node(), peer.freeClientId() - if f.ns.GetField(node, clientInfoField) != nil { - log.Debug("Client already connected", "address", freeID, "id", node.ID().String()) - return 0, fmt.Errorf("Client already connected address=%s id=%s", freeID, node.ID().String()) - } - now := f.clock.Now() - c := &clientInfo{ - node: node, - address: freeID, - peer: peer, - connected: true, - connectedAt: now, - } - f.ns.SetField(node, clientInfoField, c) - f.ns.SetField(node, connAddressField, freeID) - if c.balance, _ = f.ns.GetField(node, f.BalanceField).(*vfs.NodeBalance); c.balance == nil { - f.disconnect(peer) - return 0, nil - } - c.balance.SetPriceFactors(f.defaultPosFactors, f.defaultNegFactors) - - f.ns.SetState(node, f.InactiveFlag, nodestate.Flags{}, 0) - var allowed bool - f.ns.Operation(func() { - _, allowed = f.pp.RequestCapacity(node, f.minCap, f.connectedBias, true) - }) - if allowed { - return f.minCap, nil - } - if !peer.allowInactive() { - f.disconnect(peer) - } - return 0, nil -} - -// setConnectedBias sets the connection bias, which is applied to already connected clients -// So that already connected client won't be kicked out very soon and we can ensure all -// connected clients can have enough time to request or sync some data. -func (f *clientPool) setConnectedBias(bias time.Duration) { - f.lock.Lock() - defer f.lock.Unlock() - - f.connectedBias = bias - f.pp.SetActiveBias(bias) -} - -// disconnect should be called when a connection is terminated. If the disconnection -// was initiated by the pool itself using disconnectFn then calling disconnect is -// not necessary but permitted. -func (f *clientPool) disconnect(p clientPoolPeer) { - f.disconnectNode(p.Node()) -} - -// disconnectNode removes node fields and flags related to connected status -func (f *clientPool) disconnectNode(node *enode.Node) { - f.ns.SetField(node, connAddressField, nil) - f.ns.SetField(node, clientInfoField, nil) -} - -// setDefaultFactors sets the default price factors applied to subsequently connected clients -func (f *clientPool) setDefaultFactors(posFactors, negFactors vfs.PriceFactors) { - f.lock.Lock() - defer f.lock.Unlock() - - f.defaultPosFactors = posFactors - f.defaultNegFactors = negFactors -} - -// capacityInfo returns the total capacity allowance, the total capacity of connected -// clients and the total capacity of connected and prioritized clients -func (f *clientPool) capacityInfo() (uint64, uint64, uint64) { - f.lock.Lock() - defer f.lock.Unlock() - - // total priority active cap will be supported when the token issuer module is added - _, activeCap := f.pp.Active() - return f.capLimit, activeCap, 0 -} - -// setLimits sets the maximum number and total capacity of connected clients, -// dropping some of them if necessary. 
-func (f *clientPool) setLimits(totalConn int, totalCap uint64) { - f.lock.Lock() - defer f.lock.Unlock() - - f.capLimit = totalCap - f.pp.SetLimits(uint64(totalConn), totalCap) -} - -// setCapacity sets the assigned capacity of a connected client -func (f *clientPool) setCapacity(node *enode.Node, freeID string, capacity uint64, bias time.Duration, setCap bool) (uint64, error) { - c, _ := f.ns.GetField(node, clientInfoField).(*clientInfo) - if c == nil { - if setCap { - return 0, fmt.Errorf("client %064x is not connected", node.ID()) - } - c = &clientInfo{node: node} - f.ns.SetField(node, clientInfoField, c) - f.ns.SetField(node, connAddressField, freeID) - if c.balance, _ = f.ns.GetField(node, f.BalanceField).(*vfs.NodeBalance); c.balance == nil { - log.Error("BalanceField is missing", "node", node.ID()) - return 0, fmt.Errorf("BalanceField of %064x is missing", node.ID()) - } - defer func() { - f.ns.SetField(node, connAddressField, nil) - f.ns.SetField(node, clientInfoField, nil) - }() - } - var ( - minPriority int64 - allowed bool - ) - f.ns.Operation(func() { - if !setCap || c.priority { - // check clientInfo.priority inside Operation to ensure thread safety - minPriority, allowed = f.pp.RequestCapacity(node, capacity, bias, setCap) - } - }) - if allowed { - return 0, nil - } - missing := c.balance.PosBalanceMissing(minPriority, capacity, bias) - if missing < 1 { - // ensure that we never return 0 missing and insufficient priority error - missing = 1 - } - return missing, errNoPriority -} - -// setCapacityLocked is the equivalent of setCapacity used when f.lock is already locked -func (f *clientPool) setCapacityLocked(node *enode.Node, freeID string, capacity uint64, minConnTime time.Duration, setCap bool) (uint64, error) { - f.lock.Lock() - defer f.lock.Unlock() - - return f.setCapacity(node, freeID, capacity, minConnTime, setCap) -} - -// forClients calls the supplied callback for either the listed node IDs or all connected -// nodes. It passes a valid clientInfo to the callback and ensures that the necessary -// fields and flags are set in order for BalanceTracker and PriorityPool to work even if -// the node is not connected. -func (f *clientPool) forClients(ids []enode.ID, cb func(client *clientInfo)) { - f.lock.Lock() - defer f.lock.Unlock() - - if len(ids) == 0 { - f.ns.ForEach(nodestate.Flags{}, nodestate.Flags{}, func(node *enode.Node, state nodestate.Flags) { - c, _ := f.ns.GetField(node, clientInfoField).(*clientInfo) - if c != nil { - cb(c) - } - }) - } else { - for _, id := range ids { - node := f.ns.GetNode(id) - if node == nil { - node = enode.SignNull(&enr.Record{}, id) - } - c, _ := f.ns.GetField(node, clientInfoField).(*clientInfo) - if c != nil { - cb(c) - } else { - c = &clientInfo{node: node} - f.ns.SetField(node, clientInfoField, c) - f.ns.SetField(node, connAddressField, "") - if c.balance, _ = f.ns.GetField(node, f.BalanceField).(*vfs.NodeBalance); c.balance != nil { - cb(c) - } else { - log.Error("BalanceField is missing") - } - f.ns.SetField(node, connAddressField, nil) - f.ns.SetField(node, clientInfoField, nil) - } - } - } -} - -// serveCapQuery serves a vflux capacity query. It receives multiple token amount values -// and a bias time value. For each given token amount it calculates the maximum achievable -// capacity in case the amount is added to the balance. 
-func (f *clientPool) serveCapQuery(id enode.ID, freeID string, data []byte) []byte { - var req vflux.CapacityQueryReq - if rlp.DecodeBytes(data, &req) != nil { - return nil - } - if l := len(req.AddTokens); l == 0 || l > vflux.CapacityQueryMaxLen { - return nil - } - node := f.ns.GetNode(id) - if node == nil { - node = enode.SignNull(&enr.Record{}, id) - } - c, _ := f.ns.GetField(node, clientInfoField).(*clientInfo) - if c == nil { - c = &clientInfo{node: node} - f.ns.SetField(node, clientInfoField, c) - f.ns.SetField(node, connAddressField, freeID) - defer func() { - f.ns.SetField(node, connAddressField, nil) - f.ns.SetField(node, clientInfoField, nil) - }() - if c.balance, _ = f.ns.GetField(node, f.BalanceField).(*vfs.NodeBalance); c.balance == nil { - log.Error("BalanceField is missing", "node", node.ID()) - return nil - } - } - // use vfs.CapacityCurve to answer request for multiple newly bought token amounts - curve := f.pp.GetCapacityCurve().Exclude(id) - result := make(vflux.CapacityQueryReply, len(req.AddTokens)) - bias := time.Second * time.Duration(req.Bias) - if f.connectedBias > bias { - bias = f.connectedBias - } - pb, _ := c.balance.GetBalance() - for i, addTokens := range req.AddTokens { - add := addTokens.Int64() - result[i] = curve.MaxCapacity(func(capacity uint64) int64 { - return c.balance.EstimatePriority(capacity, add, 0, bias, false) / int64(capacity) - }) - if add <= 0 && uint64(-add) >= pb && result[i] > f.minCap { - result[i] = f.minCap - } - if result[i] < f.minCap { - result[i] = 0 - } - } - reply, _ := rlp.EncodeToBytes(&result) - return reply -} diff --git a/les/enr_entry.go b/les/enr_entry.go index 8be4a7a00e..307313fb10 100644 --- a/les/enr_entry.go +++ b/les/enr_entry.go @@ -18,7 +18,6 @@ package les import ( "github.com/ethereum/go-ethereum/core/forkid" - "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/dnsdisc" "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/rlp" @@ -36,13 +35,13 @@ func (lesEntry) ENRKey() string { return "les" } // ethEntry is the "eth" ENR entry. This is redeclared here to avoid depending on package eth. type ethEntry struct { ForkID forkid.ID - _ []rlp.RawValue `rlp:"tail"` + Tail []rlp.RawValue `rlp:"tail"` } func (ethEntry) ENRKey() string { return "eth" } // setupDiscovery creates the node discovery source for the eth protocol. -func (eth *LightEthereum) setupDiscovery(cfg *p2p.Config) (enode.Iterator, error) { +func (eth *LightEthereum) setupDiscovery() (enode.Iterator, error) { it := enode.NewFairMix(0) // Enable DNS discovery. @@ -56,7 +55,7 @@ func (eth *LightEthereum) setupDiscovery(cfg *p2p.Config) (enode.Iterator, error } // Enable DHT. 
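// Side note on the ethEntry change above: keeping a tail field makes "eth" ENR
// entries forward-compatible, since unknown future list elements are captured in
// Tail rather than failing the decode. A sketch, assuming the p2p/enr API and a
// placeholder *enr.Record named r:
var entry ethEntry
if err := r.Load(enr.WithEntry(entry.ENRKey(), &entry)); err == nil {
	_ = entry.ForkID // entry.Tail holds any extra elements we don't understand
}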
- if cfg.DiscoveryV5 && eth.p2pServer.DiscV5 != nil { + if eth.udpEnabled { it.AddSource(eth.p2pServer.DiscV5.RandomNodes()) } diff --git a/les/flowcontrol/manager.go b/les/flowcontrol/manager.go index d6d0b1adde..c9e681c144 100644 --- a/les/flowcontrol/manager.go +++ b/les/flowcontrol/manager.go @@ -108,7 +108,7 @@ type ClientManager struct { func NewClientManager(curve PieceWiseLinear, clock mclock.Clock) *ClientManager { cm := &ClientManager{ clock: clock, - rcQueue: prque.New(func(a interface{}, i int) { a.(*ClientNode).queueIndex = i }), + rcQueue: prque.NewWrapAround(func(a interface{}, i int) { a.(*ClientNode).queueIndex = i }), capLastUpdate: clock.Now(), stop: make(chan chan struct{}), } diff --git a/les/metrics.go b/les/metrics.go index 9a79fd1bbd..d356326b76 100644 --- a/les/metrics.go +++ b/les/metrics.go @@ -75,7 +75,6 @@ var ( totalCapacityGauge = metrics.NewRegisteredGauge("les/server/totalCapacity", nil) totalRechargeGauge = metrics.NewRegisteredGauge("les/server/totalRecharge", nil) - totalConnectedGauge = metrics.NewRegisteredGauge("les/server/totalConnected", nil) blockProcessingTimer = metrics.NewRegisteredTimer("les/server/blockProcessingTime", nil) requestServedMeter = metrics.NewRegisteredMeter("les/server/req/avgServedTime", nil) @@ -98,12 +97,8 @@ var ( sqServedGauge = metrics.NewRegisteredGauge("les/server/servingQueue/served", nil) sqQueuedGauge = metrics.NewRegisteredGauge("les/server/servingQueue/queued", nil) - clientConnectedMeter = metrics.NewRegisteredMeter("les/server/clientEvent/connected", nil) - clientActivatedMeter = metrics.NewRegisteredMeter("les/server/clientEvent/activated", nil) - clientDeactivatedMeter = metrics.NewRegisteredMeter("les/server/clientEvent/deactivated", nil) - clientDisconnectedMeter = metrics.NewRegisteredMeter("les/server/clientEvent/disconnected", nil) - clientFreezeMeter = metrics.NewRegisteredMeter("les/server/clientEvent/freeze", nil) - clientErrorMeter = metrics.NewRegisteredMeter("les/server/clientEvent/error", nil) + clientFreezeMeter = metrics.NewRegisteredMeter("les/server/clientEvent/freeze", nil) + clientErrorMeter = metrics.NewRegisteredMeter("les/server/clientEvent/error", nil) requestRTT = metrics.NewRegisteredTimer("les/client/req/rtt", nil) requestSendDelay = metrics.NewRegisteredTimer("les/client/req/sendDelay", nil) diff --git a/les/peer.go b/les/peer.go index 20d339ec02..9f59517acc 100644 --- a/les/peer.go +++ b/les/peer.go @@ -17,6 +17,7 @@ package les import ( + "crypto/ecdsa" "errors" "fmt" "math/big" @@ -37,6 +38,7 @@ import ( vfs "github.com/ethereum/go-ethereum/les/vflux/server" "github.com/ethereum/go-ethereum/light" "github.com/ethereum/go-ethereum/p2p" + "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/params/types/ctypes" "github.com/ethereum/go-ethereum/rlp" ) @@ -762,15 +764,22 @@ type clientPeer struct { responseLock sync.Mutex responseCount uint64 // Counter to generate an unique id for request processing. - balance *vfs.NodeBalance + balance vfs.ConnectedBalance // invalidLock is used for protecting invalidCount. invalidLock sync.RWMutex invalidCount utils.LinearExpiredValue // Counter the invalid request the client peer has made. - server bool - errCh chan error - fcClient *flowcontrol.ClientNode // Server side mirror token bucket. + capacity uint64 + // lastAnnounce is the last broadcast created by the server; may be newer than the last head + // sent to the specific client (stored in headInfo) if capacity is zero. 
In this case the + // latest head is sent when the client gains non-zero capacity. + lastAnnounce announceData + + connectedAt mclock.AbsTime + server bool + errCh chan error + fcClient *flowcontrol.ClientNode // Server side mirror token bucket. } func newClientPeer(version int, network uint64, p *p2p.Peer, rw p2p.MsgReadWriter) *clientPeer { @@ -789,9 +798,9 @@ func newClientPeer(version int, network uint64, p *p2p.Peer, rw p2p.MsgReadWrite } } -// freeClientId returns a string identifier for the peer. Multiple peers with +// FreeClientId returns a string identifier for the peer. Multiple peers with // the same identifier can not be connected in free mode simultaneously. -func (p *clientPeer) freeClientId() string { +func (p *clientPeer) FreeClientId() string { if addr, ok := p.RemoteAddr().(*net.TCPAddr); ok { if addr.IP.IsLoopback() { // using peer id instead of loopback ip address allows multiple free @@ -921,25 +930,69 @@ func (p *clientPeer) sendAnnounce(request announceData) error { return p2p.Send(p.rw, AnnounceMsg, request) } -// allowInactive implements clientPoolPeer -func (p *clientPeer) allowInactive() bool { - return false +// InactiveAllowance implements vfs.clientPeer +func (p *clientPeer) InactiveAllowance() time.Duration { + return 0 // will return more than zero for les/5 clients +} + +// getCapacity returns the current capacity of the peer +func (p *clientPeer) getCapacity() uint64 { + p.lock.RLock() + defer p.lock.RUnlock() + + return p.capacity } -// updateCapacity updates the request serving capacity assigned to a given client -// and also sends an announcement about the updated flow control parameters -func (p *clientPeer) updateCapacity(cap uint64) { +// UpdateCapacity updates the request serving capacity assigned to a given client +// and also sends an announcement about the updated flow control parameters. +// Note: UpdateCapacity implements vfs.clientPeer and should not block. The requested +// parameter is true if the callback was initiated by ClientPool.SetCapacity on the given peer. +func (p *clientPeer) UpdateCapacity(newCap uint64, requested bool) { p.lock.Lock() defer p.lock.Unlock() - if cap != p.fcParams.MinRecharge { - p.fcParams = flowcontrol.ServerParams{MinRecharge: cap, BufLimit: cap * bufLimitRatio} + if newCap != p.fcParams.MinRecharge { + p.fcParams = flowcontrol.ServerParams{MinRecharge: newCap, BufLimit: newCap * bufLimitRatio} p.fcClient.UpdateParams(p.fcParams) var kvList keyValueList - kvList = kvList.add("flowControl/MRR", cap) - kvList = kvList.add("flowControl/BL", cap*bufLimitRatio) + kvList = kvList.add("flowControl/MRR", newCap) + kvList = kvList.add("flowControl/BL", newCap*bufLimitRatio) p.queueSend(func() { p.sendAnnounce(announceData{Update: kvList}) }) } + + if p.capacity == 0 && newCap != 0 { + p.sendLastAnnounce() + } + p.capacity = newCap +} + +// announceOrStore sends the given head announcement to the client if the client is +// active (capacity != 0) and the same announcement hasn't been sent before. If the +// client is inactive the announcement is stored and sent later if the client is +// activated again. 
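// A small usage sketch of the activation hand-off described above (newHead and
// minCap are placeholders; the methods are defined just below):
p.UpdateCapacity(0, false)      // deactivated: new heads are only stored
p.announceOrStore(newHead)      // kept in p.lastAnnounce, nothing is sent yet
p.UpdateCapacity(minCap, false) // reactivated: sendLastAnnounce flushes newHead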
+func (p *clientPeer) announceOrStore(announce announceData) {
+	p.lock.Lock()
+	defer p.lock.Unlock()
+
+	p.lastAnnounce = announce
+	if p.capacity != 0 {
+		p.sendLastAnnounce()
+	}
+}
+
+// sendLastAnnounce sends the stored head announcement to the client if it hasn't
+// been sent before
+func (p *clientPeer) sendLastAnnounce() {
+	if p.lastAnnounce.Td == nil {
+		return
+	}
+	if p.headInfo.Td == nil || p.lastAnnounce.Td.Cmp(p.headInfo.Td) > 0 {
+		if !p.queueSend(func() { p.sendAnnounce(p.lastAnnounce) }) {
+			p.Log().Debug("Dropped announcement because queue is full", "number", p.lastAnnounce.Number, "hash", p.lastAnnounce.Hash)
+		} else {
+			p.Log().Debug("Sent announcement", "number", p.lastAnnounce.Number, "hash", p.lastAnnounce.Hash)
+		}
+		p.headInfo = blockInfo{Hash: p.lastAnnounce.Hash, Number: p.lastAnnounce.Number, Td: p.lastAnnounce.Td}
+	}
 }
 
 // freezeClient temporarily puts the client in a frozen state which means all
@@ -1064,6 +1117,11 @@ func (p *clientPeer) getInvalid() uint64 {
 	return p.invalidCount.Value(mclock.Now())
 }
 
+// Disconnect implements vfs.clientPeer
+func (p *clientPeer) Disconnect() {
+	p.Peer.Disconnect(p2p.DiscRequested)
+}
+
 // serverPeerSubscriber is an interface to notify services about added or
 // removed server peers
 type serverPeerSubscriber interface {
@@ -1221,3 +1279,181 @@ func (ps *serverPeerSet) close() {
 	}
 	ps.closed = true
 }
+
+// clientPeerSet represents the set of active client peers currently
+// participating in the Light Ethereum sub-protocol.
+type clientPeerSet struct {
+	peers  map[enode.ID]*clientPeer
+	lock   sync.RWMutex
+	closed bool
+
+	privateKey                   *ecdsa.PrivateKey
+	lastAnnounce, signedAnnounce announceData
+}
+
+// newClientPeerSet creates a new peer set to track the client peers.
+func newClientPeerSet() *clientPeerSet {
+	return &clientPeerSet{peers: make(map[enode.ID]*clientPeer)}
+}
+
+// register adds a new peer into the peer set, or returns an error if the
+// peer is already known.
+func (ps *clientPeerSet) register(peer *clientPeer) error {
+	ps.lock.Lock()
+	defer ps.lock.Unlock()
+
+	if ps.closed {
+		return errClosed
+	}
+	if _, exist := ps.peers[peer.ID()]; exist {
+		return errAlreadyRegistered
+	}
+	ps.peers[peer.ID()] = peer
+	ps.announceOrStore(peer)
+	return nil
+}
+
+// unregister removes a remote peer from the peer set, disabling any further
+// actions to/from that particular entity. It also initiates disconnection
+// at the networking layer.
+func (ps *clientPeerSet) unregister(id enode.ID) error {
+	ps.lock.Lock()
+	defer ps.lock.Unlock()
+
+	p, ok := ps.peers[id]
+	if !ok {
+		return errNotRegistered
+	}
+	delete(ps.peers, id)
+	p.Peer.Disconnect(p2p.DiscRequested)
+	return nil
+}
+
+// ids returns a list of all registered peer IDs
+func (ps *clientPeerSet) ids() []enode.ID {
+	ps.lock.RLock()
+	defer ps.lock.RUnlock()
+
+	var ids []enode.ID
+	for id := range ps.peers {
+		ids = append(ids, id)
+	}
+	return ids
+}
+
+// peer retrieves the registered peer with the given id.
+func (ps *clientPeerSet) peer(id enode.ID) *clientPeer {
+	ps.lock.RLock()
+	defer ps.lock.RUnlock()
+
+	return ps.peers[id]
+}
+
+// len returns the current number of peers in the set.
+func (ps *clientPeerSet) len() int {
+	ps.lock.RLock()
+	defer ps.lock.RUnlock()
+
+	return len(ps.peers)
+}
+
+// setSignerKey sets the signer key for signed announcements. Should be called before
+// starting the protocol handler.
+func (ps *clientPeerSet) setSignerKey(privateKey *ecdsa.PrivateKey) {
+	ps.privateKey = privateKey
+}
+
+// broadcast sends the given announcements to all active peers
+func (ps *clientPeerSet) broadcast(announce announceData) {
+	ps.lock.Lock()
+	defer ps.lock.Unlock()
+
+	ps.lastAnnounce = announce
+	for _, peer := range ps.peers {
+		ps.announceOrStore(peer)
+	}
+}
+
+// announceOrStore sends the requested type of announcement to the given peer or stores
+// it for later if the peer is inactive (capacity == 0).
+func (ps *clientPeerSet) announceOrStore(p *clientPeer) {
+	if ps.lastAnnounce.Td == nil {
+		return
+	}
+	switch p.announceType {
+	case announceTypeSimple:
+		p.announceOrStore(ps.lastAnnounce)
+	case announceTypeSigned:
+		if ps.signedAnnounce.Hash != ps.lastAnnounce.Hash {
+			ps.signedAnnounce = ps.lastAnnounce
+			ps.signedAnnounce.sign(ps.privateKey)
+		}
+		p.announceOrStore(ps.signedAnnounce)
+	}
+}
+
+// close disconnects all peers. No new peers can be registered
+// after close has returned.
+func (ps *clientPeerSet) close() {
+	ps.lock.Lock()
+	defer ps.lock.Unlock()
+
+	for _, p := range ps.peers {
+		p.Peer.Disconnect(p2p.DiscQuitting)
+	}
+	ps.closed = true
+}
+
+// serverSet is a special set which contains all connected les servers.
+// Les servers will also be discovered by the discovery protocol because they
+// run the LES protocol as well. We can't drop them even though they are
+// useless to us (the server), because they may still be useful to other
+// devp2p protocols (e.g. eth) carried on the same connection.
+type serverSet struct {
+	lock   sync.Mutex
+	set    map[string]*clientPeer
+	closed bool
+}
+
+func newServerSet() *serverSet {
+	return &serverSet{set: make(map[string]*clientPeer)}
+}
+
+func (s *serverSet) register(peer *clientPeer) error {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+
+	if s.closed {
+		return errClosed
+	}
+	if _, exist := s.set[peer.id]; exist {
+		return errAlreadyRegistered
+	}
+	s.set[peer.id] = peer
+	return nil
+}
+
+func (s *serverSet) unregister(peer *clientPeer) error {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+
+	if s.closed {
+		return errClosed
+	}
+	if _, exist := s.set[peer.id]; !exist {
+		return errNotRegistered
+	}
+	delete(s.set, peer.id)
+	peer.Peer.Disconnect(p2p.DiscQuitting)
+	return nil
+}
+
+func (s *serverSet) close() {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+
+	for _, p := range s.set {
+		p.Peer.Disconnect(p2p.DiscQuitting)
+	}
+	s.closed = true
+}
diff --git a/les/server.go b/les/server.go
index ec1bc5a58f..e364daae22 100644
--- a/les/server.go
+++ b/les/server.go
@@ -18,7 +18,6 @@ package les
 
 import (
 	"crypto/ecdsa"
-	"reflect"
 	"time"
 
 	"github.com/ethereum/go-ethereum/common/mclock"
@@ -26,7 +25,6 @@ import (
 	"github.com/ethereum/go-ethereum/eth/ethconfig"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/les/flowcontrol"
-	"github.com/ethereum/go-ethereum/les/vflux"
 	vfs "github.com/ethereum/go-ethereum/les/vflux/server"
 	"github.com/ethereum/go-ethereum/light"
 	"github.com/ethereum/go-ethereum/log"
@@ -34,24 +32,16 @@ import (
 	"github.com/ethereum/go-ethereum/p2p"
 	"github.com/ethereum/go-ethereum/p2p/enode"
 	"github.com/ethereum/go-ethereum/p2p/enr"
-	"github.com/ethereum/go-ethereum/p2p/nodestate"
 	"github.com/ethereum/go-ethereum/params/vars"
 	"github.com/ethereum/go-ethereum/rpc"
 )
 
 var (
-	serverSetup      = &nodestate.Setup{}
-	clientPeerField  = serverSetup.NewField("clientPeer", reflect.TypeOf(&clientPeer{}))
-	clientInfoField  = serverSetup.NewField("clientInfo", reflect.TypeOf(&clientInfo{}))
-	connAddressField = serverSetup.NewField("connAddr",
reflect.TypeOf("")) - balanceTrackerSetup = vfs.NewBalanceTrackerSetup(serverSetup) - priorityPoolSetup = vfs.NewPriorityPoolSetup(serverSetup) + defaultPosFactors = vfs.PriceFactors{TimeFactor: 0, CapacityFactor: 1, RequestFactor: 1} + defaultNegFactors = vfs.PriceFactors{TimeFactor: 0, CapacityFactor: 1, RequestFactor: 1} ) -func init() { - balanceTrackerSetup.Connect(connAddressField, priorityPoolSetup.CapacityField) - priorityPoolSetup.Connect(balanceTrackerSetup.BalanceField, balanceTrackerSetup.UpdateFlag) // NodeBalance implements nodePriority -} +const defaultConnectedBias = time.Minute * 3 type ethBackend interface { ArchiveMode() bool @@ -65,10 +55,10 @@ type ethBackend interface { type LesServer struct { lesCommons - ns *nodestate.NodeStateMachine archiveMode bool // Flag whether the ethereum node runs in archive mode. handler *serverHandler - broadcaster *broadcaster + peers *clientPeerSet + serverset *serverSet vfluxServer *vfs.Server privateKey *ecdsa.PrivateKey @@ -77,7 +67,7 @@ type LesServer struct { costTracker *costTracker defParams flowcontrol.ServerParams servingQueue *servingQueue - clientPool *clientPool + clientPool *vfs.ClientPool minCapacity, maxCapacity uint64 threadsIdle int // Request serving threads count when system is idle. @@ -87,11 +77,10 @@ type LesServer struct { } func NewLesServer(node *node.Node, e ethBackend, config *ethconfig.Config) (*LesServer, error) { - lesDb, err := node.OpenDatabase("les.server", 0, 0, "eth/db/les.server") + lesDb, err := node.OpenDatabase("les.server", 0, 0, "eth/db/lesserver/", false) if err != nil { return nil, err } - ns := nodestate.NewNodeStateMachine(nil, nil, mclock.System{}, serverSetup) // Calculate the number of threads used to service the light client // requests based on the user-specified value. 
threads := config.LightServ * 4 / 100 @@ -111,9 +100,9 @@ func NewLesServer(node *node.Node, e ethBackend, config *ethconfig.Config) (*Les bloomTrieIndexer: light.NewBloomTrieIndexer(e.ChainDb(), nil, vars.BloomBitsBlocks, vars.BloomTrieFrequency, true), closeCh: make(chan struct{}), }, - ns: ns, archiveMode: e.ArchiveMode(), - broadcaster: newBroadcaster(ns), + peers: newClientPeerSet(), + serverset: newServerSet(), vfluxServer: vfs.NewServer(time.Millisecond * 10), fcManager: flowcontrol.NewClientManager(nil, &mclock.System{}), servingQueue: newServingQueue(int64(time.Millisecond*10), float64(config.LightServ)/100), @@ -121,7 +110,6 @@ func NewLesServer(node *node.Node, e ethBackend, config *ethconfig.Config) (*Les threadsIdle: threads, p2pSrv: node.Server(), } - srv.vfluxServer.Register(srv) issync := e.Synced if config.LightNoSyncServe { issync = func() bool { return true } @@ -149,8 +137,10 @@ func NewLesServer(node *node.Node, e ethBackend, config *ethconfig.Config) (*Les srv.maxCapacity = totalRecharge } srv.fcManager.SetCapacityLimits(srv.minCapacity, srv.maxCapacity, srv.minCapacity*2) - srv.clientPool = newClientPool(ns, lesDb, srv.minCapacity, defaultConnectedBias, mclock.System{}, srv.dropClient) - srv.clientPool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 0, CapacityFactor: 1, RequestFactor: 1}, vfs.PriceFactors{TimeFactor: 0, CapacityFactor: 1, RequestFactor: 1}) + srv.clientPool = vfs.NewClientPool(lesDb, srv.minCapacity, defaultConnectedBias, mclock.System{}, issync) + srv.clientPool.Start() + srv.clientPool.SetDefaultFactors(defaultPosFactors, defaultNegFactors) + srv.vfluxServer.Register(srv.clientPool, "les", "Ethereum light client service") checkpoint := srv.latestLocalCheckpoint() if !checkpoint.Empty() { @@ -162,14 +152,6 @@ func NewLesServer(node *node.Node, e ethBackend, config *ethconfig.Config) (*Les node.RegisterProtocols(srv.Protocols()) node.RegisterAPIs(srv.APIs()) node.RegisterLifecycle(srv) - - // disconnect all peers at nsm shutdown - ns.SubscribeField(clientPeerField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) { - if state.Equals(serverSetup.OfflineFlag()) && oldValue != nil { - oldValue.(*clientPeer).Peer.Disconnect(p2p.DiscRequested) - } - }) - ns.Start() return srv, nil } @@ -198,7 +180,7 @@ func (s *LesServer) APIs() []rpc.API { func (s *LesServer) Protocols() []p2p.Protocol { ps := s.makeProtocols(ServerProtocolVersions, s.handler.runPeer, func(id enode.ID) interface{} { - if p := s.getClient(id); p != nil { + if p := s.peers.peer(id); p != nil { return p.Info() } return nil @@ -215,7 +197,7 @@ func (s *LesServer) Protocols() []p2p.Protocol { // Start starts the LES server func (s *LesServer) Start() error { s.privateKey = s.p2pSrv.PrivateKey - s.broadcaster.setSignerKey(s.privateKey) + s.peers.setSignerKey(s.privateKey) s.handler.start() s.wg.Add(1) go s.capacityManagement() @@ -229,8 +211,9 @@ func (s *LesServer) Start() error { func (s *LesServer) Stop() error { close(s.closeCh) - s.clientPool.stop() - s.ns.Stop() + s.clientPool.Stop() + s.serverset.close() + s.peers.close() s.fcManager.Stop() s.costTracker.stop() s.handler.stop() @@ -261,7 +244,7 @@ func (s *LesServer) capacityManagement() { totalCapacityCh := make(chan uint64, 100) totalCapacity := s.fcManager.SubscribeTotalCapacity(totalCapacityCh) - s.clientPool.setLimits(s.config.LightPeers, totalCapacity) + s.clientPool.SetLimits(uint64(s.config.LightPeers), totalCapacity) var ( busy bool @@ -298,39 +281,9 @@ func (s *LesServer) capacityManagement() { 
log.Warn("Reduced free peer connections", "from", freePeers, "to", newFreePeers) } freePeers = newFreePeers - s.clientPool.setLimits(s.config.LightPeers, totalCapacity) + s.clientPool.SetLimits(uint64(s.config.LightPeers), totalCapacity) case <-s.closeCh: return } } } - -func (s *LesServer) getClient(id enode.ID) *clientPeer { - if node := s.ns.GetNode(id); node != nil { - if p, ok := s.ns.GetField(node, clientPeerField).(*clientPeer); ok { - return p - } - } - return nil -} - -func (s *LesServer) dropClient(id enode.ID) { - if p := s.getClient(id); p != nil { - p.Peer.Disconnect(p2p.DiscRequested) - } -} - -// ServiceInfo implements vfs.Service -func (s *LesServer) ServiceInfo() (string, string) { - return "les", "Ethereum light client service" -} - -// Handle implements vfs.Service -func (s *LesServer) Handle(id enode.ID, address string, name string, data []byte) []byte { - switch name { - case vflux.CapacityQueryName: - return s.clientPool.serveCapQuery(id, address, data) - default: - return nil - } -} diff --git a/les/server_handler.go b/les/server_handler.go index 7651d03cab..5e12136d90 100644 --- a/les/server_handler.go +++ b/les/server_handler.go @@ -17,7 +17,6 @@ package les import ( - "crypto/ecdsa" "errors" "sync" "sync/atomic" @@ -31,13 +30,10 @@ import ( "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethdb" - vfs "github.com/ethereum/go-ethereum/les/vflux/server" "github.com/ethereum/go-ethereum/light" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/p2p" - "github.com/ethereum/go-ethereum/p2p/enode" - "github.com/ethereum/go-ethereum/p2p/nodestate" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" ) @@ -59,7 +55,6 @@ const ( var ( errTooManyInvalidRequest = errors.New("too many invalid requests made") - errFullClientPool = errors.New("client pool is full") ) // serverHandler is responsible for serving light client and process @@ -128,32 +123,18 @@ func (h *serverHandler) handle(p *clientPeer) error { p.Log().Debug("Light Ethereum handshake failed", "err", err) return err } - // Reject the duplicated peer, otherwise register it to peerset. - var registered bool - if err := h.server.ns.Operation(func() { - if h.server.ns.GetField(p.Node(), clientPeerField) != nil { - registered = true - } else { - h.server.ns.SetFieldSub(p.Node(), clientPeerField, p) - } - }); err != nil { - return err - } - if registered { - return errAlreadyRegistered - } - defer func() { - h.server.ns.SetField(p.Node(), clientPeerField, nil) - if p.fcClient != nil { // is nil when connecting another server - p.fcClient.Disconnect() - } - }() if p.server { + if err := h.server.serverset.register(p); err != nil { + return err + } // connected to another server, no messages expected, just wait for disconnection _, err := p.rw.ReadMsg() + h.server.serverset.unregister(p) return err } + defer p.fcClient.Disconnect() // set by handshake if it's not another server + // Reject light clients if server is not synced. 
// // Put this checking here, so that "non-synced" les-server peers are still allowed @@ -162,30 +143,31 @@ func (h *serverHandler) handle(p *clientPeer) error { p.Log().Debug("Light server not synced, rejecting peer") return p2p.DiscRequested } - // Disconnect the inbound peer if it's rejected by clientPool - if cap, err := h.server.clientPool.connect(p); cap != p.fcParams.MinRecharge || err != nil { - p.Log().Debug("Light Ethereum peer rejected", "err", errFullClientPool) - return errFullClientPool + if err := h.server.peers.register(p); err != nil { + return err } - p.balance, _ = h.server.ns.GetField(p.Node(), h.server.clientPool.BalanceField).(*vfs.NodeBalance) - if p.balance == nil { + if p.balance = h.server.clientPool.Register(p); p.balance == nil { + h.server.peers.unregister(p.ID()) + p.Log().Debug("Client pool already closed") return p2p.DiscRequested } - activeCount, _ := h.server.clientPool.pp.Active() + activeCount, _ := h.server.clientPool.Active() clientConnectionGauge.Update(int64(activeCount)) + p.connectedAt = mclock.Now() var wg sync.WaitGroup // Wait group used to track all in-flight task routines. - connectedAt := mclock.Now() defer func() { wg.Wait() // Ensure all background task routines have exited. - h.server.clientPool.disconnect(p) + h.server.clientPool.Unregister(p) + h.server.peers.unregister(p.ID()) p.balance = nil - activeCount, _ := h.server.clientPool.pp.Active() + activeCount, _ := h.server.clientPool.Active() clientConnectionGauge.Update(int64(activeCount)) - connectionTimer.Update(time.Duration(mclock.Now() - connectedAt)) + connectionTimer.Update(time.Duration(mclock.Now() - p.connectedAt)) }() - // Mark the peer starts to be served. + + // Mark the peer as being served. atomic.StoreUint32(&p.serving, 1) defer atomic.StoreUint32(&p.serving, 0) @@ -448,78 +430,9 @@ func (h *serverHandler) broadcastLoop() { } lastHead, lastTd = header, td log.Debug("Announcing block to peers", "number", number, "hash", hash, "td", td, "reorg", reorg) - h.server.broadcaster.broadcast(announceData{Hash: hash, Number: number, Td: td, ReorgDepth: reorg}) + h.server.peers.broadcast(announceData{Hash: hash, Number: number, Td: td, ReorgDepth: reorg}) case <-h.closeCh: return } } } - -// broadcaster sends new header announcements to active client peers -type broadcaster struct { - ns *nodestate.NodeStateMachine - privateKey *ecdsa.PrivateKey - lastAnnounce, signedAnnounce announceData -} - -// newBroadcaster creates a new broadcaster -func newBroadcaster(ns *nodestate.NodeStateMachine) *broadcaster { - b := &broadcaster{ns: ns} - ns.SubscribeState(priorityPoolSetup.ActiveFlag, func(node *enode.Node, oldState, newState nodestate.Flags) { - if newState.Equals(priorityPoolSetup.ActiveFlag) { - // send last announcement to activated peers - b.sendTo(node) - } - }) - return b -} - -// setSignerKey sets the signer key for signed announcements. Should be called before -// starting the protocol handler. 
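// The rewritten handle() above pairs every registration step with a cleanup,
// roughly (a condensed sketch of the hunk, not the literal code):
if err := h.server.peers.register(p); err != nil {
	return err
}
if p.balance = h.server.clientPool.Register(p); p.balance == nil {
	h.server.peers.unregister(p.ID())
	return p2p.DiscRequested
}
defer func() {
	h.server.clientPool.Unregister(p)
	h.server.peers.unregister(p.ID())
}()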
-func (b *broadcaster) setSignerKey(privateKey *ecdsa.PrivateKey) { - b.privateKey = privateKey -} - -// broadcast sends the given announcements to all active peers -func (b *broadcaster) broadcast(announce announceData) { - b.ns.Operation(func() { - // iterate in an Operation to ensure that the active set does not change while iterating - b.lastAnnounce = announce - b.ns.ForEach(priorityPoolSetup.ActiveFlag, nodestate.Flags{}, func(node *enode.Node, state nodestate.Flags) { - b.sendTo(node) - }) - }) -} - -// sendTo sends the most recent announcement to the given node unless the same or higher Td -// announcement has already been sent. -func (b *broadcaster) sendTo(node *enode.Node) { - if b.lastAnnounce.Td == nil { - return - } - if p, _ := b.ns.GetField(node, clientPeerField).(*clientPeer); p != nil { - if p.headInfo.Td == nil || b.lastAnnounce.Td.Cmp(p.headInfo.Td) > 0 { - announce := b.lastAnnounce - switch p.announceType { - case announceTypeSimple: - if !p.queueSend(func() { p.sendAnnounce(announce) }) { - log.Debug("Drop announcement because queue is full", "number", announce.Number, "hash", announce.Hash) - } else { - log.Debug("Sent announcement", "number", announce.Number, "hash", announce.Hash) - } - case announceTypeSigned: - if b.signedAnnounce.Hash != b.lastAnnounce.Hash { - b.signedAnnounce = b.lastAnnounce - b.signedAnnounce.sign(b.privateKey) - } - announce := b.signedAnnounce - if !p.queueSend(func() { p.sendAnnounce(announce) }) { - log.Debug("Drop announcement because queue is full", "number", announce.Number, "hash", announce.Hash) - } else { - log.Debug("Sent announcement", "number", announce.Number, "hash", announce.Hash) - } - } - p.headInfo = blockInfo{b.lastAnnounce.Hash, b.lastAnnounce.Number, b.lastAnnounce.Td} - } - } -} diff --git a/les/server_requests.go b/les/server_requests.go index 07f30b1b73..bab5f733d5 100644 --- a/les/server_requests.go +++ b/les/server_requests.go @@ -206,8 +206,8 @@ func handleGetBlockHeaders(msg Decoder) (serveRequestFn, uint64, uint64, error) next = current + r.Query.Skip + 1 ) if next <= current { - infos, _ := json.MarshalIndent(p.Peer.Info(), "", " ") - p.Log().Warn("GetBlockHeaders skip overflow attack", "current", current, "skip", r.Query.Skip, "next", next, "attacker", infos) + infos, _ := json.Marshal(p.Peer.Info()) + p.Log().Warn("GetBlockHeaders skip overflow attack", "current", current, "skip", r.Query.Skip, "next", next, "attacker", string(infos)) unknown = true } else { if header := bc.GetHeaderByNumber(next); header != nil { diff --git a/les/servingqueue.go b/les/servingqueue.go index 9db84e6159..16e064cb3f 100644 --- a/les/servingqueue.go +++ b/les/servingqueue.go @@ -123,7 +123,7 @@ func (t *servingTask) waitOrStop() bool { // newServingQueue returns a new servingQueue func newServingQueue(suspendBias int64, utilTarget float64) *servingQueue { sq := &servingQueue{ - queue: prque.New(nil), + queue: prque.NewWrapAround(nil), suspendBias: suspendBias, queueAddCh: make(chan *servingTask, 100), queueBestCh: make(chan *servingTask), @@ -279,7 +279,7 @@ func (sq *servingQueue) updateRecentTime() { func (sq *servingQueue) addTask(task *servingTask) { if sq.best == nil { sq.best = task - } else if task.priority > sq.best.priority { + } else if task.priority-sq.best.priority > 0 { sq.queue.Push(sq.best, sq.best.priority) sq.best = task } else { diff --git a/les/state_accessor.go b/les/state_accessor.go index d3c89f3bef..c291b43a6d 100644 --- a/les/state_accessor.go +++ b/les/state_accessor.go @@ -29,41 +29,27 @@ import ( ) // 
stateAtBlock retrieves the state database associated with a certain block. -func (leth *LightEthereum) stateAtBlock(ctx context.Context, block *types.Block, reexec uint64) (*state.StateDB, func(), error) { - return light.NewState(ctx, block.Header(), leth.odr), func() {}, nil -} - -// statesInRange retrieves a batch of state databases associated with the specific -// block ranges. -func (leth *LightEthereum) statesInRange(ctx context.Context, fromBlock *types.Block, toBlock *types.Block, reexec uint64) ([]*state.StateDB, func(), error) { - var states []*state.StateDB - for number := fromBlock.NumberU64(); number <= toBlock.NumberU64(); number++ { - header, err := leth.blockchain.GetHeaderByNumberOdr(ctx, number) - if err != nil { - return nil, nil, err - } - states = append(states, light.NewState(ctx, header, leth.odr)) - } - return states, nil, nil +func (leth *LightEthereum) stateAtBlock(ctx context.Context, block *types.Block, reexec uint64) (*state.StateDB, error) { + return light.NewState(ctx, block.Header(), leth.odr), nil } // stateAtTransaction returns the execution environment of a certain transaction. -func (leth *LightEthereum) stateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, func(), error) { +func (leth *LightEthereum) stateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, error) { // Short circuit if it's genesis block. if block.NumberU64() == 0 { - return nil, vm.BlockContext{}, nil, nil, errors.New("no transaction in genesis") + return nil, vm.BlockContext{}, nil, errors.New("no transaction in genesis") } // Create the parent state database parent, err := leth.blockchain.GetBlock(ctx, block.ParentHash(), block.NumberU64()-1) if err != nil { - return nil, vm.BlockContext{}, nil, nil, err + return nil, vm.BlockContext{}, nil, err } - statedb, _, err := leth.stateAtBlock(ctx, parent, reexec) + statedb, err := leth.stateAtBlock(ctx, parent, reexec) if err != nil { - return nil, vm.BlockContext{}, nil, nil, err + return nil, vm.BlockContext{}, nil, err } if txIndex == 0 && len(block.Transactions()) == 0 { - return nil, vm.BlockContext{}, statedb, func() {}, nil + return nil, vm.BlockContext{}, statedb, nil } // Recompute transactions up to the target index. signer := types.MakeSigner(leth.blockchain.Config(), block.Number()) @@ -72,17 +58,18 @@ func (leth *LightEthereum) stateAtTransaction(ctx context.Context, block *types. 
msg, _ := tx.AsMessage(signer) txContext := core.NewEVMTxContext(msg) context := core.NewEVMBlockContext(block.Header(), leth.blockchain, nil) + statedb.Prepare(tx.Hash(), block.Hash(), idx) if idx == txIndex { - return msg, context, statedb, func() {}, nil + return msg, context, statedb, nil } // Not yet the searched for transaction, execute on top of the current state vmenv := vm.NewEVM(context, txContext, statedb, leth.blockchain.Config(), vm.Config{}) if _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(tx.Gas())); err != nil { - return nil, vm.BlockContext{}, nil, nil, fmt.Errorf("transaction %#x failed: %v", tx.Hash(), err) + return nil, vm.BlockContext{}, nil, fmt.Errorf("transaction %#x failed: %v", tx.Hash(), err) } // Ensure any modifications are committed to the state // Only delete empty objects if EIP158/161 (a.k.a Spurious Dragon) is in effect statedb.Finalise(vmenv.ChainConfig().IsEnabled(vmenv.ChainConfig().GetEIP161dTransition, block.Number())) } - return nil, vm.BlockContext{}, nil, nil, fmt.Errorf("transaction index %d out of range for block %#x", txIndex, block.Hash()) + return nil, vm.BlockContext{}, nil, fmt.Errorf("transaction index %d out of range for block %#x", txIndex, block.Hash()) } diff --git a/les/test_helper.go b/les/test_helper.go index 52a8cd09db..77cbfedf49 100644 --- a/les/test_helper.go +++ b/les/test_helper.go @@ -45,10 +45,10 @@ import ( "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/les/checkpointoracle" "github.com/ethereum/go-ethereum/les/flowcontrol" + vfs "github.com/ethereum/go-ethereum/les/vflux/server" "github.com/ethereum/go-ethereum/light" "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/enode" - "github.com/ethereum/go-ethereum/p2p/nodestate" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params/types/ctypes" "github.com/ethereum/go-ethereum/params/types/genesisT" @@ -287,7 +287,6 @@ func newTestServerHandler(blocks int, indexers []*core.ChainIndexer, db ethdb.Da } oracle = checkpointoracle.New(checkpointConfig, getLocal) } - ns := nodestate.NewNodeStateMachine(nil, nil, mclock.System{}, serverSetup) server := &LesServer{ lesCommons: lesCommons{ genesis: genesis.Hash(), @@ -299,8 +298,7 @@ func newTestServerHandler(blocks int, indexers []*core.ChainIndexer, db ethdb.Da oracle: oracle, closeCh: make(chan struct{}), }, - ns: ns, - broadcaster: newBroadcaster(ns), + peers: newClientPeerSet(), servingQueue: newServingQueue(int64(time.Millisecond*10), 1), defParams: flowcontrol.ServerParams{ BufLimit: testBufLimit, @@ -310,18 +308,22 @@ func newTestServerHandler(blocks int, indexers []*core.ChainIndexer, db ethdb.Da } server.costTracker, server.minCapacity = newCostTracker(db, server.config) server.costTracker.testCostList = testCostList(0) // Disable flow control mechanism. 
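// Usage note for the NewFuzzerPeer change below: callers now receive a closer and
// must invoke it, otherwise the peer's exec queue goroutine leaks (lpv2 is assumed
// to be the les protocol version constant; a sketch):
p, closer := NewFuzzerPeer(lpv2)
defer closer()
_ = p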
-	server.clientPool = newClientPool(ns, db, testBufRecharge, defaultConnectedBias, clock, func(id enode.ID) {})
-	server.clientPool.setLimits(10000, 10000) // Assign enough capacity for clientpool
+	server.clientPool = vfs.NewClientPool(db, testBufRecharge, defaultConnectedBias, clock, alwaysTrueFn)
+	server.clientPool.Start()
+	server.clientPool.SetLimits(10000, 10000) // Assign enough capacity for clientpool
 	server.handler = newServerHandler(server, simulation.Blockchain(), db, txpool, func() bool { return true })
 	if server.oracle != nil {
 		server.oracle.Start(simulation)
 	}
 	server.servingQueue.setThreads(4)
-	ns.Start()
 	server.handler.start()
 	return server.handler, simulation
 }
 
+func alwaysTrueFn() bool {
+	return true
+}
+
 // testPeer is a simulated peer to allow testing direct network calls.
 type testPeer struct {
 	cpeer *clientPeer
@@ -664,6 +666,10 @@ func newClientServerEnv(t *testing.T, config testnetConfig) (*testServer, *testC
 	return s, c, teardown
 }
 
-func NewFuzzerPeer(version int) *clientPeer {
-	return newClientPeer(version, 0, p2p.NewPeer(enode.ID{}, "", nil), nil)
+// NewFuzzerPeer creates a client peer for test purposes, and also returns
+// a function to close the peer: this is needed to avoid goroutine leaks in the
+// exec queue.
+func NewFuzzerPeer(version int) (p *clientPeer, closer func()) {
+	p = newClientPeer(version, 0, p2p.NewPeer(enode.ID{}, "", nil), nil)
+	return p, func() { p.peerCommons.close() }
 }
diff --git a/les/vflux/client/serverpool.go b/les/vflux/client/serverpool.go
index e73b277ced..cc0254c127 100644
--- a/les/vflux/client/serverpool.go
+++ b/les/vflux/client/serverpool.go
@@ -47,7 +47,8 @@ const (
 	nodeWeightThreshold = 100 // minimum weight for keeping a node in the known (valuable) set
 	minRedialWait       = 10  // minimum redial wait time in seconds
 	preNegLimit         = 5   // maximum number of simultaneous pre-negotiation queries
-	maxQueryFails       = 100 // number of consecutive UDP query failures before we print a warning
+	warnQueryFails      = 20  // number of consecutive UDP query failures before we print a warning
+	maxQueryFails       = 100 // number of consecutive UDP query failures at which the chance of skipping a query reaches 50%
 )
 
 // ServerPool provides a node iterator for dial candidates.
The output is a mix of newly discovered @@ -94,16 +95,16 @@ type nodeHistoryEnc struct { type QueryFunc func(*enode.Node) int var ( - clientSetup = &nodestate.Setup{Version: 2} - sfHasValue = clientSetup.NewPersistentFlag("hasValue") - sfQueried = clientSetup.NewFlag("queried") - sfCanDial = clientSetup.NewFlag("canDial") - sfDialing = clientSetup.NewFlag("dialed") - sfWaitDialTimeout = clientSetup.NewFlag("dialTimeout") - sfConnected = clientSetup.NewFlag("connected") - sfRedialWait = clientSetup.NewFlag("redialWait") - sfAlwaysConnect = clientSetup.NewFlag("alwaysConnect") - sfDisableSelection = nodestate.MergeFlags(sfQueried, sfCanDial, sfDialing, sfConnected, sfRedialWait) + clientSetup = &nodestate.Setup{Version: 2} + sfHasValue = clientSetup.NewPersistentFlag("hasValue") + sfQuery = clientSetup.NewFlag("query") + sfCanDial = clientSetup.NewFlag("canDial") + sfDialing = clientSetup.NewFlag("dialed") + sfWaitDialTimeout = clientSetup.NewFlag("dialTimeout") + sfConnected = clientSetup.NewFlag("connected") + sfRedialWait = clientSetup.NewFlag("redialWait") + sfAlwaysConnect = clientSetup.NewFlag("alwaysConnect") + sfDialProcess = nodestate.MergeFlags(sfQuery, sfCanDial, sfDialing, sfConnected, sfRedialWait) sfiNodeHistory = clientSetup.NewPersistentField("nodeHistory", reflect.TypeOf(nodeHistory{}), func(field interface{}) ([]byte, error) { @@ -162,8 +163,8 @@ func NewServerPool(db ethdb.KeyValueStore, dbKey []byte, mixTimeout time.Duratio } s.recalTimeout() s.mixer = enode.NewFairMix(mixTimeout) - knownSelector := NewWrsIterator(s.ns, sfHasValue, sfDisableSelection, sfiNodeWeight) - alwaysConnect := NewQueueIterator(s.ns, sfAlwaysConnect, sfDisableSelection, true, nil) + knownSelector := NewWrsIterator(s.ns, sfHasValue, sfDialProcess, sfiNodeWeight) + alwaysConnect := NewQueueIterator(s.ns, sfAlwaysConnect, sfDialProcess, true, nil) s.mixSources = append(s.mixSources, knownSelector) s.mixSources = append(s.mixSources, alwaysConnect) @@ -226,7 +227,7 @@ func (s *ServerPool) AddMetrics( s.totalValueGauge = totalValueGauge s.sessionValueMeter = sessionValueMeter if serverSelectableGauge != nil { - s.ns.AddLogMetrics(sfHasValue, sfDisableSelection, "selectable", nil, nil, serverSelectableGauge) + s.ns.AddLogMetrics(sfHasValue, sfDialProcess, "selectable", nil, nil, serverSelectableGauge) } if serverDialedMeter != nil { s.ns.AddLogMetrics(sfDialing, nodestate.Flags{}, "dialed", serverDialedMeter, nil, nil) @@ -247,43 +248,51 @@ func (s *ServerPool) AddSource(source enode.Iterator) { // Nodes that are filtered out and does not appear on the output iterator are put back // into redialWait state. 
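// Worked example of the skip heuristic implemented below: failMax is the
// consecutive-failure counter capped at maxQueryFails (100), and a query is
// skipped when rand.Intn(2*maxQueryFails) < failMax. After 20 failures the skip
// chance is 20/200 = 10%; from 100 failures on it saturates at 100/200 = 50%,
// so pre-negotiation keeps probing even when UDP appears completely dead:
skipChance := float64(failMax) / float64(2*maxQueryFails) // ranges 0.0 .. 0.5
_ = skipChance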
func (s *ServerPool) addPreNegFilter(input enode.Iterator, query QueryFunc) enode.Iterator { - s.fillSet = NewFillSet(s.ns, input, sfQueried) - s.ns.SubscribeState(sfQueried, func(n *enode.Node, oldState, newState nodestate.Flags) { - if newState.Equals(sfQueried) { - fails := atomic.LoadUint32(&s.queryFails) - if fails == maxQueryFails { - log.Warn("UDP pre-negotiation query does not seem to work") + s.fillSet = NewFillSet(s.ns, input, sfQuery) + s.ns.SubscribeState(sfDialProcess, func(n *enode.Node, oldState, newState nodestate.Flags) { + if !newState.Equals(sfQuery) { + if newState.HasAll(sfQuery) { + // remove query flag if the node is already somewhere in the dial process + s.ns.SetStateSub(n, nodestate.Flags{}, sfQuery, 0) } - if fails > maxQueryFails { - fails = maxQueryFails - } - if rand.Intn(maxQueryFails*2) < int(fails) { - // skip pre-negotiation with increasing chance, max 50% - // this ensures that the client can operate even if UDP is not working at all - s.ns.SetStateSub(n, sfCanDial, nodestate.Flags{}, time.Second*10) - // set canDial before resetting queried so that FillSet will not read more - // candidates unnecessarily - s.ns.SetStateSub(n, nodestate.Flags{}, sfQueried, 0) - return + return + } + fails := atomic.LoadUint32(&s.queryFails) + failMax := fails + if failMax > maxQueryFails { + failMax = maxQueryFails + } + if rand.Intn(maxQueryFails*2) < int(failMax) { + // skip pre-negotiation with increasing chance, max 50% + // this ensures that the client can operate even if UDP is not working at all + s.ns.SetStateSub(n, sfCanDial, nodestate.Flags{}, time.Second*10) + // set canDial before resetting queried so that FillSet will not read more + // candidates unnecessarily + s.ns.SetStateSub(n, nodestate.Flags{}, sfQuery, 0) + return + } + go func() { + q := query(n) + if q == -1 { + atomic.AddUint32(&s.queryFails, 1) + fails++ + if fails%warnQueryFails == 0 { + // warn if a large number of consecutive queries have failed + log.Warn("UDP connection queries failed", "count", fails) + } + } else { + atomic.StoreUint32(&s.queryFails, 0) } - go func() { - q := query(n) - if q == -1 { - atomic.AddUint32(&s.queryFails, 1) + s.ns.Operation(func() { + // we are no longer running in the operation that the callback belongs to, start a new one because of setRedialWait + if q == 1 { + s.ns.SetStateSub(n, sfCanDial, nodestate.Flags{}, time.Second*10) } else { - atomic.StoreUint32(&s.queryFails, 0) + s.setRedialWait(n, queryCost, queryWaitStep) } - s.ns.Operation(func() { - // we are no longer running in the operation that the callback belongs to, start a new one because of setRedialWait - if q == 1 { - s.ns.SetStateSub(n, sfCanDial, nodestate.Flags{}, time.Second*10) - } else { - s.setRedialWait(n, queryCost, queryWaitStep) - } - s.ns.SetStateSub(n, nodestate.Flags{}, sfQueried, 0) - }) - }() - } + s.ns.SetStateSub(n, nodestate.Flags{}, sfQuery, 0) + }) + }() }) return NewQueueIterator(s.ns, sfCanDial, nodestate.Flags{}, false, func(waiting bool) { if waiting { diff --git a/les/vflux/server/balance.go b/les/vflux/server/balance.go index db12a5c573..01e645a16a 100644 --- a/les/vflux/server/balance.go +++ b/les/vflux/server/balance.go @@ -47,21 +47,57 @@ type PriceFactors struct { TimeFactor, CapacityFactor, RequestFactor float64 } -// timePrice returns the price of connection per nanosecond at the given capacity -func (p PriceFactors) timePrice(cap uint64) float64 { - return p.TimeFactor + float64(cap)*p.CapacityFactor/1000000 +// connectionPrice returns the price of connection per 
nanosecond at the given capacity +// and the estimated average request cost. +func (p PriceFactors) connectionPrice(cap uint64, avgReqCost float64) float64 { + return p.TimeFactor + float64(cap)*p.CapacityFactor/1000000 + p.RequestFactor*avgReqCost } -// NodeBalance keeps track of the positive and negative balances of a connected +type ( + // nodePriority interface provides current and estimated future priorities on demand + nodePriority interface { + // priority should return the current priority of the node (higher is better) + priority(cap uint64) int64 + // estimatePriority should return a lower estimate for the minimum of the node priority + // value starting from the current moment until the given time. If the priority goes + // under the returned estimate before the specified moment then it is the caller's + // responsibility to signal with updateFlag. + estimatePriority(cap uint64, addBalance int64, future, bias time.Duration, update bool) int64 + } + + // ReadOnlyBalance provides read-only operations on the node balance + ReadOnlyBalance interface { + nodePriority + GetBalance() (uint64, uint64) + GetRawBalance() (utils.ExpiredValue, utils.ExpiredValue) + GetPriceFactors() (posFactor, negFactor PriceFactors) + } + + // ConnectedBalance provides operations permitted on connected nodes (non-read-only + // operations are not permitted inside a BalanceOperation) + ConnectedBalance interface { + ReadOnlyBalance + SetPriceFactors(posFactor, negFactor PriceFactors) + RequestServed(cost uint64) uint64 + } + + // AtomicBalanceOperator provides operations permitted in an atomic BalanceOperation + AtomicBalanceOperator interface { + ReadOnlyBalance + AddBalance(amount int64) (uint64, uint64, error) + SetBalance(pos, neg uint64) error + } +) + +// nodeBalance keeps track of the positive and negative balances of a connected // client and calculates actual and projected future priority values. // Implements nodePriority interface. -type NodeBalance struct { - bt *BalanceTracker +type nodeBalance struct { + bt *balanceTracker lock sync.RWMutex node *enode.Node connAddress string - active bool - priority bool + active, hasPriority, setFlags bool capacity uint64 balance balance posFactor, negFactor PriceFactors @@ -78,7 +114,62 @@ type NodeBalance struct { // balance represents a pair of positive and negative balances type balance struct { - pos, neg utils.ExpiredValue + pos, neg utils.ExpiredValue + posExp, negExp utils.ValueExpirer +} + +// posValue returns the value of positive balance at a given timestamp. +func (b balance) posValue(now mclock.AbsTime) uint64 { + return b.pos.Value(b.posExp.LogOffset(now)) +} + +// negValue returns the value of negative balance at a given timestamp. +func (b balance) negValue(now mclock.AbsTime) uint64 { + return b.neg.Value(b.negExp.LogOffset(now)) +} + +// addValue adds the value of a given amount to the balance. The original value and +// updated value will also be returned if the addition is successful. +// Returns the error if the given value is too large and the value overflows. 
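// Worked example for connectionPrice above, using the defaultPosFactors added in
// les/server.go (TimeFactor 0, CapacityFactor 1, RequestFactor 1): a client at
// capacity 1000000 pays 0 + 1000000*1/1000000 + 1*avgReqCost, i.e. one token per
// nanosecond of connection plus one token per request cost unit served
// (avgReqCost is a placeholder):
p := PriceFactors{TimeFactor: 0, CapacityFactor: 1, RequestFactor: 1}
price := p.connectionPrice(1000000, avgReqCost) // == 1 + avgReqCost
_ = price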
+func (b *balance) addValue(now mclock.AbsTime, amount int64, pos bool, force bool) (uint64, uint64, int64, error) { + var ( + val utils.ExpiredValue + offset utils.Fixed64 + ) + if pos { + offset, val = b.posExp.LogOffset(now), b.pos + } else { + offset, val = b.negExp.LogOffset(now), b.neg + } + old := val.Value(offset) + if amount > 0 && (amount > maxBalance || old > maxBalance-uint64(amount)) { + if !force { + return old, 0, 0, errBalanceOverflow + } + val = utils.ExpiredValue{} + amount = maxBalance + } + net := val.Add(amount, offset) + if pos { + b.pos = val + } else { + b.neg = val + } + return old, val.Value(offset), net, nil +} + +// setValue sets the internal balance amount to the given values. Returns the +// error if the given value is too large. +func (b *balance) setValue(now mclock.AbsTime, pos uint64, neg uint64) error { + if pos > maxBalance || neg > maxBalance { + return errBalanceOverflow + } + var pb, nb utils.ExpiredValue + pb.Add(int64(pos), b.posExp.LogOffset(now)) + nb.Add(int64(neg), b.negExp.LogOffset(now)) + b.pos = pb + b.neg = nb + return nil } // balanceCallback represents a single callback that is activated when client priority @@ -90,18 +181,18 @@ type balanceCallback struct { } // GetBalance returns the current positive and negative balance. -func (n *NodeBalance) GetBalance() (uint64, uint64) { +func (n *nodeBalance) GetBalance() (uint64, uint64) { n.lock.Lock() defer n.lock.Unlock() now := n.bt.clock.Now() n.updateBalance(now) - return n.balance.pos.Value(n.bt.posExp.LogOffset(now)), n.balance.neg.Value(n.bt.negExp.LogOffset(now)) + return n.balance.posValue(now), n.balance.negValue(now) } // GetRawBalance returns the current positive and negative balance // but in the raw(expired value) format. -func (n *NodeBalance) GetRawBalance() (utils.ExpiredValue, utils.ExpiredValue) { +func (n *nodeBalance) GetRawBalance() (utils.ExpiredValue, utils.ExpiredValue) { n.lock.Lock() defer n.lock.Unlock() @@ -114,164 +205,147 @@ func (n *NodeBalance) GetRawBalance() (utils.ExpiredValue, utils.ExpiredValue) { // before and after the operation. Exceeding maxBalance results in an error (balance is // unchanged) while adding a negative amount higher than the current balance results in // zero balance. -func (n *NodeBalance) AddBalance(amount int64) (uint64, uint64, error) { +// Note: this function should run inside a NodeStateMachine operation +func (n *nodeBalance) AddBalance(amount int64) (uint64, uint64, error) { var ( - err error - old, new uint64 + err error + old, new uint64 + now = n.bt.clock.Now() + callbacks []func() + setPriority bool ) - n.bt.ns.Operation(func() { - var ( - callbacks []func() - setPriority bool - ) - n.bt.updateTotalBalance(n, func() bool { - now := n.bt.clock.Now() - n.updateBalance(now) - - // Ensure the given amount is valid to apply. - offset := n.bt.posExp.LogOffset(now) - old = n.balance.pos.Value(offset) - if amount > 0 && (amount > maxBalance || old > maxBalance-uint64(amount)) { - err = errBalanceOverflow - return false - } - - // Update the total positive balance counter. 
- n.balance.pos.Add(amount, offset) - callbacks = n.checkCallbacks(now) - setPriority = n.checkPriorityStatus() - new = n.balance.pos.Value(offset) - n.storeBalance(true, false) - return true - }) - for _, cb := range callbacks { - cb() - } - if setPriority { - n.bt.ns.SetStateSub(n.node, n.bt.PriorityFlag, nodestate.Flags{}, 0) + // Operation with holding the lock + n.bt.updateTotalBalance(n, func() bool { + n.updateBalance(now) + if old, new, _, err = n.balance.addValue(now, amount, true, false); err != nil { + return false } - n.signalPriorityUpdate() + callbacks, setPriority = n.checkCallbacks(now), n.checkPriorityStatus() + n.storeBalance(true, false) + return true }) if err != nil { return old, old, err } - + // Operation without holding the lock + for _, cb := range callbacks { + cb() + } + if n.setFlags { + if setPriority { + n.bt.ns.SetStateSub(n.node, n.bt.setup.priorityFlag, nodestate.Flags{}, 0) + } + // Note: priority flag is automatically removed by the zero priority callback if necessary + n.signalPriorityUpdate() + } return old, new, nil } // SetBalance sets the positive and negative balance to the given values -func (n *NodeBalance) SetBalance(pos, neg uint64) error { - if pos > maxBalance || neg > maxBalance { - return errBalanceOverflow - } - n.bt.ns.Operation(func() { - var ( - callbacks []func() - setPriority bool - ) - n.bt.updateTotalBalance(n, func() bool { - now := n.bt.clock.Now() - n.updateBalance(now) - - var pb, nb utils.ExpiredValue - pb.Add(int64(pos), n.bt.posExp.LogOffset(now)) - nb.Add(int64(neg), n.bt.negExp.LogOffset(now)) - n.balance.pos = pb - n.balance.neg = nb - callbacks = n.checkCallbacks(now) - setPriority = n.checkPriorityStatus() - n.storeBalance(true, true) - return true - }) - for _, cb := range callbacks { - cb() +// Note: this function should run inside a NodeStateMachine operation +func (n *nodeBalance) SetBalance(pos, neg uint64) error { + var ( + now = n.bt.clock.Now() + callbacks []func() + setPriority bool + ) + // Operation with holding the lock + n.bt.updateTotalBalance(n, func() bool { + n.updateBalance(now) + if err := n.balance.setValue(now, pos, neg); err != nil { + return false } + callbacks, setPriority = n.checkCallbacks(now), n.checkPriorityStatus() + n.storeBalance(true, true) + return true + }) + // Operation without holding the lock + for _, cb := range callbacks { + cb() + } + if n.setFlags { if setPriority { - n.bt.ns.SetStateSub(n.node, n.bt.PriorityFlag, nodestate.Flags{}, 0) + n.bt.ns.SetStateSub(n.node, n.bt.setup.priorityFlag, nodestate.Flags{}, 0) } + // Note: priority flag is automatically removed by the zero priority callback if necessary n.signalPriorityUpdate() - }) + } return nil } // RequestServed should be called after serving a request for the given peer -func (n *NodeBalance) RequestServed(cost uint64) uint64 { +func (n *nodeBalance) RequestServed(cost uint64) (newBalance uint64) { n.lock.Lock() - var callbacks []func() - defer func() { - n.lock.Unlock() - if callbacks != nil { - n.bt.ns.Operation(func() { - for _, cb := range callbacks { - cb() - } - }) - } - }() - now := n.bt.clock.Now() + var ( + check bool + fcost = float64(cost) + now = n.bt.clock.Now() + ) n.updateBalance(now) - fcost := float64(cost) - - posExp := n.bt.posExp.LogOffset(now) - var check bool if !n.balance.pos.IsZero() { - if n.posFactor.RequestFactor != 0 { - c := -int64(fcost * n.posFactor.RequestFactor) - cc := n.balance.pos.Add(c, posExp) - if c == cc { + posCost := -int64(fcost * n.posFactor.RequestFactor) + if posCost == 0 { + 
fcost = 0 + newBalance = n.balance.posValue(now) + } else { + var net int64 + _, newBalance, net, _ = n.balance.addValue(now, posCost, true, false) + if posCost == net { fcost = 0 } else { - fcost *= 1 - float64(cc)/float64(c) + fcost *= 1 - float64(net)/float64(posCost) } check = true - } else { - fcost = 0 } } - if fcost > 0 { - if n.negFactor.RequestFactor != 0 { - n.balance.neg.Add(int64(fcost*n.negFactor.RequestFactor), n.bt.negExp.LogOffset(now)) - check = true - } + if fcost > 0 && n.negFactor.RequestFactor != 0 { + n.balance.addValue(now, int64(fcost*n.negFactor.RequestFactor), false, false) + check = true } + n.sumReqCost += cost + + var callbacks []func() if check { callbacks = n.checkCallbacks(now) } - n.sumReqCost += cost - return n.balance.pos.Value(posExp) + n.lock.Unlock() + + if callbacks != nil { + n.bt.ns.Operation(func() { + for _, cb := range callbacks { + cb() + } + }) + } + return } -// Priority returns the actual priority based on the current balance -func (n *NodeBalance) Priority(capacity uint64) int64 { +// priority returns the actual priority based on the current balance +func (n *nodeBalance) priority(capacity uint64) int64 { n.lock.Lock() defer n.lock.Unlock() - n.updateBalance(n.bt.clock.Now()) - return n.balanceToPriority(n.balance, capacity) + now := n.bt.clock.Now() + n.updateBalance(now) + return n.balanceToPriority(now, n.balance, capacity) } // EstMinPriority gives a lower estimate for the priority at a given time in the future. // An average request cost per time is assumed that is twice the average cost per time // in the current session. -// If update is true then a priority callback is added that turns UpdateFlag on and off +// If update is true then a priority callback is added that turns updateFlag on and off // in case the priority goes below the estimated minimum. -func (n *NodeBalance) EstimatePriority(capacity uint64, addBalance int64, future, bias time.Duration, update bool) int64 { +func (n *nodeBalance) estimatePriority(capacity uint64, addBalance int64, future, bias time.Duration, update bool) int64 { n.lock.Lock() defer n.lock.Unlock() now := n.bt.clock.Now() n.updateBalance(now) - b := n.balance + + b := n.balance // copy the balance if addBalance != 0 { - offset := n.bt.posExp.LogOffset(now) - old := n.balance.pos.Value(offset) - if addBalance > 0 && (addBalance > maxBalance || old > maxBalance-uint64(addBalance)) { - b.pos = utils.ExpiredValue{} - b.pos.Add(maxBalance, offset) - } else { - b.pos.Add(addBalance, offset) - } + b.addValue(now, addBalance, true, true) } if future > 0 { var avgReqCost float64 @@ -284,52 +358,20 @@ func (n *NodeBalance) EstimatePriority(capacity uint64, addBalance int64, future if bias > 0 { b = n.reducedBalance(b, now+mclock.AbsTime(future), bias, capacity, 0) } - pri := n.balanceToPriority(b, capacity) + // Note: we subtract one from the estimated priority in order to ensure that biased + // estimates are always lower than actual priorities, even if the bias is very small. + // This ensures that two nodes will not ping-pong update signals forever if both of + // them have zero estimated priority drop in the projected future. 
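// Worked example of the partial charge in RequestServed above (hypothetical
// numbers): with a positive balance of 100 and posCost = -150, addValue can only
// apply net = -100 before the balance is exhausted. Since net != posCost, the
// uncovered fraction of the cost is carried over to the negative balance:
remaining := fcost * (1 - float64(net)/float64(posCost)) // 150 * (1 - 100/150) = 50
_ = remaining // charged as negative balance via negFactor.RequestFactor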
+ pri := n.balanceToPriority(now, b, capacity) - 1 if update { n.addCallback(balanceCallbackUpdate, pri, n.signalPriorityUpdate) } return pri } -// PosBalanceMissing calculates the missing amount of positive balance in order to -// connect at targetCapacity, stay connected for the given amount of time and then -// still have a priority of targetPriority -func (n *NodeBalance) PosBalanceMissing(targetPriority int64, targetCapacity uint64, after time.Duration) uint64 { - n.lock.Lock() - defer n.lock.Unlock() - - now := n.bt.clock.Now() - if targetPriority < 0 { - timePrice := n.negFactor.timePrice(targetCapacity) - timeCost := uint64(float64(after) * timePrice) - negBalance := n.balance.neg.Value(n.bt.negExp.LogOffset(now)) - if timeCost+negBalance < uint64(-targetPriority) { - return 0 - } - if uint64(-targetPriority) > negBalance && timePrice > 1e-100 { - if negTime := time.Duration(float64(uint64(-targetPriority)-negBalance) / timePrice); negTime < after { - after -= negTime - } else { - after = 0 - } - } - targetPriority = 0 - } - timePrice := n.posFactor.timePrice(targetCapacity) - posRequired := uint64(float64(targetPriority)*float64(targetCapacity)+float64(after)*timePrice) + 1 - if posRequired >= maxBalance { - return math.MaxUint64 // target not reachable - } - posBalance := n.balance.pos.Value(n.bt.posExp.LogOffset(now)) - if posRequired > posBalance { - return posRequired - posBalance - } - return 0 -} - // SetPriceFactors sets the price factors. TimeFactor is the price of a nanosecond of // connection while RequestFactor is the price of a request cost unit. -func (n *NodeBalance) SetPriceFactors(posFactor, negFactor PriceFactors) { +func (n *nodeBalance) SetPriceFactors(posFactor, negFactor PriceFactors) { n.lock.Lock() now := n.bt.clock.Now() n.updateBalance(now) @@ -346,7 +388,7 @@ func (n *NodeBalance) SetPriceFactors(posFactor, negFactor PriceFactors) { } // GetPriceFactors returns the price factors -func (n *NodeBalance) GetPriceFactors() (posFactor, negFactor PriceFactors) { +func (n *nodeBalance) GetPriceFactors() (posFactor, negFactor PriceFactors) { n.lock.Lock() defer n.lock.Unlock() @@ -354,7 +396,7 @@ func (n *NodeBalance) GetPriceFactors() (posFactor, negFactor PriceFactors) { } // activate starts time/capacity cost deduction. -func (n *NodeBalance) activate() { +func (n *nodeBalance) activate() { n.bt.updateTotalBalance(n, func() bool { if n.active { return false @@ -366,7 +408,7 @@ func (n *NodeBalance) activate() { } // deactivate stops time/capacity cost deduction and saves the balances in the database -func (n *NodeBalance) deactivate() { +func (n *nodeBalance) deactivate() { n.bt.updateTotalBalance(n, func() bool { if !n.active { return false @@ -383,7 +425,7 @@ func (n *NodeBalance) deactivate() { } // updateBalance updates balance based on the time factor -func (n *NodeBalance) updateBalance(now mclock.AbsTime) { +func (n *nodeBalance) updateBalance(now mclock.AbsTime) { if n.active && now > n.lastUpdate { n.balance = n.reducedBalance(n.balance, n.lastUpdate, time.Duration(now-n.lastUpdate), n.capacity, 0) n.lastUpdate = now @@ -391,7 +433,7 @@ func (n *NodeBalance) updateBalance(now mclock.AbsTime) { } // storeBalance stores the positive and/or negative balance of the node in the database -func (n *NodeBalance) storeBalance(pos, neg bool) { +func (n *nodeBalance) storeBalance(pos, neg bool) { if pos { n.bt.storeBalance(n.node.ID().Bytes(), false, n.balance.pos) } @@ -405,7 +447,7 @@ func (n *NodeBalance) storeBalance(pos, neg bool) { // immediately. 
// Note: should be called while n.lock is held // Note 2: the callback function runs inside a NodeStateMachine operation -func (n *NodeBalance) addCallback(id int, threshold int64, callback func()) { +func (n *nodeBalance) addCallback(id int, threshold int64, callback func()) { n.removeCallback(id) idx := 0 for idx < n.callbackCount && threshold > n.callbacks[idx].threshold { @@ -425,7 +467,7 @@ func (n *NodeBalance) addCallback(id int, threshold int64, callback func()) { // removeCallback removes the given callback and returns true if it was active // Note: should be called while n.lock is held -func (n *NodeBalance) removeCallback(id int) bool { +func (n *nodeBalance) removeCallback(id int) bool { idx := n.callbackIndex[id] if idx == -1 { return false @@ -442,11 +484,11 @@ func (n *NodeBalance) removeCallback(id int) bool { // checkCallbacks checks whether the threshold of any of the active callbacks // have been reached and returns triggered callbacks. // Note: checkCallbacks assumes that the balance has been recently updated. -func (n *NodeBalance) checkCallbacks(now mclock.AbsTime) (callbacks []func()) { +func (n *nodeBalance) checkCallbacks(now mclock.AbsTime) (callbacks []func()) { if n.callbackCount == 0 || n.capacity == 0 { return } - pri := n.balanceToPriority(n.balance, n.capacity) + pri := n.balanceToPriority(now, n.balance, n.capacity) for n.callbackCount != 0 && n.callbacks[n.callbackCount-1].threshold >= pri { n.callbackCount-- n.callbackIndex[n.callbacks[n.callbackCount].id] = -1 @@ -458,7 +500,7 @@ func (n *NodeBalance) checkCallbacks(now mclock.AbsTime) (callbacks []func()) { // scheduleCheck sets up or updates a scheduled event to ensure that it will be called // again just after the next threshold has been reached. -func (n *NodeBalance) scheduleCheck(now mclock.AbsTime) { +func (n *nodeBalance) scheduleCheck(now mclock.AbsTime) { if n.callbackCount != 0 { d, ok := n.timeUntil(n.callbacks[n.callbackCount-1].threshold) if !ok { @@ -484,7 +526,7 @@ func (n *NodeBalance) scheduleCheck(now mclock.AbsTime) { } // updateAfter schedules a balance update and callback check in the future -func (n *NodeBalance) updateAfter(dt time.Duration) { +func (n *nodeBalance) updateAfter(dt time.Duration) { if n.updateEvent == nil || n.updateEvent.Stop() { if dt == 0 { n.updateEvent = nil @@ -512,20 +554,22 @@ func (n *NodeBalance) updateAfter(dt time.Duration) { // balanceExhausted should be called when the positive balance is exhausted (priority goes to zero/negative) // Note: this function should run inside a NodeStateMachine operation -func (n *NodeBalance) balanceExhausted() { +func (n *nodeBalance) balanceExhausted() { n.lock.Lock() n.storeBalance(true, false) - n.priority = false + n.hasPriority = false n.lock.Unlock() - n.bt.ns.SetStateSub(n.node, nodestate.Flags{}, n.bt.PriorityFlag, 0) + if n.setFlags { + n.bt.ns.SetStateSub(n.node, nodestate.Flags{}, n.bt.setup.priorityFlag, 0) + } } // checkPriorityStatus checks whether the node has gained priority status and sets the priority // callback and flag if necessary. It assumes that the balance has been recently updated. // Note that the priority flag has to be set by the caller after the mutex has been released. 
-func (n *NodeBalance) checkPriorityStatus() bool { - if !n.priority && !n.balance.pos.IsZero() { - n.priority = true +func (n *nodeBalance) checkPriorityStatus() bool { + if !n.hasPriority && !n.balance.pos.IsZero() { + n.hasPriority = true n.addCallback(balanceCallbackZero, 0, func() { n.balanceExhausted() }) return true } @@ -534,15 +578,15 @@ func (n *NodeBalance) checkPriorityStatus() bool { // signalPriorityUpdate signals that the priority fell below the previous minimum estimate // Note: this function should run inside a NodeStateMachine operation -func (n *NodeBalance) signalPriorityUpdate() { - n.bt.ns.SetStateSub(n.node, n.bt.UpdateFlag, nodestate.Flags{}, 0) - n.bt.ns.SetStateSub(n.node, nodestate.Flags{}, n.bt.UpdateFlag, 0) +func (n *nodeBalance) signalPriorityUpdate() { + n.bt.ns.SetStateSub(n.node, n.bt.setup.updateFlag, nodestate.Flags{}, 0) + n.bt.ns.SetStateSub(n.node, nodestate.Flags{}, n.bt.setup.updateFlag, 0) } // setCapacity updates the capacity value used for priority calculation // Note: capacity should never be zero // Note 2: this function should run inside a NodeStateMachine operation -func (n *NodeBalance) setCapacity(capacity uint64) { +func (n *nodeBalance) setCapacity(capacity uint64) { n.lock.Lock() now := n.bt.clock.Now() n.updateBalance(now) @@ -557,74 +601,89 @@ func (n *NodeBalance) setCapacity(capacity uint64) { // balanceToPriority converts a balance to a priority value. Lower priority means // first to disconnect. Positive balance translates to positive priority. If positive // balance is zero then negative balance translates to a negative priority. -func (n *NodeBalance) balanceToPriority(b balance, capacity uint64) int64 { - if !b.pos.IsZero() { - return int64(b.pos.Value(n.bt.posExp.LogOffset(n.bt.clock.Now())) / capacity) +func (n *nodeBalance) balanceToPriority(now mclock.AbsTime, b balance, capacity uint64) int64 { + pos := b.posValue(now) + if pos > 0 { + return int64(pos / capacity) + } + return -int64(b.negValue(now)) +} + +// priorityToBalance converts a target priority to a requested balance value. +// If the priority is negative, then minimal negative balance is returned; +// otherwise the minimal positive balance is returned. 
+func (n *nodeBalance) priorityToBalance(priority int64, capacity uint64) (uint64, uint64) {
+	if priority > 0 {
+		return uint64(priority) * capacity, 0
 	}
-	return -int64(b.neg.Value(n.bt.negExp.LogOffset(n.bt.clock.Now())))
+	return 0, uint64(-priority)
 }
 
 // reducedBalance estimates the reduced balance at a given time in the future based
 // on the given balance, the time factor and an estimated average request cost per time ratio
-func (n *NodeBalance) reducedBalance(b balance, start mclock.AbsTime, dt time.Duration, capacity uint64, avgReqCost float64) balance {
+func (n *nodeBalance) reducedBalance(b balance, start mclock.AbsTime, dt time.Duration, capacity uint64, avgReqCost float64) balance {
 	// since the costs are applied continuously during the dt time period we calculate
 	// the expiration offset at the middle of the period
-	at := start + mclock.AbsTime(dt/2)
-	dtf := float64(dt)
+	var (
+		at  = start + mclock.AbsTime(dt/2)
+		dtf = float64(dt)
+	)
 	if !b.pos.IsZero() {
-		factor := n.posFactor.timePrice(capacity) + n.posFactor.RequestFactor*avgReqCost
+		factor := n.posFactor.connectionPrice(capacity, avgReqCost)
 		diff := -int64(dtf * factor)
-		dd := b.pos.Add(diff, n.bt.posExp.LogOffset(at))
-		if dd == diff {
+		_, _, net, _ := b.addValue(at, diff, true, false)
+		if net == diff {
 			dtf = 0
 		} else {
-			dtf += float64(dd) / factor
+			dtf += float64(net) / factor
 		}
 	}
-	if dt > 0 {
-		factor := n.negFactor.timePrice(capacity) + n.negFactor.RequestFactor*avgReqCost
-		b.neg.Add(int64(dtf*factor), n.bt.negExp.LogOffset(at))
+	if dtf > 0 {
+		factor := n.negFactor.connectionPrice(capacity, avgReqCost)
+		b.addValue(at, int64(dtf*factor), false, false)
 	}
 	return b
 }
 
 // timeUntil calculates the remaining time needed to reach a given priority level
 // assuming that no requests are processed until then. If the given level is never
-// reached then (0, false) is returned.
+// reached then (0, false) is returned. If it has already been reached then (0, true)
+// is returned.
 // Note: the function assumes that the balance has been recently updated and
 // calculates the time starting from the last update.
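// Worked example, assuming TimeFactor 1 and CapacityFactor 0 (so the time price is
// one token per nanosecond regardless of capacity): with one second of positive
// balance, reaching priority 0 takes one second (draining the positive balance),
// while reaching priority -1s takes two seconds, as exercised by
// TestCallbackChecking in balance_test.go.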
-func (n *NodeBalance) timeUntil(priority int64) (time.Duration, bool) {
-	now := n.bt.clock.Now()
-	var dt float64
-	if !n.balance.pos.IsZero() {
-		posBalance := n.balance.pos.Value(n.bt.posExp.LogOffset(now))
-		timePrice := n.posFactor.timePrice(n.capacity)
+func (n *nodeBalance) timeUntil(priority int64) (time.Duration, bool) {
+	var (
+		now                  = n.bt.clock.Now()
+		pos                  = n.balance.posValue(now)
+		targetPos, targetNeg = n.priorityToBalance(priority, n.capacity)
+		diffTime             float64
+	)
+	if pos > 0 {
+		timePrice := n.posFactor.connectionPrice(n.capacity, 0)
 		if timePrice < 1e-100 {
 			return 0, false
 		}
-		if priority > 0 {
-			newBalance := uint64(priority) * n.capacity
-			if newBalance > posBalance {
-				return 0, false
+		if targetPos > 0 {
+			if targetPos > pos {
+				return 0, true
 			}
-			dt = float64(posBalance-newBalance) / timePrice
-			return time.Duration(dt), true
+			diffTime = float64(pos-targetPos) / timePrice
+			return time.Duration(diffTime), true
 		} else {
-			dt = float64(posBalance) / timePrice
+			diffTime = float64(pos) / timePrice
 		}
 	} else {
-		if priority > 0 {
-			return 0, false
+		if targetPos > 0 {
+			return 0, true
 		}
 	}
-	// if we have a positive balance then dt equals the time needed to get it to zero
-	negBalance := n.balance.neg.Value(n.bt.negExp.LogOffset(now))
-	timePrice := n.negFactor.timePrice(n.capacity)
-	if uint64(-priority) > negBalance {
+	neg := n.balance.negValue(now)
+	if targetNeg > neg {
+		timePrice := n.negFactor.connectionPrice(n.capacity, 0)
 		if timePrice < 1e-100 {
 			return 0, false
 		}
-		dt += float64(uint64(-priority)-negBalance) / timePrice
+		diffTime += float64(targetNeg-neg) / timePrice
 	}
-	return time.Duration(dt), true
+	return time.Duration(diffTime), true
 }
diff --git a/les/vflux/server/balance_test.go b/les/vflux/server/balance_test.go
index e22074db2d..66f0d1f301 100644
--- a/les/vflux/server/balance_test.go
+++ b/les/vflux/server/balance_test.go
@@ -24,6 +24,7 @@ import (
 	"time"
 
 	"github.com/ethereum/go-ethereum/common/mclock"
+	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/ethdb/memorydb"
 	"github.com/ethereum/go-ethereum/les/utils"
 	"github.com/ethereum/go-ethereum/p2p/enode"
@@ -31,59 +32,82 @@ import (
 	"github.com/ethereum/go-ethereum/p2p/nodestate"
 )
 
-var (
-	testFlag     = testSetup.NewFlag("testFlag")
-	connAddrFlag = testSetup.NewField("connAddr", reflect.TypeOf(""))
-	btTestSetup  = NewBalanceTrackerSetup(testSetup)
-)
-
-func init() {
-	btTestSetup.Connect(connAddrFlag, ppTestSetup.CapacityField)
-}
-
 type zeroExpirer struct{}
 
 func (z zeroExpirer) SetRate(now mclock.AbsTime, rate float64)                 {}
 func (z zeroExpirer) SetLogOffset(now mclock.AbsTime, logOffset utils.Fixed64) {}
 func (z zeroExpirer) LogOffset(now mclock.AbsTime) utils.Fixed64               { return 0 }
 
+type balanceTestClient struct{}
+
+func (client balanceTestClient) FreeClientId() string { return "" }
+
 type balanceTestSetup struct {
 	clock *mclock.Simulated
+	db    ethdb.KeyValueStore
 	ns    *nodestate.NodeStateMachine
-	bt    *BalanceTracker
+	setup *serverSetup
+	bt    *balanceTracker
 }
 
-func newBalanceTestSetup() *balanceTestSetup {
+func newBalanceTestSetup(db ethdb.KeyValueStore, posExp, negExp utils.ValueExpirer) *balanceTestSetup {
+	// Initialize and customize the setup for balance testing
 	clock := &mclock.Simulated{}
-	ns := nodestate.NewNodeStateMachine(nil, nil, clock, testSetup)
-	db := memorydb.New()
-	bt := NewBalanceTracker(ns, btTestSetup, db, clock, zeroExpirer{}, zeroExpirer{})
+	setup := newServerSetup()
+	setup.clientField = setup.setup.NewField("balanceTestClient", 
reflect.TypeOf(balanceTestClient{})) + + ns := nodestate.NewNodeStateMachine(nil, nil, clock, setup.setup) + if posExp == nil { + posExp = zeroExpirer{} + } + if negExp == nil { + negExp = zeroExpirer{} + } + if db == nil { + db = memorydb.New() + } + bt := newBalanceTracker(ns, setup, db, clock, posExp, negExp) ns.Start() return &balanceTestSetup{ clock: clock, + db: db, ns: ns, + setup: setup, bt: bt, } } -func (b *balanceTestSetup) newNode(capacity uint64) *NodeBalance { +func (b *balanceTestSetup) newNode(capacity uint64) *nodeBalance { node := enode.SignNull(&enr.Record{}, enode.ID{}) - b.ns.SetState(node, testFlag, nodestate.Flags{}, 0) - b.ns.SetField(node, btTestSetup.connAddressField, "") + b.ns.SetField(node, b.setup.clientField, balanceTestClient{}) if capacity != 0 { - b.ns.SetField(node, ppTestSetup.CapacityField, capacity) + b.ns.SetField(node, b.setup.capacityField, capacity) } - n, _ := b.ns.GetField(node, btTestSetup.BalanceField).(*NodeBalance) + n, _ := b.ns.GetField(node, b.setup.balanceField).(*nodeBalance) return n } +func (b *balanceTestSetup) setBalance(node *nodeBalance, pos, neg uint64) (err error) { + b.bt.BalanceOperation(node.node.ID(), node.connAddress, func(balance AtomicBalanceOperator) { + err = balance.SetBalance(pos, neg) + }) + return +} + +func (b *balanceTestSetup) addBalance(node *nodeBalance, add int64) (old, new uint64, err error) { + b.bt.BalanceOperation(node.node.ID(), node.connAddress, func(balance AtomicBalanceOperator) { + old, new, err = balance.AddBalance(add) + }) + return +} + func (b *balanceTestSetup) stop() { - b.bt.Stop() + b.bt.stop() b.ns.Stop() } func TestAddBalance(t *testing.T) { - b := newBalanceTestSetup() + b := newBalanceTestSetup(nil, nil, nil) defer b.stop() node := b.newNode(1000) @@ -100,7 +124,7 @@ func TestAddBalance(t *testing.T) { {maxBalance, [2]uint64{0, 0}, 0, true}, } for _, i := range inputs { - old, new, err := node.AddBalance(i.delta) + old, new, err := b.addBalance(node, i.delta) if i.expectErr { if err == nil { t.Fatalf("Expect get error but nil") @@ -119,7 +143,7 @@ func TestAddBalance(t *testing.T) { } func TestSetBalance(t *testing.T) { - b := newBalanceTestSetup() + b := newBalanceTestSetup(nil, nil, nil) defer b.stop() node := b.newNode(1000) @@ -130,9 +154,8 @@ func TestSetBalance(t *testing.T) { {0, 1000}, {1000, 1000}, } - for _, i := range inputs { - node.SetBalance(i.pos, i.neg) + b.setBalance(node, i.pos, i.neg) pos, neg := node.GetBalance() if pos != i.pos { t.Fatalf("Positive balance mismatch, want %v, got %v", i.pos, pos) @@ -144,13 +167,12 @@ func TestSetBalance(t *testing.T) { } func TestBalanceTimeCost(t *testing.T) { - b := newBalanceTestSetup() + b := newBalanceTestSetup(nil, nil, nil) defer b.stop() node := b.newNode(1000) - b.ns.SetField(node.node, ppTestSetup.CapacityField, uint64(1)) node.SetPriceFactors(PriceFactors{1, 0, 1}, PriceFactors{1, 0, 1}) - node.SetBalance(uint64(time.Minute), 0) // 1 minute time allowance + b.setBalance(node, uint64(time.Minute), 0) // 1 minute time allowance var inputs = []struct { runTime time.Duration @@ -172,7 +194,7 @@ func TestBalanceTimeCost(t *testing.T) { } } - node.SetBalance(uint64(time.Minute), 0) // Refill 1 minute time allowance + b.setBalance(node, uint64(time.Minute), 0) // Refill 1 minute time allowance for _, i := range inputs { b.clock.Run(i.runTime) if pos, _ := node.GetBalance(); pos != i.expPos { @@ -185,13 +207,12 @@ func TestBalanceTimeCost(t *testing.T) { } func TestBalanceReqCost(t *testing.T) { - b := newBalanceTestSetup() + b := 
newBalanceTestSetup(nil, nil, nil) defer b.stop() node := b.newNode(1000) node.SetPriceFactors(PriceFactors{1, 0, 1}, PriceFactors{1, 0, 1}) - b.ns.SetField(node.node, ppTestSetup.CapacityField, uint64(1)) - node.SetBalance(uint64(time.Minute), 0) // 1 minute time serving time allowance + b.setBalance(node, uint64(time.Minute), 0) // 1 minute time serving time allowance var inputs = []struct { reqCost uint64 expPos uint64 @@ -214,7 +235,7 @@ func TestBalanceReqCost(t *testing.T) { } func TestBalanceToPriority(t *testing.T) { - b := newBalanceTestSetup() + b := newBalanceTestSetup(nil, nil, nil) defer b.stop() node := b.newNode(1000) node.SetPriceFactors(PriceFactors{1, 0, 1}, PriceFactors{1, 0, 1}) @@ -230,22 +251,20 @@ func TestBalanceToPriority(t *testing.T) { {0, 1000, -1000}, } for _, i := range inputs { - node.SetBalance(i.pos, i.neg) - priority := node.Priority(1000) + b.setBalance(node, i.pos, i.neg) + priority := node.priority(1000) if priority != i.priority { - t.Fatalf("Priority mismatch, want %v, got %v", i.priority, priority) + t.Fatalf("priority mismatch, want %v, got %v", i.priority, priority) } } } func TestEstimatedPriority(t *testing.T) { - b := newBalanceTestSetup() + b := newBalanceTestSetup(nil, nil, nil) defer b.stop() node := b.newNode(1000000000) node.SetPriceFactors(PriceFactors{1, 0, 1}, PriceFactors{1, 0, 1}) - - b.ns.SetField(node.node, ppTestSetup.CapacityField, uint64(1)) - node.SetBalance(uint64(time.Minute), 0) + b.setBalance(node, uint64(time.Minute), 0) var inputs = []struct { runTime time.Duration // time cost futureTime time.Duration // diff of future time @@ -272,47 +291,18 @@ func TestEstimatedPriority(t *testing.T) { for _, i := range inputs { b.clock.Run(i.runTime) node.RequestServed(i.reqCost) - priority := node.EstimatePriority(1000000000, 0, i.futureTime, 0, false) - if priority != i.priority { - t.Fatalf("Estimated priority mismatch, want %v, got %v", i.priority, priority) - } - } -} - -func TestPosBalanceMissing(t *testing.T) { - b := newBalanceTestSetup() - defer b.stop() - node := b.newNode(1000) - node.SetPriceFactors(PriceFactors{1, 0, 1}, PriceFactors{1, 0, 1}) - - b.ns.SetField(node.node, ppTestSetup.CapacityField, uint64(1)) - var inputs = []struct { - pos, neg uint64 - priority int64 - cap uint64 - after time.Duration - expect uint64 - }{ - {uint64(time.Second * 2), 0, 0, 1, time.Second, 0}, - {uint64(time.Second * 2), 0, 0, 1, 2 * time.Second, 1}, - {uint64(time.Second * 2), 0, int64(time.Second), 1, 2 * time.Second, uint64(time.Second) + 1}, - {0, 0, int64(time.Second), 1, time.Second, uint64(2*time.Second) + 1}, - {0, 0, -int64(time.Second), 1, time.Second, 1}, - } - for _, i := range inputs { - node.SetBalance(i.pos, i.neg) - got := node.PosBalanceMissing(i.priority, i.cap, i.after) - if got != i.expect { - t.Fatalf("Missing budget mismatch, want %v, got %v", i.expect, got) + priority := node.estimatePriority(1000000000, 0, i.futureTime, 0, false) + if priority != i.priority-1 { + t.Fatalf("Estimated priority mismatch, want %v, got %v", i.priority-1, priority) } } } func TestPostiveBalanceCounting(t *testing.T) { - b := newBalanceTestSetup() + b := newBalanceTestSetup(nil, nil, nil) defer b.stop() - var nodes []*NodeBalance + var nodes []*nodeBalance for i := 0; i < 100; i += 1 { node := b.newNode(1000000) node.SetPriceFactors(PriceFactors{1, 0, 1}, PriceFactors{1, 0, 1}) @@ -323,7 +313,7 @@ func TestPostiveBalanceCounting(t *testing.T) { var sum uint64 for i := 0; i < 100; i += 1 { amount := int64(rand.Intn(100) + 100) - 
nodes[i].AddBalance(amount) + b.addBalance(nodes[i], amount) sum += uint64(amount) } if b.bt.TotalTokenAmount() != sum { @@ -333,7 +323,7 @@ func TestPostiveBalanceCounting(t *testing.T) { // Change client status for i := 0; i < 100; i += 1 { if rand.Intn(2) == 0 { - b.ns.SetField(nodes[i].node, ppTestSetup.CapacityField, uint64(1)) + b.ns.SetField(nodes[i].node, b.setup.capacityField, uint64(1)) } } if b.bt.TotalTokenAmount() != sum { @@ -341,7 +331,7 @@ func TestPostiveBalanceCounting(t *testing.T) { } for i := 0; i < 100; i += 1 { if rand.Intn(2) == 0 { - b.ns.SetField(nodes[i].node, ppTestSetup.CapacityField, uint64(1)) + b.ns.SetField(nodes[i].node, b.setup.capacityField, uint64(1)) } } if b.bt.TotalTokenAmount() != sum { @@ -350,7 +340,7 @@ func TestPostiveBalanceCounting(t *testing.T) { } func TestCallbackChecking(t *testing.T) { - b := newBalanceTestSetup() + b := newBalanceTestSetup(nil, nil, nil) defer b.stop() node := b.newNode(1000000) node.SetPriceFactors(PriceFactors{1, 0, 1}, PriceFactors{1, 0, 1}) @@ -363,7 +353,7 @@ func TestCallbackChecking(t *testing.T) { {0, time.Second}, {-int64(time.Second), 2 * time.Second}, } - node.SetBalance(uint64(time.Second), 0) + b.setBalance(node, uint64(time.Second), 0) for _, i := range inputs { diff, _ := node.timeUntil(i.priority) if diff != i.expDiff { @@ -373,14 +363,13 @@ func TestCallbackChecking(t *testing.T) { } func TestCallback(t *testing.T) { - b := newBalanceTestSetup() + b := newBalanceTestSetup(nil, nil, nil) defer b.stop() node := b.newNode(1000) node.SetPriceFactors(PriceFactors{1, 0, 1}, PriceFactors{1, 0, 1}) - b.ns.SetField(node.node, ppTestSetup.CapacityField, uint64(1)) callCh := make(chan struct{}, 1) - node.SetBalance(uint64(time.Minute), 0) + b.setBalance(node, uint64(time.Minute), 0) node.addCallback(balanceCallbackZero, 0, func() { callCh <- struct{}{} }) b.clock.Run(time.Minute) @@ -390,7 +379,7 @@ func TestCallback(t *testing.T) { t.Fatalf("Callback hasn't been called yet") } - node.SetBalance(uint64(time.Minute), 0) + b.setBalance(node, uint64(time.Minute), 0) node.addCallback(balanceCallbackZero, 0, func() { callCh <- struct{}{} }) node.removeCallback(balanceCallbackZero) @@ -403,23 +392,14 @@ func TestCallback(t *testing.T) { } func TestBalancePersistence(t *testing.T) { - clock := &mclock.Simulated{} - ns := nodestate.NewNodeStateMachine(nil, nil, clock, testSetup) - db := memorydb.New() posExp := &utils.Expirer{} negExp := &utils.Expirer{} - posExp.SetRate(clock.Now(), math.Log(2)/float64(time.Hour*2)) // halves every two hours - negExp.SetRate(clock.Now(), math.Log(2)/float64(time.Hour)) // halves every hour - bt := NewBalanceTracker(ns, btTestSetup, db, clock, posExp, negExp) - ns.Start() - bts := &balanceTestSetup{ - clock: clock, - ns: ns, - bt: bt, - } - var nb *NodeBalance - exp := func(expPos, expNeg uint64) { - pos, neg := nb.GetBalance() + posExp.SetRate(0, math.Log(2)/float64(time.Hour*2)) // halves every two hours + negExp.SetRate(0, math.Log(2)/float64(time.Hour)) // halves every hour + setup := newBalanceTestSetup(nil, posExp, negExp) + + exp := func(balance *nodeBalance, expPos, expNeg uint64) { + pos, neg := balance.GetBalance() if pos != expPos { t.Fatalf("Positive balance incorrect, want %v, got %v", expPos, pos) } @@ -428,44 +408,32 @@ func TestBalancePersistence(t *testing.T) { } } expTotal := func(expTotal uint64) { - total := bt.TotalTokenAmount() + total := setup.bt.TotalTokenAmount() if total != expTotal { t.Fatalf("Total token amount incorrect, want %v, got %v", expTotal, total) } } 
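	// The expected values below follow directly from the configured half-lives:
	// the positive balance halves every two hours and the negative one every hour,
	// so running the clock for two hours turns 16000000000/16000000000 into
	// 8000000000/4000000000.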
expTotal(0) - nb = bts.newNode(0) + balance := setup.newNode(0) expTotal(0) - nb.SetBalance(16000000000, 16000000000) - exp(16000000000, 16000000000) + setup.setBalance(balance, 16000000000, 16000000000) + exp(balance, 16000000000, 16000000000) expTotal(16000000000) - clock.Run(time.Hour * 2) - exp(8000000000, 4000000000) + + setup.clock.Run(time.Hour * 2) + exp(balance, 8000000000, 4000000000) expTotal(8000000000) - bt.Stop() - ns.Stop() - - clock = &mclock.Simulated{} - ns = nodestate.NewNodeStateMachine(nil, nil, clock, testSetup) - posExp = &utils.Expirer{} - negExp = &utils.Expirer{} - posExp.SetRate(clock.Now(), math.Log(2)/float64(time.Hour*2)) // halves every two hours - negExp.SetRate(clock.Now(), math.Log(2)/float64(time.Hour)) // halves every hour - bt = NewBalanceTracker(ns, btTestSetup, db, clock, posExp, negExp) - ns.Start() - bts = &balanceTestSetup{ - clock: clock, - ns: ns, - bt: bt, - } + setup.stop() + + // Test the functionalities after restart + setup = newBalanceTestSetup(setup.db, posExp, negExp) expTotal(8000000000) - nb = bts.newNode(0) - exp(8000000000, 4000000000) + balance = setup.newNode(0) + exp(balance, 8000000000, 4000000000) expTotal(8000000000) - clock.Run(time.Hour * 2) - exp(4000000000, 1000000000) + setup.clock.Run(time.Hour * 2) + exp(balance, 4000000000, 1000000000) expTotal(4000000000) - bt.Stop() - ns.Stop() + setup.stop() } diff --git a/les/vflux/server/balance_tracker.go b/les/vflux/server/balance_tracker.go index 1708019de4..746697a8c7 100644 --- a/les/vflux/server/balance_tracker.go +++ b/les/vflux/server/balance_tracker.go @@ -17,7 +17,6 @@ package server import ( - "reflect" "sync" "time" @@ -25,6 +24,7 @@ import ( "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/les/utils" "github.com/ethereum/go-ethereum/p2p/enode" + "github.com/ethereum/go-ethereum/p2p/enr" "github.com/ethereum/go-ethereum/p2p/nodestate" ) @@ -34,82 +34,56 @@ const ( persistExpirationRefresh = time.Minute * 5 // refresh period of the token expiration persistence ) -// BalanceTrackerSetup contains node state flags and fields used by BalanceTracker -type BalanceTrackerSetup struct { - // controlled by PriorityPool - PriorityFlag, UpdateFlag nodestate.Flags - BalanceField nodestate.Field - // external connections - connAddressField, capacityField nodestate.Field -} - -// NewBalanceTrackerSetup creates a new BalanceTrackerSetup and initializes the fields -// and flags controlled by BalanceTracker -func NewBalanceTrackerSetup(setup *nodestate.Setup) BalanceTrackerSetup { - return BalanceTrackerSetup{ - // PriorityFlag is set if the node has a positive balance - PriorityFlag: setup.NewFlag("priorityNode"), - // UpdateFlag set and then immediately reset if the balance has been updated and - // therefore priority is suddenly changed - UpdateFlag: setup.NewFlag("balanceUpdate"), - // BalanceField contains the NodeBalance struct which implements nodePriority, - // allowing on-demand priority calculation and future priority estimation - BalanceField: setup.NewField("balance", reflect.TypeOf(&NodeBalance{})), - } -} - -// Connect sets the fields used by BalanceTracker as an input -func (bts *BalanceTrackerSetup) Connect(connAddressField, capacityField nodestate.Field) { - bts.connAddressField = connAddressField - bts.capacityField = capacityField -} - -// BalanceTracker tracks positive and negative balances for connected nodes. 
-// After connAddressField is set externally, a NodeBalance is created and previous +// balanceTracker tracks positive and negative balances for connected nodes. +// After clientField is set externally, a nodeBalance is created and previous // balance values are loaded from the database. Both balances are exponentially expired // values. Costs are deducted from the positive balance if present, otherwise added to // the negative balance. If the capacity is non-zero then a time cost is applied // continuously while individual request costs are applied immediately. // The two balances are translated into a single priority value that also depends // on the actual capacity. -type BalanceTracker struct { - BalanceTrackerSetup - clock mclock.Clock - lock sync.Mutex - ns *nodestate.NodeStateMachine - ndb *nodeDB - posExp, negExp utils.ValueExpirer - posExpTC, negExpTC uint64 +type balanceTracker struct { + setup *serverSetup + clock mclock.Clock + lock sync.Mutex + ns *nodestate.NodeStateMachine + ndb *nodeDB + posExp, negExp utils.ValueExpirer + + posExpTC, negExpTC uint64 + defaultPosFactors, defaultNegFactors PriceFactors active, inactive utils.ExpiredValue balanceTimer *utils.UpdateTimer quit chan struct{} } -// NewBalanceTracker creates a new BalanceTracker -func NewBalanceTracker(ns *nodestate.NodeStateMachine, setup BalanceTrackerSetup, db ethdb.KeyValueStore, clock mclock.Clock, posExp, negExp utils.ValueExpirer) *BalanceTracker { +// newBalanceTracker creates a new balanceTracker +func newBalanceTracker(ns *nodestate.NodeStateMachine, setup *serverSetup, db ethdb.KeyValueStore, clock mclock.Clock, posExp, negExp utils.ValueExpirer) *balanceTracker { ndb := newNodeDB(db, clock) - bt := &BalanceTracker{ - ns: ns, - BalanceTrackerSetup: setup, - ndb: ndb, - clock: clock, - posExp: posExp, - negExp: negExp, - balanceTimer: utils.NewUpdateTimer(clock, time.Second*10), - quit: make(chan struct{}), + bt := &balanceTracker{ + ns: ns, + setup: setup, + ndb: ndb, + clock: clock, + posExp: posExp, + negExp: negExp, + balanceTimer: utils.NewUpdateTimer(clock, time.Second*10), + quit: make(chan struct{}), } posOffset, negOffset := bt.ndb.getExpiration() posExp.SetLogOffset(clock.Now(), posOffset) negExp.SetLogOffset(clock.Now(), negOffset) + // Load all persisted balance entries of priority nodes, + // calculate the total number of issued service tokens. 
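	// (The false argument below selects the positive balances; these start out in
	// the inactive total and are moved between the inactive and active totals by
	// updateTotalBalance as nodes connect and disconnect.)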
	bt.ndb.forEachBalance(false, func(id enode.ID, balance utils.ExpiredValue) bool {
 		bt.inactive.AddExp(balance)
 		return true
 	})
-	ns.SubscribeField(bt.capacityField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) {
-		n, _ := ns.GetField(node, bt.BalanceField).(*NodeBalance)
+	ns.SubscribeField(bt.setup.capacityField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) {
+		n, _ := ns.GetField(node, bt.setup.balanceField).(*nodeBalance)
 		if n == nil {
 			return
 		}
@@ -126,15 +100,22 @@ func NewBalanceTracker(ns *nodestate.NodeStateMachine, setup BalanceTrackerSetup
 			n.deactivate()
 		}
 	})
-	ns.SubscribeField(bt.connAddressField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) {
+	ns.SubscribeField(bt.setup.clientField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) {
+		type peer interface {
+			FreeClientId() string
+		}
 		if newValue != nil {
-			ns.SetFieldSub(node, bt.BalanceField, bt.newNodeBalance(node, newValue.(string)))
+			n := bt.newNodeBalance(node, newValue.(peer).FreeClientId(), true)
+			bt.lock.Lock()
+			n.SetPriceFactors(bt.defaultPosFactors, bt.defaultNegFactors)
+			bt.lock.Unlock()
+			ns.SetFieldSub(node, bt.setup.balanceField, n)
 		} else {
-			ns.SetStateSub(node, nodestate.Flags{}, bt.PriorityFlag, 0)
-			if b, _ := ns.GetField(node, bt.BalanceField).(*NodeBalance); b != nil {
+			ns.SetStateSub(node, nodestate.Flags{}, bt.setup.priorityFlag, 0)
+			if b, _ := ns.GetField(node, bt.setup.balanceField).(*nodeBalance); b != nil {
 				b.deactivate()
 			}
-			ns.SetFieldSub(node, bt.BalanceField, nil)
+			ns.SetFieldSub(node, bt.setup.balanceField, nil)
 		}
 	})
@@ -157,31 +138,31 @@ func NewBalanceTracker(ns *nodestate.NodeStateMachine, setup BalanceTrackerSetup
 	return bt
 }
 
-// Stop saves expiration offset and unsaved node balances and shuts BalanceTracker down
-func (bt *BalanceTracker) Stop() {
+// stop saves expiration offset and unsaved node balances and shuts balanceTracker down
+func (bt *balanceTracker) stop() {
 	now := bt.clock.Now()
 	bt.ndb.setExpiration(bt.posExp.LogOffset(now), bt.negExp.LogOffset(now))
 	close(bt.quit)
 	bt.ns.ForEach(nodestate.Flags{}, nodestate.Flags{}, func(node *enode.Node, state nodestate.Flags) {
-		if n, ok := bt.ns.GetField(node, bt.BalanceField).(*NodeBalance); ok {
+		if n, ok := bt.ns.GetField(node, bt.setup.balanceField).(*nodeBalance); ok {
 			n.lock.Lock()
 			n.storeBalance(true, true)
 			n.lock.Unlock()
-			bt.ns.SetField(node, bt.BalanceField, nil)
+			bt.ns.SetField(node, bt.setup.balanceField, nil)
 		}
 	})
 	bt.ndb.close()
 }
 
 // TotalTokenAmount returns the current total amount of service tokens in existence
-func (bt *BalanceTracker) TotalTokenAmount() uint64 {
+func (bt *balanceTracker) TotalTokenAmount() uint64 {
 	bt.lock.Lock()
 	defer bt.lock.Unlock()
 
 	bt.balanceTimer.Update(func(_ time.Duration) bool {
 		bt.active = utils.ExpiredValue{}
 		bt.ns.ForEach(nodestate.Flags{}, nodestate.Flags{}, func(node *enode.Node, state nodestate.Flags) {
-			if n, ok := bt.ns.GetField(node, bt.BalanceField).(*NodeBalance); ok && n.active {
+			if n, ok := bt.ns.GetField(node, bt.setup.balanceField).(*nodeBalance); ok && n.active {
 				pos, _ := n.GetRawBalance()
 				bt.active.AddExp(pos)
 			}
@@ -194,13 +175,21 @@ func (bt *BalanceTracker) TotalTokenAmount() uint64 {
 }
 
 // GetPosBalanceIDs lists node IDs with an associated positive balance
-func (bt *BalanceTracker) GetPosBalanceIDs(start, stop enode.ID, maxCount int) (result []enode.ID) {
+func (bt *balanceTracker) GetPosBalanceIDs(start, stop enode.ID, maxCount int) 
(result []enode.ID) { return bt.ndb.getPosBalanceIDs(start, stop, maxCount) } +// SetDefaultFactors sets the default price factors applied to subsequently connected clients +func (bt *balanceTracker) SetDefaultFactors(posFactors, negFactors PriceFactors) { + bt.lock.Lock() + bt.defaultPosFactors = posFactors + bt.defaultNegFactors = negFactors + bt.lock.Unlock() +} + // SetExpirationTCs sets positive and negative token expiration time constants. // Specified in seconds, 0 means infinite (no expiration). -func (bt *BalanceTracker) SetExpirationTCs(pos, neg uint64) { +func (bt *balanceTracker) SetExpirationTCs(pos, neg uint64) { bt.lock.Lock() defer bt.lock.Unlock() @@ -220,39 +209,55 @@ func (bt *BalanceTracker) SetExpirationTCs(pos, neg uint64) { // GetExpirationTCs returns the current positive and negative token expiration // time constants -func (bt *BalanceTracker) GetExpirationTCs() (pos, neg uint64) { +func (bt *balanceTracker) GetExpirationTCs() (pos, neg uint64) { bt.lock.Lock() defer bt.lock.Unlock() return bt.posExpTC, bt.negExpTC } -// newNodeBalance loads balances from the database and creates a NodeBalance instance -// for the given node. It also sets the PriorityFlag and adds balanceCallbackZero if +// BalanceOperation allows atomic operations on the balance of a node regardless of whether +// it is currently connected or not +func (bt *balanceTracker) BalanceOperation(id enode.ID, connAddress string, cb func(AtomicBalanceOperator)) { + bt.ns.Operation(func() { + var nb *nodeBalance + if node := bt.ns.GetNode(id); node != nil { + nb, _ = bt.ns.GetField(node, bt.setup.balanceField).(*nodeBalance) + } else { + node = enode.SignNull(&enr.Record{}, id) + nb = bt.newNodeBalance(node, connAddress, false) + } + cb(nb) + }) +} + +// newNodeBalance loads balances from the database and creates a nodeBalance instance +// for the given node. It also sets the priorityFlag and adds balanceCallbackZero if // the node has a positive balance. // Note: this function should run inside a NodeStateMachine operation -func (bt *BalanceTracker) newNodeBalance(node *enode.Node, negBalanceKey string) *NodeBalance { +func (bt *balanceTracker) newNodeBalance(node *enode.Node, connAddress string, setFlags bool) *nodeBalance { pb := bt.ndb.getOrNewBalance(node.ID().Bytes(), false) - nb := bt.ndb.getOrNewBalance([]byte(negBalanceKey), true) - n := &NodeBalance{ + nb := bt.ndb.getOrNewBalance([]byte(connAddress), true) + n := &nodeBalance{ bt: bt, node: node, - connAddress: negBalanceKey, - balance: balance{pos: pb, neg: nb}, + setFlags: setFlags, + connAddress: connAddress, + balance: balance{pos: pb, neg: nb, posExp: bt.posExp, negExp: bt.negExp}, initTime: bt.clock.Now(), lastUpdate: bt.clock.Now(), } for i := range n.callbackIndex { n.callbackIndex[i] = -1 } - if n.checkPriorityStatus() { - n.bt.ns.SetStateSub(n.node, n.bt.PriorityFlag, nodestate.Flags{}, 0) + if setFlags && n.checkPriorityStatus() { + n.bt.ns.SetStateSub(n.node, n.bt.setup.priorityFlag, nodestate.Flags{}, 0) } return n } // storeBalance stores either a positive or a negative balance in the database -func (bt *BalanceTracker) storeBalance(id []byte, neg bool, value utils.ExpiredValue) { +func (bt *balanceTracker) storeBalance(id []byte, neg bool, value utils.ExpiredValue) { if bt.canDropBalance(bt.clock.Now(), neg, value) { bt.ndb.delBalance(id, neg) // balance is small enough, drop it directly. 
	} else {
@@ -262,7 +267,7 @@ func (bt *BalanceTracker) storeBalance(id []byte, neg bool, value utils.ExpiredV
 }
 
 // canDropBalance tells whether a positive or negative balance is below the threshold
 // and therefore can be dropped from the database
-func (bt *BalanceTracker) canDropBalance(now mclock.AbsTime, neg bool, b utils.ExpiredValue) bool {
+func (bt *balanceTracker) canDropBalance(now mclock.AbsTime, neg bool, b utils.ExpiredValue) bool {
 	if neg {
 		return b.Value(bt.negExp.LogOffset(now)) <= negThreshold
 	}
@@ -270,7 +275,7 @@ func (bt *BalanceTracker) canDropBalance(now mclock.AbsTime, neg bool, b utils.E
 }
 
 // updateTotalBalance adjusts the total balance after executing the given callback.
-func (bt *BalanceTracker) updateTotalBalance(n *NodeBalance, callback func() bool) {
+func (bt *balanceTracker) updateTotalBalance(n *nodeBalance, callback func() bool) {
 	bt.lock.Lock()
 	defer bt.lock.Unlock()
diff --git a/les/vflux/server/clientpool.go b/les/vflux/server/clientpool.go
new file mode 100644
index 0000000000..2e5fdd0ee7
--- /dev/null
+++ b/les/vflux/server/clientpool.go
@@ -0,0 +1,335 @@
+// Copyright 2019 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package server
+
+import (
+	"errors"
+	"sync"
+	"time"
+
+	"github.com/ethereum/go-ethereum/common/mclock"
+	"github.com/ethereum/go-ethereum/ethdb"
+	"github.com/ethereum/go-ethereum/les/utils"
+	"github.com/ethereum/go-ethereum/les/vflux"
+	"github.com/ethereum/go-ethereum/log"
+	"github.com/ethereum/go-ethereum/p2p/enode"
+	"github.com/ethereum/go-ethereum/p2p/nodestate"
+	"github.com/ethereum/go-ethereum/rlp"
+)
+
+var (
+	ErrNotConnected    = errors.New("client not connected")
+	ErrNoPriority      = errors.New("priority too low to raise capacity")
+	ErrCantFindMaximum = errors.New("unable to find maximum allowed capacity")
+)
+
+// ClientPool implements a client database that assigns a priority to each client
+// based on a positive and negative balance. Positive balance is externally assigned
+// to prioritized clients and is decreased with connection time and processed
+// requests (unless the price factors are zero). If the positive balance is zero
+// then negative balance is accumulated.
+//
+// Balance tracking and priority calculation for connected clients is done by
+// balanceTracker. PriorityQueue ensures that clients with the lowest positive or
+// highest negative balance get evicted when the total capacity allowance is full
+// and new clients with a better balance want to connect.
+//
+// Already connected nodes receive a small bias in their favor in order to avoid
+// accepting and instantly kicking out clients. In theory, we try to ensure that
+// each client can have several minutes of connection time.
+//
+// Balances of disconnected clients are stored in nodeDB, including both the positive
+// and the negative balance. Both balances decrease exponentially; if a balance gets
+// low enough, its record is dropped.
+type ClientPool struct {
+	*priorityPool
+	*balanceTracker
+
+	setup  *serverSetup
+	clock  mclock.Clock
+	closed bool
+	ns     *nodestate.NodeStateMachine
+	synced func() bool
+
+	lock          sync.RWMutex
+	connectedBias time.Duration
+
+	minCap     uint64      // the minimal capacity value allowed for any client
+	capReqNode *enode.Node // node that is requesting capacity change; only used inside NSM operation
+}
+
+// clientPeer represents a peer in the client pool. None of the callbacks should block.
+type clientPeer interface {
+	Node() *enode.Node
+	FreeClientId() string                        // unique id for non-priority clients (typically a prefix of the network address)
+	InactiveAllowance() time.Duration            // disconnection timeout for inactive non-priority peers
+	UpdateCapacity(newCap uint64, requested bool) // signals a capacity update (requested is true if it is a result of a SetCapacity call on the given peer)
+	Disconnect()                                 // initiates disconnection (Unregister should always be called)
+}
+
+// NewClientPool creates a new client pool
+func NewClientPool(balanceDb ethdb.KeyValueStore, minCap uint64, connectedBias time.Duration, clock mclock.Clock, synced func() bool) *ClientPool {
+	setup := newServerSetup()
+	ns := nodestate.NewNodeStateMachine(nil, nil, clock, setup.setup)
+	cp := &ClientPool{
+		priorityPool:   newPriorityPool(ns, setup, clock, minCap, connectedBias, 4, 100),
+		balanceTracker: newBalanceTracker(ns, setup, balanceDb, clock, &utils.Expirer{}, &utils.Expirer{}),
+		setup:          setup,
+		ns:             ns,
+		clock:          clock,
+		minCap:         minCap,
+		connectedBias:  connectedBias,
+		synced:         synced,
+	}
+
+	ns.SubscribeState(nodestate.MergeFlags(setup.activeFlag, setup.inactiveFlag, setup.priorityFlag), func(node *enode.Node, oldState, newState nodestate.Flags) {
+		if newState.Equals(setup.inactiveFlag) {
+			// set timeout for non-priority inactive client
+			var timeout time.Duration
+			if c, ok := ns.GetField(node, setup.clientField).(clientPeer); ok {
+				timeout = c.InactiveAllowance()
+			}
+			if timeout > 0 {
+				ns.AddTimeout(node, setup.inactiveFlag, timeout)
+			} else {
+				// Note: if capacity is immediately available then priorityPool will set the active
+				// flag simultaneously with removing the inactive flag and therefore this will not
+				// initiate disconnection
+				ns.SetStateSub(node, nodestate.Flags{}, setup.inactiveFlag, 0)
+			}
+		}
+		if oldState.Equals(setup.inactiveFlag) && newState.Equals(setup.inactiveFlag.Or(setup.priorityFlag)) {
+			ns.SetStateSub(node, setup.inactiveFlag, nodestate.Flags{}, 0) // priority gained; remove timeout
+		}
+		if newState.Equals(setup.activeFlag) {
+			// active with no priority; limit capacity to minCap
+			cap, _ := ns.GetField(node, setup.capacityField).(uint64)
+			if cap > minCap {
+				cp.requestCapacity(node, minCap, minCap, 0)
+			}
+		}
+		if newState.Equals(nodestate.Flags{}) {
+			if c, ok := ns.GetField(node, setup.clientField).(clientPeer); ok {
+				c.Disconnect()
+			}
+		}
+	})
+
+	ns.SubscribeField(setup.capacityField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) {
+		if c, ok := ns.GetField(node, setup.clientField).(clientPeer); ok {
+			newCap, _ := newValue.(uint64)
+			c.UpdateCapacity(newCap, node == cp.capReqNode)
+		}
+	})
+
+	// add metrics
+	cp.ns.SubscribeState(nodestate.MergeFlags(cp.setup.activeFlag, cp.setup.inactiveFlag), func(node *enode.Node, oldState, newState nodestate.Flags) {
+		if oldState.IsEmpty() && !newState.IsEmpty() {
+			clientConnectedMeter.Mark(1)
+		}
+		if !oldState.IsEmpty() && newState.IsEmpty() {
+			clientDisconnectedMeter.Mark(1)
+		}
+		if oldState.HasNone(cp.setup.activeFlag) && newState.HasAll(cp.setup.activeFlag) {
+			clientActivatedMeter.Mark(1)
+		}
+		if oldState.HasAll(cp.setup.activeFlag) && newState.HasNone(cp.setup.activeFlag) {
+			clientDeactivatedMeter.Mark(1)
+		}
+		_, connected := cp.Active()
+		totalConnectedGauge.Update(int64(connected))
+	})
+	return cp
+}
+
+// Start starts the client pool. Should be called before Register/Unregister.
+func (cp *ClientPool) Start() {
+	cp.ns.Start()
+}
+
+// Stop shuts the client pool down. The clientPeer interface callbacks will not be called
+// after Stop. Register calls will return nil.
+func (cp *ClientPool) Stop() {
+	cp.balanceTracker.stop()
+	cp.ns.Stop()
+}
+
+// Register registers the peer into the client pool. If the peer has insufficient
+// priority and remains inactive for longer than the allowed timeout then it will be
+// disconnected by calling the Disconnect function of the clientPeer interface.
+func (cp *ClientPool) Register(peer clientPeer) ConnectedBalance {
+	cp.ns.SetField(peer.Node(), cp.setup.clientField, peerWrapper{peer})
+	balance, _ := cp.ns.GetField(peer.Node(), cp.setup.balanceField).(*nodeBalance)
+	return balance
+}
+
+// Unregister removes the peer from the client pool
+func (cp *ClientPool) Unregister(peer clientPeer) {
+	cp.ns.SetField(peer.Node(), cp.setup.clientField, nil)
+}
+
+// SetConnectedBias sets the connection bias, which is applied to already connected
+// clients so that they won't be kicked out too soon and all connected clients have
+// enough time to request or sync some data.
+func (cp *ClientPool) SetConnectedBias(bias time.Duration) {
+	cp.lock.Lock()
+	cp.connectedBias = bias
+	cp.setActiveBias(bias)
+	cp.lock.Unlock()
+}
+
+// SetCapacity sets the assigned capacity of a connected client
+func (cp *ClientPool) SetCapacity(node *enode.Node, reqCap uint64, bias time.Duration, requested bool) (capacity uint64, err error) {
+	cp.lock.RLock()
+	if cp.connectedBias > bias {
+		bias = cp.connectedBias
+	}
+	cp.lock.RUnlock()
+
+	cp.ns.Operation(func() {
+		balance, _ := cp.ns.GetField(node, cp.setup.balanceField).(*nodeBalance)
+		if balance == nil {
+			err = ErrNotConnected
+			return
+		}
+		capacity, _ = cp.ns.GetField(node, cp.setup.capacityField).(uint64)
+		if capacity == 0 {
+			// if the client is inactive then it has insufficient priority for the minimal capacity
+			// (will be activated automatically with minCap when possible)
+			return
+		}
+		if reqCap < cp.minCap {
+			// can't request less than minCap; switching between 0 (inactive state) and minCap is
+			// performed by the server automatically as soon as necessary/possible
+			reqCap = cp.minCap
+		}
+		if reqCap > cp.minCap && cp.ns.GetState(node).HasNone(cp.setup.priorityFlag) {
+			err = ErrNoPriority
+			return
+		}
+		if reqCap == capacity {
+			return
+		}
+		if requested {
+			// mark the requested node so that the UpdateCapacity callback can signal
+			// whether the update is the direct result of a SetCapacity call on the given node
+			cp.capReqNode = node
+			defer func() {
+				cp.capReqNode = nil
+			}()
+		}
+
+		var minTarget, maxTarget uint64
+		if reqCap > capacity {
+			// Estimate maximum available capacity at the current priority level and request
+			// the estimated amount.
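			// Illustrative numbers: if the curve estimates a maximum of 10000 while the
			// node currently holds 4000, the fine search below is confined to the narrow
			// range [9500, 10000] (maxTarget minus 5%) instead of the full [4000, 10000].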
+ // Note: requestCapacity could find the highest available capacity between the + // current and the requested capacity but it could cost a lot of iterations with + // fine step adjustment if the requested capacity is very high. By doing a quick + // estimation of the maximum available capacity based on the capacity curve we + // can limit the number of required iterations. + curve := cp.getCapacityCurve().exclude(node.ID()) + maxTarget = curve.maxCapacity(func(capacity uint64) int64 { + return balance.estimatePriority(capacity, 0, 0, bias, false) + }) + if maxTarget <= capacity { + return + } + if maxTarget > reqCap { + maxTarget = reqCap + } + // Specify a narrow target range that allows a limited number of fine step + // iterations + minTarget = maxTarget - maxTarget/20 + if minTarget < capacity { + minTarget = capacity + } + } else { + minTarget, maxTarget = reqCap, reqCap + } + if newCap := cp.requestCapacity(node, minTarget, maxTarget, bias); newCap >= minTarget && newCap <= maxTarget { + capacity = newCap + return + } + // we should be able to find the maximum allowed capacity in a few iterations + log.Error("Unable to find maximum allowed capacity") + err = ErrCantFindMaximum + }) + return +} + +// serveCapQuery serves a vflux capacity query. It receives multiple token amount values +// and a bias time value. For each given token amount it calculates the maximum achievable +// capacity in case the amount is added to the balance. +func (cp *ClientPool) serveCapQuery(id enode.ID, freeID string, data []byte) []byte { + var req vflux.CapacityQueryReq + if rlp.DecodeBytes(data, &req) != nil { + return nil + } + if l := len(req.AddTokens); l == 0 || l > vflux.CapacityQueryMaxLen { + return nil + } + result := make(vflux.CapacityQueryReply, len(req.AddTokens)) + if !cp.synced() { + capacityQueryZeroMeter.Mark(1) + reply, _ := rlp.EncodeToBytes(&result) + return reply + } + + bias := time.Second * time.Duration(req.Bias) + cp.lock.RLock() + if cp.connectedBias > bias { + bias = cp.connectedBias + } + cp.lock.RUnlock() + + // use capacityCurve to answer request for multiple newly bought token amounts + curve := cp.getCapacityCurve().exclude(id) + cp.BalanceOperation(id, freeID, func(balance AtomicBalanceOperator) { + pb, _ := balance.GetBalance() + for i, addTokens := range req.AddTokens { + add := addTokens.Int64() + result[i] = curve.maxCapacity(func(capacity uint64) int64 { + return balance.estimatePriority(capacity, add, 0, bias, false) / int64(capacity) + }) + if add <= 0 && uint64(-add) >= pb && result[i] > cp.minCap { + result[i] = cp.minCap + } + if result[i] < cp.minCap { + result[i] = 0 + } + } + }) + // add first result to metrics (don't care about priority client multi-queries yet) + if result[0] == 0 { + capacityQueryZeroMeter.Mark(1) + } else { + capacityQueryNonZeroMeter.Mark(1) + } + reply, _ := rlp.EncodeToBytes(&result) + return reply +} + +// Handle implements Service +func (cp *ClientPool) Handle(id enode.ID, address string, name string, data []byte) []byte { + switch name { + case vflux.CapacityQueryName: + return cp.serveCapQuery(id, address, data) + default: + return nil + } +} diff --git a/les/clientpool_test.go b/les/vflux/server/clientpool_test.go similarity index 62% rename from les/clientpool_test.go rename to les/vflux/server/clientpool_test.go index 345b373b0f..9503121697 100644 --- a/les/clientpool_test.go +++ b/les/vflux/server/clientpool_test.go @@ -14,7 +14,7 @@ // You should have received a copy of the GNU Lesser General Public License // along with the 
go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
 
-package les
+package server
 
 import (
 	"fmt"
@@ -24,12 +24,13 @@ import (
 
 	"github.com/ethereum/go-ethereum/common/mclock"
 	"github.com/ethereum/go-ethereum/core/rawdb"
-	vfs "github.com/ethereum/go-ethereum/les/vflux/server"
 	"github.com/ethereum/go-ethereum/p2p/enode"
 	"github.com/ethereum/go-ethereum/p2p/enr"
 	"github.com/ethereum/go-ethereum/p2p/nodestate"
 )
 
+const defaultConnectedBias = time.Minute * 3
+
 func TestClientPoolL10C100Free(t *testing.T) {
 	testClientPool(t, 10, 100, 0, true)
 }
@@ -64,11 +65,6 @@ type poolTestPeer struct {
 	inactiveAllowed bool
 }
 
-func testStateMachine() *nodestate.NodeStateMachine {
-	return nodestate.NewNodeStateMachine(nil, nil, mclock.System{}, serverSetup)
-
-}
-
 func newPoolTestPeer(i int, disconnCh chan int) *poolTestPeer {
 	return &poolTestPeer{
 		index: i,
@@ -81,36 +77,39 @@ func (i *poolTestPeer) Node() *enode.Node {
 	return i.node
 }
 
-func (i *poolTestPeer) freeClientId() string {
+func (i *poolTestPeer) FreeClientId() string {
 	return fmt.Sprintf("addr #%d", i.index)
 }
 
-func (i *poolTestPeer) updateCapacity(cap uint64) {
-	i.cap = cap
+func (i *poolTestPeer) InactiveAllowance() time.Duration {
+	if i.inactiveAllowed {
+		return time.Second * 10
+	}
+	return 0
 }
 
-func (i *poolTestPeer) freeze() {}
-
-func (i *poolTestPeer) allowInactive() bool {
-	return i.inactiveAllowed
+func (i *poolTestPeer) UpdateCapacity(capacity uint64, requested bool) {
+	i.cap = capacity
 }
 
-func getBalance(pool *clientPool, p *poolTestPeer) (pos, neg uint64) {
-	temp := pool.ns.GetField(p.node, clientInfoField) == nil
-	if temp {
-		pool.ns.SetField(p.node, connAddressField, p.freeClientId())
-	}
-	n, _ := pool.ns.GetField(p.node, pool.BalanceField).(*vfs.NodeBalance)
-	pos, neg = n.GetBalance()
-	if temp {
-		pool.ns.SetField(p.node, connAddressField, nil)
+func (i *poolTestPeer) Disconnect() {
+	if i.disconnCh == nil {
+		return
 	}
+	id := i.node.ID()
+	i.disconnCh <- int(id[0]) + int(id[1])<<8
+}
+
+func getBalance(pool *ClientPool, p *poolTestPeer) (pos, neg uint64) {
+	pool.BalanceOperation(p.node.ID(), p.FreeClientId(), func(nb AtomicBalanceOperator) {
+		pos, neg = nb.GetBalance()
+	})
 	return
 }
 
-func addBalance(pool *clientPool, id enode.ID, amount int64) {
-	pool.forClients([]enode.ID{id}, func(c *clientInfo) {
-		c.balance.AddBalance(amount)
+func addBalance(pool *ClientPool, id enode.ID, amount int64) {
+	pool.BalanceOperation(id, "", func(nb AtomicBalanceOperator) {
+		nb.AddBalance(amount)
 	})
 }
 
@@ -122,6 +121,19 @@ func checkDiff(a, b uint64) bool {
 	return a > b+maxDiff || b > a+maxDiff
 }
 
+func connect(pool *ClientPool, peer *poolTestPeer) uint64 {
+	pool.Register(peer)
+	return peer.cap
+}
+
+func disconnect(pool *ClientPool, peer *poolTestPeer) {
+	pool.Unregister(peer)
+}
+
+func alwaysTrueFn() bool {
+	return true
+}
+
 func testClientPool(t *testing.T, activeLimit, clientCount, paidCount int, randomDisconnect bool) {
 	rand.Seed(time.Now().UnixNano())
 	var (
@@ -130,19 +142,17 @@ func testClientPool(t *testing.T, activeLimit, clientCount, paidCount int, rando
 		connected = make([]bool, clientCount)
 		connTicks = make([]int, clientCount)
 		disconnCh = make(chan int, clientCount)
-		disconnFn = func(id enode.ID) {
-			disconnCh <- int(id[0]) + int(id[1])<<8
-		}
-		pool = newClientPool(testStateMachine(), db, 1, 0, &clock, disconnFn)
+		pool      = NewClientPool(db, 1, 0, &clock, alwaysTrueFn)
 	)
-	pool.ns.Start()
+	pool.Start()
+	pool.SetExpirationTCs(0, 1000)
 
-	pool.setLimits(activeLimit, uint64(activeLimit))
-	pool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 
1, CapacityFactor: 0, RequestFactor: 1}, vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) + pool.SetLimits(uint64(activeLimit), uint64(activeLimit)) + pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) // pool should accept new peers up to its connected limit for i := 0; i < activeLimit; i++ { - if cap, _ := pool.connect(newPoolTestPeer(i, disconnCh)); cap != 0 { + if cap := connect(pool, newPoolTestPeer(i, disconnCh)); cap != 0 { connected[i] = true } else { t.Fatalf("Test peer #%d rejected", i) @@ -163,23 +173,23 @@ func testClientPool(t *testing.T, activeLimit, clientCount, paidCount int, rando i := rand.Intn(clientCount) if connected[i] { if randomDisconnect { - pool.disconnect(newPoolTestPeer(i, disconnCh)) + disconnect(pool, newPoolTestPeer(i, disconnCh)) connected[i] = false connTicks[i] += tickCounter } } else { - if cap, _ := pool.connect(newPoolTestPeer(i, disconnCh)); cap != 0 { + if cap := connect(pool, newPoolTestPeer(i, disconnCh)); cap != 0 { connected[i] = true connTicks[i] -= tickCounter } else { - pool.disconnect(newPoolTestPeer(i, disconnCh)) + disconnect(pool, newPoolTestPeer(i, disconnCh)) } } pollDisconnects: for { select { case i := <-disconnCh: - pool.disconnect(newPoolTestPeer(i, disconnCh)) + disconnect(pool, newPoolTestPeer(i, disconnCh)) if connected[i] { connTicks[i] += tickCounter connected[i] = false @@ -211,18 +221,18 @@ func testClientPool(t *testing.T, activeLimit, clientCount, paidCount int, rando t.Errorf("Total connected time of test node #%d (%d) outside expected range (%d to %d)", i, connTicks[i], min, max) } } - pool.stop() + pool.Stop() } -func testPriorityConnect(t *testing.T, pool *clientPool, p *poolTestPeer, cap uint64, expSuccess bool) { - if cap, _ := pool.connect(p); cap == 0 { +func testPriorityConnect(t *testing.T, pool *ClientPool, p *poolTestPeer, cap uint64, expSuccess bool) { + if cap := connect(pool, p); cap == 0 { if expSuccess { t.Fatalf("Failed to connect paid client") } else { return } } - if _, err := pool.setCapacity(p.node, "", cap, defaultConnectedBias, true); err != nil { + if newCap, _ := pool.SetCapacity(p.node, cap, defaultConnectedBias, true); newCap != cap { if expSuccess { t.Fatalf("Failed to raise capacity of paid client") } else { @@ -239,11 +249,11 @@ func TestConnectPaidClient(t *testing.T) { clock mclock.Simulated db = rawdb.NewMemoryDatabase() ) - pool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, func(id enode.ID) {}) - pool.ns.Start() - defer pool.stop() - pool.setLimits(10, uint64(10)) - pool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) + pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn) + pool.Start() + defer pool.Stop() + pool.SetLimits(10, uint64(10)) + pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) // Add balance for an external client and mark it as paid client addBalance(pool, newPoolTestPeer(0, nil).node.ID(), int64(time.Minute)) @@ -255,16 +265,16 @@ func TestConnectPaidClientToSmallPool(t *testing.T) { clock mclock.Simulated db = rawdb.NewMemoryDatabase() ) - pool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, func(id enode.ID) {}) - pool.ns.Start() - defer pool.stop() - 
pool.setLimits(10, uint64(10)) // Total capacity limit is 10 - pool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) + pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn) + pool.Start() + defer pool.Stop() + pool.SetLimits(10, uint64(10)) // Total capacity limit is 10 + pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) // Add balance for an external client and mark it as paid client addBalance(pool, newPoolTestPeer(0, nil).node.ID(), int64(time.Minute)) - // Connect a fat paid client to pool, should reject it. + // connect a fat paid client to pool, should reject it. testPriorityConnect(t, pool, newPoolTestPeer(0, nil), 100, false) } @@ -273,24 +283,23 @@ func TestConnectPaidClientToFullPool(t *testing.T) { clock mclock.Simulated db = rawdb.NewMemoryDatabase() ) - removeFn := func(enode.ID) {} // Noop - pool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, removeFn) - pool.ns.Start() - defer pool.stop() - pool.setLimits(10, uint64(10)) // Total capacity limit is 10 - pool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) + pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn) + pool.Start() + defer pool.Stop() + pool.SetLimits(10, uint64(10)) // Total capacity limit is 10 + pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) for i := 0; i < 10; i++ { addBalance(pool, newPoolTestPeer(i, nil).node.ID(), int64(time.Second*20)) - pool.connect(newPoolTestPeer(i, nil)) + connect(pool, newPoolTestPeer(i, nil)) } addBalance(pool, newPoolTestPeer(11, nil).node.ID(), int64(time.Second*2)) // Add low balance to new paid client - if cap, _ := pool.connect(newPoolTestPeer(11, nil)); cap != 0 { + if cap := connect(pool, newPoolTestPeer(11, nil)); cap != 0 { t.Fatalf("Low balance paid client should be rejected") } clock.Run(time.Second) addBalance(pool, newPoolTestPeer(12, nil).node.ID(), int64(time.Minute*5)) // Add high balance to new paid client - if cap, _ := pool.connect(newPoolTestPeer(12, nil)); cap == 0 { + if cap := connect(pool, newPoolTestPeer(12, nil)); cap == 0 { t.Fatalf("High balance paid client should be accepted") } } @@ -301,23 +310,20 @@ func TestPaidClientKickedOut(t *testing.T) { db = rawdb.NewMemoryDatabase() kickedCh = make(chan int, 100) ) - removeFn := func(id enode.ID) { - kickedCh <- int(id[0]) - } - pool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, removeFn) - pool.ns.Start() - pool.bt.SetExpirationTCs(0, 0) - defer pool.stop() - pool.setLimits(10, uint64(10)) // Total capacity limit is 10 - pool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) + pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn) + pool.Start() + pool.SetExpirationTCs(0, 0) + defer pool.Stop() + pool.SetLimits(10, uint64(10)) // Total capacity limit is 10 + pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) for i := 0; i < 10; i++ { addBalance(pool, newPoolTestPeer(i, 
kickedCh).node.ID(), 10000000000) // 10 second allowance - pool.connect(newPoolTestPeer(i, kickedCh)) + connect(pool, newPoolTestPeer(i, kickedCh)) clock.Run(time.Millisecond) } clock.Run(defaultConnectedBias + time.Second*11) - if cap, _ := pool.connect(newPoolTestPeer(11, kickedCh)); cap == 0 { + if cap := connect(pool, newPoolTestPeer(11, kickedCh)); cap == 0 { t.Fatalf("Free client should be accepted") } select { @@ -335,12 +341,12 @@ func TestConnectFreeClient(t *testing.T) { clock mclock.Simulated db = rawdb.NewMemoryDatabase() ) - pool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, func(id enode.ID) {}) - pool.ns.Start() - defer pool.stop() - pool.setLimits(10, uint64(10)) - pool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) - if cap, _ := pool.connect(newPoolTestPeer(0, nil)); cap == 0 { + pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn) + pool.Start() + defer pool.Stop() + pool.SetLimits(10, uint64(10)) + pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) + if cap := connect(pool, newPoolTestPeer(0, nil)); cap == 0 { t.Fatalf("Failed to connect free client") } testPriorityConnect(t, pool, newPoolTestPeer(0, nil), 2, false) @@ -351,26 +357,25 @@ func TestConnectFreeClientToFullPool(t *testing.T) { clock mclock.Simulated db = rawdb.NewMemoryDatabase() ) - removeFn := func(enode.ID) {} // Noop - pool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, removeFn) - pool.ns.Start() - defer pool.stop() - pool.setLimits(10, uint64(10)) // Total capacity limit is 10 - pool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) + pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn) + pool.Start() + defer pool.Stop() + pool.SetLimits(10, uint64(10)) // Total capacity limit is 10 + pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) for i := 0; i < 10; i++ { - pool.connect(newPoolTestPeer(i, nil)) + connect(pool, newPoolTestPeer(i, nil)) } - if cap, _ := pool.connect(newPoolTestPeer(11, nil)); cap != 0 { + if cap := connect(pool, newPoolTestPeer(11, nil)); cap != 0 { t.Fatalf("New free client should be rejected") } clock.Run(time.Minute) - if cap, _ := pool.connect(newPoolTestPeer(12, nil)); cap != 0 { + if cap := connect(pool, newPoolTestPeer(12, nil)); cap != 0 { t.Fatalf("New free client should be rejected") } clock.Run(time.Millisecond) clock.Run(4 * time.Minute) - if cap, _ := pool.connect(newPoolTestPeer(13, nil)); cap == 0 { + if cap := connect(pool, newPoolTestPeer(13, nil)); cap == 0 { t.Fatalf("Old client connects more than 5min should be kicked") } } @@ -381,18 +386,17 @@ func TestFreeClientKickedOut(t *testing.T) { db = rawdb.NewMemoryDatabase() kicked = make(chan int, 100) ) - removeFn := func(id enode.ID) { kicked <- int(id[0]) } - pool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, removeFn) - pool.ns.Start() - defer pool.stop() - pool.setLimits(10, uint64(10)) // Total capacity limit is 10 - pool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, 
RequestFactor: 1}) + pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn) + pool.Start() + defer pool.Stop() + pool.SetLimits(10, uint64(10)) // Total capacity limit is 10 + pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) for i := 0; i < 10; i++ { - pool.connect(newPoolTestPeer(i, kicked)) + connect(pool, newPoolTestPeer(i, kicked)) clock.Run(time.Millisecond) } - if cap, _ := pool.connect(newPoolTestPeer(10, kicked)); cap != 0 { + if cap := connect(pool, newPoolTestPeer(10, kicked)); cap != 0 { t.Fatalf("New free client should be rejected") } select { @@ -400,10 +404,10 @@ func TestFreeClientKickedOut(t *testing.T) { case <-time.NewTimer(time.Second).C: t.Fatalf("timeout") } - pool.disconnect(newPoolTestPeer(10, kicked)) + disconnect(pool, newPoolTestPeer(10, kicked)) clock.Run(5 * time.Minute) for i := 0; i < 10; i++ { - pool.connect(newPoolTestPeer(i+10, kicked)) + connect(pool, newPoolTestPeer(i+10, kicked)) } for i := 0; i < 10; i++ { select { @@ -423,18 +427,17 @@ func TestPositiveBalanceCalculation(t *testing.T) { db = rawdb.NewMemoryDatabase() kicked = make(chan int, 10) ) - removeFn := func(id enode.ID) { kicked <- int(id[0]) } // Noop - pool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, removeFn) - pool.ns.Start() - defer pool.stop() - pool.setLimits(10, uint64(10)) // Total capacity limit is 10 - pool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) + pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn) + pool.Start() + defer pool.Stop() + pool.SetLimits(10, uint64(10)) // Total capacity limit is 10 + pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) addBalance(pool, newPoolTestPeer(0, kicked).node.ID(), int64(time.Minute*3)) testPriorityConnect(t, pool, newPoolTestPeer(0, kicked), 10, true) clock.Run(time.Minute) - pool.disconnect(newPoolTestPeer(0, kicked)) + disconnect(pool, newPoolTestPeer(0, kicked)) pb, _ := getBalance(pool, newPoolTestPeer(0, kicked)) if checkDiff(pb, uint64(time.Minute*2)) { t.Fatalf("Positive balance mismatch, want %v, got %v", uint64(time.Minute*2), pb) @@ -447,12 +450,11 @@ func TestDowngradePriorityClient(t *testing.T) { db = rawdb.NewMemoryDatabase() kicked = make(chan int, 10) ) - removeFn := func(id enode.ID) { kicked <- int(id[0]) } // Noop - pool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, removeFn) - pool.ns.Start() - defer pool.stop() - pool.setLimits(10, uint64(10)) // Total capacity limit is 10 - pool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) + pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn) + pool.Start() + defer pool.Stop() + pool.SetLimits(10, uint64(10)) // Total capacity limit is 10 + pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) p := newPoolTestPeer(0, kicked) addBalance(pool, p.node.ID(), int64(time.Minute)) @@ -483,30 +485,31 @@ func TestNegativeBalanceCalculation(t *testing.T) { clock mclock.Simulated db = rawdb.NewMemoryDatabase() ) - pool := newClientPool(testStateMachine(), 
db, 1, defaultConnectedBias, &clock, func(id enode.ID) {}) - pool.ns.Start() - defer pool.stop() - pool.setLimits(10, uint64(10)) // Total capacity limit is 10 - pool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 1e-3, CapacityFactor: 0, RequestFactor: 1}, vfs.PriceFactors{TimeFactor: 1e-3, CapacityFactor: 0, RequestFactor: 1}) + pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn) + pool.Start() + defer pool.Stop() + pool.SetExpirationTCs(0, 3600) + pool.SetLimits(10, uint64(10)) // Total capacity limit is 10 + pool.SetDefaultFactors(PriceFactors{TimeFactor: 1e-3, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1e-3, CapacityFactor: 0, RequestFactor: 1}) for i := 0; i < 10; i++ { - pool.connect(newPoolTestPeer(i, nil)) + connect(pool, newPoolTestPeer(i, nil)) } clock.Run(time.Second) for i := 0; i < 10; i++ { - pool.disconnect(newPoolTestPeer(i, nil)) + disconnect(pool, newPoolTestPeer(i, nil)) _, nb := getBalance(pool, newPoolTestPeer(i, nil)) if nb != 0 { t.Fatalf("Short connection shouldn't be recorded") } } for i := 0; i < 10; i++ { - pool.connect(newPoolTestPeer(i, nil)) + connect(pool, newPoolTestPeer(i, nil)) } clock.Run(time.Minute) for i := 0; i < 10; i++ { - pool.disconnect(newPoolTestPeer(i, nil)) + disconnect(pool, newPoolTestPeer(i, nil)) _, nb := getBalance(pool, newPoolTestPeer(i, nil)) exp := uint64(time.Minute) / 1000 exp -= exp / 120 // correct for negative balance expiration @@ -521,10 +524,10 @@ func TestInactiveClient(t *testing.T) { clock mclock.Simulated db = rawdb.NewMemoryDatabase() ) - pool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, func(id enode.ID) {}) - pool.ns.Start() - defer pool.stop() - pool.setLimits(2, uint64(2)) + pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn) + pool.Start() + defer pool.Stop() + pool.SetLimits(2, uint64(2)) p1 := newPoolTestPeer(1, nil) p1.inactiveAllowed = true @@ -535,15 +538,15 @@ func TestInactiveClient(t *testing.T) { addBalance(pool, p1.node.ID(), 1000*int64(time.Second)) addBalance(pool, p3.node.ID(), 2000*int64(time.Second)) // p1: 1000 p2: 0 p3: 2000 - p1.cap, _ = pool.connect(p1) + p1.cap = connect(pool, p1) if p1.cap != 1 { t.Fatalf("Failed to connect peer #1") } - p2.cap, _ = pool.connect(p2) + p2.cap = connect(pool, p2) if p2.cap != 1 { t.Fatalf("Failed to connect peer #2") } - p3.cap, _ = pool.connect(p3) + p3.cap = connect(pool, p3) if p3.cap != 1 { t.Fatalf("Failed to connect peer #3") } @@ -566,11 +569,11 @@ func TestInactiveClient(t *testing.T) { if p2.cap != 0 { t.Fatalf("Failed to deactivate peer #2") } - pool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 0}, vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 0}) + pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 0}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 0}) p4 := newPoolTestPeer(4, nil) addBalance(pool, p4.node.ID(), 1500*int64(time.Second)) // p1: 1000 p2: 500 p3: 2000 p4: 1500 - p4.cap, _ = pool.connect(p4) + p4.cap = connect(pool, p4) if p4.cap != 1 { t.Fatalf("Failed to activate peer #4") } @@ -579,8 +582,8 @@ func TestInactiveClient(t *testing.T) { } clock.Run(time.Second * 600) // manually trigger a check to avoid a long real-time wait - pool.ns.SetState(p1.node, pool.UpdateFlag, nodestate.Flags{}, 0) - pool.ns.SetState(p1.node, nodestate.Flags{}, pool.UpdateFlag, 0) + pool.ns.SetState(p1.node, pool.setup.updateFlag, nodestate.Flags{}, 0) + 
pool.ns.SetState(p1.node, nodestate.Flags{}, pool.setup.updateFlag, 0) // p1: 1000 p2: 500 p3: 2000 p4: 900 if p1.cap != 1 { t.Fatalf("Failed to activate peer #1") @@ -588,8 +591,8 @@ func TestInactiveClient(t *testing.T) { if p4.cap != 0 { t.Fatalf("Failed to deactivate peer #4") } - pool.disconnect(p2) - pool.disconnect(p4) + disconnect(pool, p2) + disconnect(pool, p4) addBalance(pool, p1.node.ID(), -1000*int64(time.Second)) if p1.cap != 1 { t.Fatalf("Should not deactivate peer #1") diff --git a/les/vflux/server/metrics.go b/les/vflux/server/metrics.go new file mode 100644 index 0000000000..307b8347af --- /dev/null +++ b/les/vflux/server/metrics.go @@ -0,0 +1,33 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +package server + +import ( + "github.com/ethereum/go-ethereum/metrics" +) + +var ( + totalConnectedGauge = metrics.NewRegisteredGauge("vflux/server/totalConnected", nil) + + clientConnectedMeter = metrics.NewRegisteredMeter("vflux/server/clientEvent/connected", nil) + clientActivatedMeter = metrics.NewRegisteredMeter("vflux/server/clientEvent/activated", nil) + clientDeactivatedMeter = metrics.NewRegisteredMeter("vflux/server/clientEvent/deactivated", nil) + clientDisconnectedMeter = metrics.NewRegisteredMeter("vflux/server/clientEvent/disconnected", nil) + + capacityQueryZeroMeter = metrics.NewRegisteredMeter("vflux/server/capQueryZero", nil) + capacityQueryNonZeroMeter = metrics.NewRegisteredMeter("vflux/server/capQueryNonZero", nil) +) diff --git a/les/vflux/server/prioritypool.go b/les/vflux/server/prioritypool.go index e940ac7c65..573a3570a4 100644 --- a/les/vflux/server/prioritypool.go +++ b/les/vflux/server/prioritypool.go @@ -18,7 +18,6 @@ package server import ( "math" - "reflect" "sync" "time" @@ -33,36 +32,7 @@ const ( lazyQueueRefresh = time.Second * 10 // refresh period of the active queue ) -// PriorityPoolSetup contains node state flags and fields used by PriorityPool -// Note: ActiveFlag and InactiveFlag can be controlled both externally and by the pool, -// see PriorityPool description for details. 
-type PriorityPoolSetup struct { - // controlled by PriorityPool - ActiveFlag, InactiveFlag nodestate.Flags - CapacityField, ppNodeInfoField nodestate.Field - // external connections - updateFlag nodestate.Flags - priorityField nodestate.Field -} - -// NewPriorityPoolSetup creates a new PriorityPoolSetup and initializes the fields -// and flags controlled by PriorityPool -func NewPriorityPoolSetup(setup *nodestate.Setup) PriorityPoolSetup { - return PriorityPoolSetup{ - ActiveFlag: setup.NewFlag("active"), - InactiveFlag: setup.NewFlag("inactive"), - CapacityField: setup.NewField("capacity", reflect.TypeOf(uint64(0))), - ppNodeInfoField: setup.NewField("ppNodeInfo", reflect.TypeOf(&ppNodeInfo{})), - } -} - -// Connect sets the fields and flags used by PriorityPool as an input -func (pps *PriorityPoolSetup) Connect(priorityField nodestate.Field, updateFlag nodestate.Flags) { - pps.priorityField = priorityField // should implement nodePriority - pps.updateFlag = updateFlag // triggers an immediate priority update -} - -// PriorityPool handles a set of nodes where each node has a capacity (a scalar value) +// priorityPool handles a set of nodes where each node has a capacity (a scalar value) // and a priority (which can change over time and can also depend on the capacity). // A node is active if it has at least the necessary minimal amount of capacity while // inactive nodes have 0 capacity (values between 0 and the minimum are not allowed). @@ -79,70 +49,70 @@ func (pps *PriorityPoolSetup) Connect(priorityField nodestate.Field, updateFlag // This time bias can be interpreted as minimum expected active time at the given // capacity (if the threshold priority stays the same). // -// Nodes in the pool always have either InactiveFlag or ActiveFlag set. A new node is -// added to the pool by externally setting InactiveFlag. PriorityPool can switch a node -// between InactiveFlag and ActiveFlag at any time. Nodes can be removed from the pool -// by externally resetting both flags. ActiveFlag should not be set externally. +// Nodes in the pool always have either inactiveFlag or activeFlag set. A new node is +// added to the pool by externally setting inactiveFlag. priorityPool can switch a node +// between inactiveFlag and activeFlag at any time. Nodes can be removed from the pool +// by externally resetting both flags. activeFlag should not be set externally. // // The highest priority nodes in "inactive" state are moved to "active" state as soon as // the minimum capacity can be granted for them. The capacity of lower priority active // nodes is reduced or they are demoted to "inactive" state if their priority is // insufficient even at minimal capacity. 
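For orientation, the renamed pool consumes priorities through the lowercased nodePriority interface (priority/estimatePriority) that this patch references throughout the file; its definition is relocated outside this hunk. A minimal sketch of a conforming implementation, assuming a hypothetical flat balance model much like the ppTestClient helper in prioritypool_test.go further below:

	package server

	import "time"

	// Sketch only: a flat-balance priority source; flatPriority and its
	// balance field are hypothetical, not part of this patch.
	type flatPriority struct {
		balance uint64
	}

	// priority reports the current priority at the given capacity; balance/cap
	// decreases monotonically in cap, which the capacity curve logic relies on.
	func (f *flatPriority) priority(cap uint64) int64 {
		return int64(f.balance / cap)
	}

	// estimatePriority returns a lower estimate for the priority over the given
	// future period; with a static balance it simply equals the current value.
	func (f *flatPriority) estimatePriority(cap uint64, addBalance int64, future, bias time.Duration, update bool) int64 {
		return int64(f.balance / cap)
	}
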
-type PriorityPool struct { - PriorityPoolSetup - ns *nodestate.NodeStateMachine - clock mclock.Clock - lock sync.Mutex - activeQueue *prque.LazyQueue - inactiveQueue *prque.Prque - changed []*ppNodeInfo - activeCount, activeCap uint64 - maxCount, maxCap uint64 - minCap uint64 - activeBias time.Duration - capacityStepDiv uint64 - - cachedCurve *CapacityCurve +type priorityPool struct { + setup *serverSetup + ns *nodestate.NodeStateMachine + clock mclock.Clock + lock sync.Mutex + inactiveQueue *prque.Prque + maxCount, maxCap uint64 + minCap uint64 + activeBias time.Duration + capacityStepDiv, fineStepDiv uint64 + + cachedCurve *capacityCurve ccUpdatedAt mclock.AbsTime ccUpdateForced bool -} -// nodePriority interface provides current and estimated future priorities on demand -type nodePriority interface { - // Priority should return the current priority of the node (higher is better) - Priority(cap uint64) int64 - // EstMinPriority should return a lower estimate for the minimum of the node priority - // value starting from the current moment until the given time. If the priority goes - // under the returned estimate before the specified moment then it is the caller's - // responsibility to signal with updateFlag. - EstimatePriority(cap uint64, addBalance int64, future, bias time.Duration, update bool) int64 + tempState []*ppNodeInfo // nodes currently in temporary state + // the following fields represent the temporary state if tempState is not empty + activeCount, activeCap uint64 + activeQueue *prque.LazyQueue } -// ppNodeInfo is the internal node descriptor of PriorityPool +// ppNodeInfo is the internal node descriptor of priorityPool type ppNodeInfo struct { nodePriority nodePriority node *enode.Node connected bool - capacity, origCap uint64 - bias time.Duration - forced, changed bool + capacity uint64 // only changed when temporary state is committed activeIndex, inactiveIndex int -} -// NewPriorityPool creates a new PriorityPool -func NewPriorityPool(ns *nodestate.NodeStateMachine, setup PriorityPoolSetup, clock mclock.Clock, minCap uint64, activeBias time.Duration, capacityStepDiv uint64) *PriorityPool { - pp := &PriorityPool{ - ns: ns, - PriorityPoolSetup: setup, - clock: clock, - inactiveQueue: prque.New(inactiveSetIndex), - minCap: minCap, - activeBias: activeBias, - capacityStepDiv: capacityStepDiv, + tempState bool // should only be true while the priorityPool lock is held + tempCapacity uint64 // equals capacity when tempState is false + // the following fields only affect the temporary state and they are set to their + // default value when entering the temp state + minTarget, stepDiv uint64 + bias time.Duration +} + +// newPriorityPool creates a new priorityPool +func newPriorityPool(ns *nodestate.NodeStateMachine, setup *serverSetup, clock mclock.Clock, minCap uint64, activeBias time.Duration, capacityStepDiv, fineStepDiv uint64) *priorityPool { + pp := &priorityPool{ + setup: setup, + ns: ns, + clock: clock, + inactiveQueue: prque.New(inactiveSetIndex), + minCap: minCap, + activeBias: activeBias, + capacityStepDiv: capacityStepDiv, + fineStepDiv: fineStepDiv, + } + if pp.activeBias < time.Duration(1) { + pp.activeBias = time.Duration(1) } pp.activeQueue = prque.NewLazyQueue(activeSetIndex, activePriority, pp.activeMaxPriority, clock, lazyQueueRefresh) - ns.SubscribeField(pp.priorityField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) { + ns.SubscribeField(pp.setup.balanceField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue 
interface{}) { if newValue != nil { c := &ppNodeInfo{ node: node, @@ -150,18 +120,19 @@ func NewPriorityPool(ns *nodestate.NodeStateMachine, setup PriorityPoolSetup, cl activeIndex: -1, inactiveIndex: -1, } - ns.SetFieldSub(node, pp.ppNodeInfoField, c) + ns.SetFieldSub(node, pp.setup.queueField, c) + ns.SetStateSub(node, setup.inactiveFlag, nodestate.Flags{}, 0) } else { - ns.SetStateSub(node, nodestate.Flags{}, pp.ActiveFlag.Or(pp.InactiveFlag), 0) - if n, _ := pp.ns.GetField(node, pp.ppNodeInfoField).(*ppNodeInfo); n != nil { + ns.SetStateSub(node, nodestate.Flags{}, pp.setup.activeFlag.Or(pp.setup.inactiveFlag), 0) + if n, _ := pp.ns.GetField(node, pp.setup.queueField).(*ppNodeInfo); n != nil { pp.disconnectedNode(n) } - ns.SetFieldSub(node, pp.CapacityField, nil) - ns.SetFieldSub(node, pp.ppNodeInfoField, nil) + ns.SetFieldSub(node, pp.setup.capacityField, nil) + ns.SetFieldSub(node, pp.setup.queueField, nil) } }) - ns.SubscribeState(pp.ActiveFlag.Or(pp.InactiveFlag), func(node *enode.Node, oldState, newState nodestate.Flags) { - if c, _ := pp.ns.GetField(node, pp.ppNodeInfoField).(*ppNodeInfo); c != nil { + ns.SubscribeState(pp.setup.activeFlag.Or(pp.setup.inactiveFlag), func(node *enode.Node, oldState, newState nodestate.Flags) { + if c, _ := pp.ns.GetField(node, pp.setup.queueField).(*ppNodeInfo); c != nil { if oldState.IsEmpty() { pp.connectedNode(c) } @@ -170,7 +141,7 @@ func NewPriorityPool(ns *nodestate.NodeStateMachine, setup PriorityPoolSetup, cl } } }) - ns.SubscribeState(pp.updateFlag, func(node *enode.Node, oldState, newState nodestate.Flags) { + ns.SubscribeState(pp.setup.updateFlag, func(node *enode.Node, oldState, newState nodestate.Flags) { if !newState.IsEmpty() { pp.updatePriority(node) } @@ -178,18 +149,12 @@ func NewPriorityPool(ns *nodestate.NodeStateMachine, setup PriorityPoolSetup, cl return pp } -// RequestCapacity checks whether changing the capacity of a node to the given target -// is possible (bias is applied in favor of other active nodes if the target is higher -// than the current capacity). -// If setCap is true then it also performs the change if possible. The function returns -// the minimum priority needed to do the change and whether it is currently allowed. -// If setCap and allowed are both true then the caller can assume that the change was -// successful. -// Note: priorityField should always be set before calling RequestCapacity. If setCap -// is false then both InactiveFlag and ActiveFlag can be unset and they are not changed -// by this function call either. -// Note 2: this function should run inside a NodeStateMachine operation -func (pp *PriorityPool) RequestCapacity(node *enode.Node, targetCap uint64, bias time.Duration, setCap bool) (minPriority int64, allowed bool) { +// requestCapacity tries to set the capacity of a connected node to the highest possible +// value inside the given target range. If maxTarget is not reachable then the capacity is +// iteratively reduced in fine steps based on the fineStepDiv parameter until minTarget is reached. +// The function returns the new capacity if successful and the original capacity otherwise. 
+// Note: this function should run inside a NodeStateMachine operation +func (pp *priorityPool) requestCapacity(node *enode.Node, minTarget, maxTarget uint64, bias time.Duration) uint64 { pp.lock.Lock() pp.activeQueue.Refresh() var updates []capUpdate @@ -198,39 +163,37 @@ func (pp *PriorityPool) RequestCapacity(node *enode.Node, targetCap uint64, bias pp.updateFlags(updates) }() - if targetCap < pp.minCap { - targetCap = pp.minCap + if minTarget < pp.minCap { + minTarget = pp.minCap + } + if maxTarget < minTarget { + maxTarget = minTarget } if bias < pp.activeBias { bias = pp.activeBias } - c, _ := pp.ns.GetField(node, pp.ppNodeInfoField).(*ppNodeInfo) + c, _ := pp.ns.GetField(node, pp.setup.queueField).(*ppNodeInfo) if c == nil { - log.Error("RequestCapacity called for unknown node", "id", node.ID()) - return math.MaxInt64, false + log.Error("requestCapacity called for unknown node", "id", node.ID()) + return 0 } - var priority int64 - if targetCap > c.capacity { - priority = c.nodePriority.EstimatePriority(targetCap, 0, 0, bias, false) - } else { - priority = c.nodePriority.Priority(targetCap) + pp.setTempState(c) + if maxTarget > c.capacity { + c.bias = bias + c.stepDiv = pp.fineStepDiv } - pp.markForChange(c) - pp.setCapacity(c, targetCap) - c.forced = true + pp.setTempCapacity(c, maxTarget) + c.minTarget = minTarget pp.activeQueue.Remove(c.activeIndex) pp.inactiveQueue.Remove(c.inactiveIndex) pp.activeQueue.Push(c) - _, minPriority = pp.enforceLimits() - // if capacity update is possible now then minPriority == math.MinInt64 - // if it is not possible at all then minPriority == math.MaxInt64 - allowed = priority > minPriority - updates = pp.finalizeChanges(setCap && allowed) - return + pp.enforceLimits() + updates = pp.finalizeChanges(c.tempCapacity >= minTarget && c.tempCapacity <= maxTarget && c.tempCapacity != c.capacity) + return c.capacity } // SetLimits sets the maximum number and total capacity of simultaneously active nodes -func (pp *PriorityPool) SetLimits(maxCount, maxCap uint64) { +func (pp *priorityPool) SetLimits(maxCount, maxCap uint64) { pp.lock.Lock() pp.activeQueue.Refresh() var updates []capUpdate @@ -247,27 +210,38 @@ func (pp *PriorityPool) SetLimits(maxCount, maxCap uint64) { updates = pp.finalizeChanges(true) } if inc { - updates = pp.tryActivate() + updates = append(updates, pp.tryActivate(false)...) 
} } -// SetActiveBias sets the bias applied when trying to activate inactive nodes -func (pp *PriorityPool) SetActiveBias(bias time.Duration) { +// setActiveBias sets the bias applied when trying to activate inactive nodes +func (pp *priorityPool) setActiveBias(bias time.Duration) { pp.lock.Lock() - defer pp.lock.Unlock() - pp.activeBias = bias - pp.tryActivate() + pp.activeBias = bias + if pp.activeBias < time.Duration(1) { + pp.activeBias = time.Duration(1) + } + updates := pp.tryActivate(false) + pp.lock.Unlock() + pp.ns.Operation(func() { pp.updateFlags(updates) }) } // Active returns the number and total capacity of currently active nodes -func (pp *PriorityPool) Active() (uint64, uint64) { +func (pp *priorityPool) Active() (uint64, uint64) { pp.lock.Lock() defer pp.lock.Unlock() return pp.activeCount, pp.activeCap } +// Limits returns the maximum allowed number and total capacity of active nodes +func (pp *priorityPool) Limits() (uint64, uint64) { + pp.lock.Lock() + defer pp.lock.Unlock() + + return pp.maxCount, pp.maxCap +} + // inactiveSetIndex callback updates ppNodeInfo item index in inactiveQueue func inactiveSetIndex(a interface{}, index int) { a.(*ppNodeInfo).inactiveIndex = index @@ -290,37 +264,31 @@ func invertPriority(p int64) int64 { // activePriority callback returns actual priority of ppNodeInfo item in activeQueue func activePriority(a interface{}) int64 { c := a.(*ppNodeInfo) - if c.forced { - return math.MinInt64 - } if c.bias == 0 { - return invertPriority(c.nodePriority.Priority(c.capacity)) + return invertPriority(c.nodePriority.priority(c.tempCapacity)) } else { - return invertPriority(c.nodePriority.EstimatePriority(c.capacity, 0, 0, c.bias, true)) + return invertPriority(c.nodePriority.estimatePriority(c.tempCapacity, 0, 0, c.bias, true)) } } // activeMaxPriority callback returns estimated maximum priority of ppNodeInfo item in activeQueue -func (pp *PriorityPool) activeMaxPriority(a interface{}, until mclock.AbsTime) int64 { +func (pp *priorityPool) activeMaxPriority(a interface{}, until mclock.AbsTime) int64 { c := a.(*ppNodeInfo) - if c.forced { - return math.MinInt64 - } future := time.Duration(until - pp.clock.Now()) if future < 0 { future = 0 } - return invertPriority(c.nodePriority.EstimatePriority(c.capacity, 0, future, c.bias, false)) + return invertPriority(c.nodePriority.estimatePriority(c.tempCapacity, 0, future, c.bias, false)) } // inactivePriority callback returns actual priority of ppNodeInfo item in inactiveQueue -func (pp *PriorityPool) inactivePriority(p *ppNodeInfo) int64 { - return p.nodePriority.Priority(pp.minCap) +func (pp *priorityPool) inactivePriority(p *ppNodeInfo) int64 { + return p.nodePriority.priority(pp.minCap) } -// connectedNode is called when a new node has been added to the pool (InactiveFlag set) +// connectedNode is called when a new node has been added to the pool (inactiveFlag set) // Note: this function should run inside a NodeStateMachine operation -func (pp *PriorityPool) connectedNode(c *ppNodeInfo) { +func (pp *priorityPool) connectedNode(c *ppNodeInfo) { pp.lock.Lock() pp.activeQueue.Refresh() var updates []capUpdate @@ -334,13 +302,13 @@ func (pp *PriorityPool) connectedNode(c *ppNodeInfo) { } c.connected = true pp.inactiveQueue.Push(c, pp.inactivePriority(c)) - updates = pp.tryActivate() + updates = pp.tryActivate(false) } -// disconnectedNode is called when a node has been removed from the pool (both InactiveFlag -// and ActiveFlag reset) +// disconnectedNode is called when a node has been removed from the pool (both inactiveFlag +// 
and activeFlag reset) // Note: this function should run inside a NodeStateMachine operation -func (pp *PriorityPool) disconnectedNode(c *ppNodeInfo) { +func (pp *priorityPool) disconnectedNode(c *ppNodeInfo) { pp.lock.Lock() pp.activeQueue.Refresh() var updates []capUpdate @@ -356,42 +324,51 @@ func (pp *PriorityPool) disconnectedNode(c *ppNodeInfo) { pp.activeQueue.Remove(c.activeIndex) pp.inactiveQueue.Remove(c.inactiveIndex) if c.capacity != 0 { - pp.setCapacity(c, 0) - updates = pp.tryActivate() + pp.setTempState(c) + pp.setTempCapacity(c, 0) + updates = pp.tryActivate(true) } } -// markForChange internally puts a node in a temporary state that can either be reverted +// setTempState internally puts a node in a temporary state that can either be reverted // or confirmed later. This temporary state allows changing the capacity of a node and -// moving it between the active and inactive queue. ActiveFlag/InactiveFlag and -// CapacityField are not changed while the changes are still temporary. -func (pp *PriorityPool) markForChange(c *ppNodeInfo) { - if c.changed { +// moving it between the active and inactive queue. activeFlag/inactiveFlag and +// capacityField are not changed while the changes are still temporary. +func (pp *priorityPool) setTempState(c *ppNodeInfo) { + if c.tempState { return } - c.changed = true - c.origCap = c.capacity - pp.changed = append(pp.changed, c) + c.tempState = true + if c.tempCapacity != c.capacity { // should never happen + log.Error("tempCapacity != capacity when entering tempState") + } + c.minTarget = pp.minCap + c.stepDiv = pp.capacityStepDiv + pp.tempState = append(pp.tempState, c) } -// setCapacity changes the capacity of a node and adjusts activeCap and activeCount -// accordingly. Note that this change is performed in the temporary state so it should -// be called after markForChange and before finalizeChanges. -func (pp *PriorityPool) setCapacity(n *ppNodeInfo, cap uint64) { - pp.activeCap += cap - n.capacity - if n.capacity == 0 { +// setTempCapacity changes the capacity of a node in the temporary state and adjusts +// activeCap and activeCount accordingly. Since this change is performed in the temporary +// state it should be called after setTempState and before finalizeChanges. +func (pp *priorityPool) setTempCapacity(n *ppNodeInfo, cap uint64) { + if !n.tempState { // should never happen + log.Error("Node is not in temporary state") + return + } + pp.activeCap += cap - n.tempCapacity + if n.tempCapacity == 0 { pp.activeCount++ } if cap == 0 { pp.activeCount-- } - n.capacity = cap + n.tempCapacity = cap } // enforceLimits enforces active node count and total capacity limits. It returns the // lowest active node priority. Note that this function is performed on the temporary // internal state. 
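Because the temporary-state bookkeeping above relies on unsigned arithmetic, a standalone sketch (hypothetical values, not actual pool state) may help to see why a setTempCapacity-style reduction still lowers the running totals correctly:

	package main

	import "fmt"

	func main() {
		// Hypothetical pool totals before one capacity reduction.
		activeCap, activeCount := uint64(300), uint64(3)
		oldCap, newCap := uint64(100), uint64(0) // one node demoted to zero capacity

		// Same bookkeeping as setTempCapacity: newCap-oldCap wraps around as a
		// uint64, but adding it to activeCap still yields the lowered total.
		activeCap += newCap - oldCap
		if oldCap == 0 {
			activeCount++ // node enters the active set from zero capacity
		}
		if newCap == 0 {
			activeCount-- // node leaves the active set
		}
		fmt.Println(activeCap, activeCount) // prints: 200 2
	}
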
-func (pp *PriorityPool) enforceLimits() (*ppNodeInfo, int64) { +func (pp *priorityPool) enforceLimits() (*ppNodeInfo, int64) { if pp.activeCap <= pp.maxCap && pp.activeCount <= pp.maxCount { return nil, math.MinInt64 } @@ -401,16 +378,19 @@ func (pp *PriorityPool) enforceLimits() (*ppNodeInfo, int64) { ) pp.activeQueue.MultiPop(func(data interface{}, priority int64) bool { c = data.(*ppNodeInfo) - pp.markForChange(c) + pp.setTempState(c) maxActivePriority = priority - if c.capacity == pp.minCap || pp.activeCount > pp.maxCount { - pp.setCapacity(c, 0) + if c.tempCapacity == c.minTarget || pp.activeCount > pp.maxCount { + pp.setTempCapacity(c, 0) } else { - sub := c.capacity / pp.capacityStepDiv - if c.capacity-sub < pp.minCap { - sub = c.capacity - pp.minCap + sub := c.tempCapacity / c.stepDiv + if sub == 0 { + sub = 1 } - pp.setCapacity(c, c.capacity-sub) + if c.tempCapacity-sub < c.minTarget { + sub = c.tempCapacity - c.minTarget + } + pp.setTempCapacity(c, c.tempCapacity-sub) pp.activeQueue.Push(c) } return pp.activeCap > pp.maxCap || pp.activeCount > pp.maxCount @@ -421,71 +401,74 @@ func (pp *PriorityPool) enforceLimits() (*ppNodeInfo, int64) { // finalizeChanges either commits or reverts temporary changes. The necessary capacity // field and according flag updates are not performed here but returned in a list because // they should be performed while the mutex is not held. -func (pp *PriorityPool) finalizeChanges(commit bool) (updates []capUpdate) { - for _, c := range pp.changed { - // always remove and push back in order to update biased/forced priority +func (pp *priorityPool) finalizeChanges(commit bool) (updates []capUpdate) { + for _, c := range pp.tempState { + // always remove and push back in order to update biased priority pp.activeQueue.Remove(c.activeIndex) pp.inactiveQueue.Remove(c.inactiveIndex) - c.bias = 0 - c.forced = false - c.changed = false - if !commit { - pp.setCapacity(c, c.origCap) + oldCapacity := c.capacity + if commit { + c.capacity = c.tempCapacity + } else { + pp.setTempCapacity(c, c.capacity) // revert activeCount/activeCap } + c.tempState = false + c.bias = 0 + c.stepDiv = pp.capacityStepDiv + c.minTarget = pp.minCap if c.connected { if c.capacity != 0 { pp.activeQueue.Push(c) } else { pp.inactiveQueue.Push(c, pp.inactivePriority(c)) } - if c.capacity != c.origCap && commit { - updates = append(updates, capUpdate{c.node, c.origCap, c.capacity}) + if c.capacity != oldCapacity { + updates = append(updates, capUpdate{c.node, oldCapacity, c.capacity}) } } - c.origCap = 0 } - pp.changed = nil + pp.tempState = nil if commit { pp.ccUpdateForced = true } return } -// capUpdate describes a CapacityField and ActiveFlag/InactiveFlag update +// capUpdate describes a capacityField and activeFlag/inactiveFlag update type capUpdate struct { node *enode.Node oldCap, newCap uint64 } -// updateFlags performs CapacityField and ActiveFlag/InactiveFlag updates while the +// updateFlags performs capacityField and activeFlag/inactiveFlag updates while the // pool mutex is not held // Note: this function should run inside a NodeStateMachine operation -func (pp *PriorityPool) updateFlags(updates []capUpdate) { +func (pp *priorityPool) updateFlags(updates []capUpdate) { for _, f := range updates { if f.oldCap == 0 { - pp.ns.SetStateSub(f.node, pp.ActiveFlag, pp.InactiveFlag, 0) + pp.ns.SetStateSub(f.node, pp.setup.activeFlag, pp.setup.inactiveFlag, 0) } if f.newCap == 0 { - pp.ns.SetStateSub(f.node, pp.InactiveFlag, pp.ActiveFlag, 0) - pp.ns.SetFieldSub(f.node, pp.CapacityField, 
nil) + pp.ns.SetStateSub(f.node, pp.setup.inactiveFlag, pp.setup.activeFlag, 0) + pp.ns.SetFieldSub(f.node, pp.setup.capacityField, nil) } else { - pp.ns.SetFieldSub(f.node, pp.CapacityField, f.newCap) + pp.ns.SetFieldSub(f.node, pp.setup.capacityField, f.newCap) } } } // tryActivate tries to activate inactive nodes if possible -func (pp *PriorityPool) tryActivate() []capUpdate { - var commit bool +func (pp *priorityPool) tryActivate(commit bool) []capUpdate { for pp.inactiveQueue.Size() > 0 { c := pp.inactiveQueue.PopItem().(*ppNodeInfo) - pp.markForChange(c) - pp.setCapacity(c, pp.minCap) + pp.setTempState(c) + pp.setTempCapacity(c, pp.minCap) c.bias = pp.activeBias pp.activeQueue.Push(c) pp.enforceLimits() - if c.capacity > 0 { + if c.tempCapacity > 0 { commit = true + c.bias = 0 } else { break } @@ -497,7 +480,7 @@ func (pp *PriorityPool) tryActivate() []capUpdate { // updatePriority gets the current priority value of the given node from the nodePriority // interface and performs the necessary changes. It is triggered by updateFlag. // Note: this function should run inside a NodeStateMachine operation -func (pp *PriorityPool) updatePriority(node *enode.Node) { +func (pp *priorityPool) updatePriority(node *enode.Node) { pp.lock.Lock() pp.activeQueue.Refresh() var updates []capUpdate @@ -506,7 +489,7 @@ func (pp *PriorityPool) updatePriority(node *enode.Node) { pp.updateFlags(updates) }() - c, _ := pp.ns.GetField(node, pp.ppNodeInfoField).(*ppNodeInfo) + c, _ := pp.ns.GetField(node, pp.setup.queueField).(*ppNodeInfo) if c == nil || !c.connected { return } @@ -517,15 +500,15 @@ func (pp *PriorityPool) updatePriority(node *enode.Node) { } else { pp.inactiveQueue.Push(c, pp.inactivePriority(c)) } - updates = pp.tryActivate() + updates = pp.tryActivate(false) } -// CapacityCurve is a snapshot of the priority pool contents in a format that can efficiently +// capacityCurve is a snapshot of the priority pool contents in a format that can efficiently // estimate how much capacity could be granted to a given node at a given priority level. 
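The intended call pattern for this snapshot type, condensed from the tests further down (pp, node and balance stand in for values in scope at the call site): take a curve, exclude the requesting node, then ask for the largest capacity whose priority still clears the curve.

	// Usage sketch of getCapacityCurve/maxCapacity; all names are placeholders.
	curve := pp.getCapacityCurve().exclude(node.ID())
	expCap := curve.maxCapacity(func(cap uint64) int64 {
		// any priority function that decreases monotonically in cap works here
		return int64(balance / cap)
	})
	// expCap is the largest capacity the pool could currently grant the node.
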
-type CapacityCurve struct { +type capacityCurve struct { points []curvePoint // curve points sorted in descending order of priority index map[enode.ID][]int // curve point indexes belonging to each node - exclude []int // curve point indexes of excluded node + excludeList []int // curve point indexes of excluded node excludeFirst bool // true if activeCount == maxCount } @@ -534,8 +517,8 @@ type curvePoint struct { nextPri int64 // next priority level where more capacity will be available } -// GetCapacityCurve returns a new or recently cached CapacityCurve based on the contents of the pool -func (pp *PriorityPool) GetCapacityCurve() *CapacityCurve { +// getCapacityCurve returns a new or recently cached capacityCurve based on the contents of the pool +func (pp *priorityPool) getCapacityCurve() *capacityCurve { pp.lock.Lock() defer pp.lock.Unlock() @@ -547,7 +530,7 @@ func (pp *PriorityPool) GetCapacityCurve() *CapacityCurve { pp.ccUpdateForced = false pp.ccUpdatedAt = now - curve := &CapacityCurve{ + curve := &capacityCurve{ index: make(map[enode.ID][]int), } pp.cachedCurve = curve @@ -556,6 +539,7 @@ func (pp *PriorityPool) GetCapacityCurve() *CapacityCurve { excludeFirst := pp.maxCount == pp.activeCount // reduce node capacities or remove nodes until nothing is left in the queue; // record the available capacity and the necessary priority after each step + lastPri := int64(math.MinInt64) for pp.activeCap > 0 { cp := curvePoint{} if pp.activeCap > pp.maxCap { @@ -570,9 +554,15 @@ func (pp *PriorityPool) GetCapacityCurve() *CapacityCurve { // enforceLimits removes the lowest priority node if it has minimal capacity, // otherwise reduces its capacity next, cp.nextPri = pp.enforceLimits() + if cp.nextPri < lastPri { + // enforce monotonicity which may be broken by continuously changing priorities + cp.nextPri = lastPri + } else { + lastPri = cp.nextPri + } pp.activeCap -= tempCap if next == nil { - log.Error("GetCapacityCurve: cannot remove next element from the priority queue") + log.Error("getCapacityCurve: cannot remove next element from the priority queue") break } id := next.node.ID() @@ -595,34 +585,34 @@ func (pp *PriorityPool) GetCapacityCurve() *CapacityCurve { nextPri: math.MaxInt64, }) if curve.excludeFirst { - curve.exclude = curve.index[excludeID] + curve.excludeList = curve.index[excludeID] } return curve } -// Exclude returns a CapacityCurve with the given node excluded from the original curve -func (cc *CapacityCurve) Exclude(id enode.ID) *CapacityCurve { - if exclude, ok := cc.index[id]; ok { +// exclude returns a capacityCurve with the given node excluded from the original curve +func (cc *capacityCurve) exclude(id enode.ID) *capacityCurve { + if excludeList, ok := cc.index[id]; ok { // return a new version of the curve (only one excluded node can be selected) // Note: if the first node was excluded by default (excludeFirst == true) then // we can forget about that and exclude the node with the given id instead. 
- return &CapacityCurve{ - points: cc.points, - index: cc.index, - exclude: exclude, + return &capacityCurve{ + points: cc.points, + index: cc.index, + excludeList: excludeList, } } return cc } -func (cc *CapacityCurve) getPoint(i int) curvePoint { +func (cc *capacityCurve) getPoint(i int) curvePoint { cp := cc.points[i] if i == 0 && cc.excludeFirst { cp.freeCap = 0 return cp } - for ii := len(cc.exclude) - 1; ii >= 0; ii-- { - ei := cc.exclude[ii] + for ii := len(cc.excludeList) - 1; ii >= 0; ii-- { + ei := cc.excludeList[ii] if ei < i { break } @@ -632,11 +622,11 @@ func (cc *CapacityCurve) getPoint(i int) curvePoint { return cp } -// MaxCapacity calculates the maximum capacity available for a node with a given +// maxCapacity calculates the maximum capacity available for a node with a given // (monotonically decreasing) priority vs. capacity function. Note that if the requesting // node is already in the pool then it should be excluded from the curve in order to get // the correct result. -func (cc *CapacityCurve) MaxCapacity(priority func(cap uint64) int64) uint64 { +func (cc *capacityCurve) maxCapacity(priority func(cap uint64) int64) uint64 { min, max := 0, len(cc.points)-1 // the curve always has at least one point for min < max { mid := (min + max) / 2 diff --git a/les/vflux/server/prioritypool_test.go b/les/vflux/server/prioritypool_test.go index d83ddc1767..5152312116 100644 --- a/les/vflux/server/prioritypool_test.go +++ b/les/vflux/server/prioritypool_test.go @@ -28,18 +28,6 @@ import ( "github.com/ethereum/go-ethereum/p2p/nodestate" ) -var ( - testSetup = &nodestate.Setup{} - ppTestClientFlag = testSetup.NewFlag("ppTestClientFlag") - ppTestClientField = testSetup.NewField("ppTestClient", reflect.TypeOf(&ppTestClient{})) - ppUpdateFlag = testSetup.NewFlag("ppUpdateFlag") - ppTestSetup = NewPriorityPoolSetup(testSetup) -) - -func init() { - ppTestSetup.Connect(ppTestClientField, ppUpdateFlag) -} - const ( testCapacityStepDiv = 100 testCapacityToleranceDiv = 10 @@ -51,25 +39,27 @@ type ppTestClient struct { balance, cap uint64 } -func (c *ppTestClient) Priority(cap uint64) int64 { +func (c *ppTestClient) priority(cap uint64) int64 { return int64(c.balance / cap) } -func (c *ppTestClient) EstimatePriority(cap uint64, addBalance int64, future, bias time.Duration, update bool) int64 { +func (c *ppTestClient) estimatePriority(cap uint64, addBalance int64, future, bias time.Duration, update bool) int64 { return int64(c.balance / cap) } func TestPriorityPool(t *testing.T) { clock := &mclock.Simulated{} - ns := nodestate.NewNodeStateMachine(nil, nil, clock, testSetup) + setup := newServerSetup() + setup.balanceField = setup.setup.NewField("ppTestClient", reflect.TypeOf(&ppTestClient{})) + ns := nodestate.NewNodeStateMachine(nil, nil, clock, setup.setup) - ns.SubscribeField(ppTestSetup.CapacityField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) { - if n := ns.GetField(node, ppTestSetup.priorityField); n != nil { + ns.SubscribeField(setup.capacityField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) { + if n := ns.GetField(node, setup.balanceField); n != nil { c := n.(*ppTestClient) c.cap = newValue.(uint64) } }) - pp := NewPriorityPool(ns, ppTestSetup, clock, testMinCap, 0, testCapacityStepDiv) + pp := newPriorityPool(ns, setup, clock, testMinCap, 0, testCapacityStepDiv, testCapacityStepDiv) ns.Start() pp.SetLimits(100, 1000000) clients := make([]*ppTestClient, 100) @@ -77,7 +67,8 @@ func TestPriorityPool(t *testing.T) { for 
{ var ok bool ns.Operation(func() { - _, ok = pp.RequestCapacity(c.node, c.cap+c.cap/testCapacityStepDiv, 0, true) + newCap := c.cap + c.cap/testCapacityStepDiv + ok = pp.requestCapacity(c.node, newCap, newCap, 0) == newCap }) if !ok { return @@ -101,9 +92,8 @@ func TestPriorityPool(t *testing.T) { } sumBalance += c.balance clients[i] = c - ns.SetState(c.node, ppTestClientFlag, nodestate.Flags{}, 0) - ns.SetField(c.node, ppTestSetup.priorityField, c) - ns.SetState(c.node, ppTestSetup.InactiveFlag, nodestate.Flags{}, 0) + ns.SetField(c.node, setup.balanceField, c) + ns.SetState(c.node, setup.inactiveFlag, nodestate.Flags{}, 0) raise(c) check(c) } @@ -113,8 +103,8 @@ func TestPriorityPool(t *testing.T) { oldBalance := c.balance c.balance = uint64(rand.Int63n(100000000000) + 100000000000) sumBalance += c.balance - oldBalance - pp.ns.SetState(c.node, ppUpdateFlag, nodestate.Flags{}, 0) - pp.ns.SetState(c.node, nodestate.Flags{}, ppUpdateFlag, 0) + pp.ns.SetState(c.node, setup.updateFlag, nodestate.Flags{}, 0) + pp.ns.SetState(c.node, nodestate.Flags{}, setup.updateFlag, 0) if c.balance > oldBalance { raise(c) } else { @@ -129,32 +119,28 @@ func TestPriorityPool(t *testing.T) { if count%10 == 0 { // test available capacity calculation with capacity curve c = clients[rand.Intn(len(clients))] - curve := pp.GetCapacityCurve().Exclude(c.node.ID()) + curve := pp.getCapacityCurve().exclude(c.node.ID()) add := uint64(rand.Int63n(10000000000000)) c.balance += add sumBalance += add - expCap := curve.MaxCapacity(func(cap uint64) int64 { + expCap := curve.maxCapacity(func(cap uint64) int64 { return int64(c.balance / cap) }) - //fmt.Println(expCap, c.balance, sumBalance) - /*for i, cp := range curve.points { - fmt.Println("cp", i, cp, "ex", curve.getPoint(i)) - }*/ var ok bool - expFail := expCap + 1 + expFail := expCap + 10 if expFail < testMinCap { expFail = testMinCap } ns.Operation(func() { - _, ok = pp.RequestCapacity(c.node, expFail, 0, true) + ok = pp.requestCapacity(c.node, expFail, expFail, 0) == expFail }) if ok { t.Errorf("Request for more than expected available capacity succeeded") } if expCap >= testMinCap { ns.Operation(func() { - _, ok = pp.RequestCapacity(c.node, expCap, 0, true) + ok = pp.requestCapacity(c.node, expCap, expCap, 0) == expCap }) if !ok { t.Errorf("Request for expected available capacity failed") @@ -162,8 +148,8 @@ func TestPriorityPool(t *testing.T) { } c.balance -= add sumBalance -= add - pp.ns.SetState(c.node, ppUpdateFlag, nodestate.Flags{}, 0) - pp.ns.SetState(c.node, nodestate.Flags{}, ppUpdateFlag, 0) + pp.ns.SetState(c.node, setup.updateFlag, nodestate.Flags{}, 0) + pp.ns.SetState(c.node, nodestate.Flags{}, setup.updateFlag, 0) for _, c := range clients { raise(c) } @@ -175,8 +161,11 @@ func TestPriorityPool(t *testing.T) { func TestCapacityCurve(t *testing.T) { clock := &mclock.Simulated{} - ns := nodestate.NewNodeStateMachine(nil, nil, clock, testSetup) - pp := NewPriorityPool(ns, ppTestSetup, clock, 400000, 0, 2) + setup := newServerSetup() + setup.balanceField = setup.setup.NewField("ppTestClient", reflect.TypeOf(&ppTestClient{})) + ns := nodestate.NewNodeStateMachine(nil, nil, clock, setup.setup) + + pp := newPriorityPool(ns, setup, clock, 400000, 0, 2, 2) ns.Start() pp.SetLimits(10, 10000000) clients := make([]*ppTestClient, 10) @@ -188,17 +177,16 @@ func TestCapacityCurve(t *testing.T) { cap: 1000000, } clients[i] = c - ns.SetState(c.node, ppTestClientFlag, nodestate.Flags{}, 0) - ns.SetField(c.node, ppTestSetup.priorityField, c) - ns.SetState(c.node, 
ppTestSetup.InactiveFlag, nodestate.Flags{}, 0) + ns.SetField(c.node, setup.balanceField, c) + ns.SetState(c.node, setup.inactiveFlag, nodestate.Flags{}, 0) ns.Operation(func() { - pp.RequestCapacity(c.node, c.cap, 0, true) + pp.requestCapacity(c.node, c.cap, c.cap, 0) }) } - curve := pp.GetCapacityCurve() + curve := pp.getCapacityCurve() check := func(balance, expCap uint64) { - cap := curve.MaxCapacity(func(cap uint64) int64 { + cap := curve.maxCapacity(func(cap uint64) int64 { return int64(balance / cap) }) var fail bool @@ -226,7 +214,7 @@ func TestCapacityCurve(t *testing.T) { check(1000000000000, 2500000) pp.SetLimits(11, 10000000) - curve = pp.GetCapacityCurve() + curve = pp.getCapacityCurve() check(0, 0) check(10000000000, 100000) diff --git a/les/vflux/server/service.go b/les/vflux/server/service.go index ab759ae441..80a0f47543 100644 --- a/les/vflux/server/service.go +++ b/les/vflux/server/service.go @@ -40,7 +40,6 @@ type ( // Service is a service registered at the Server and identified by a string id Service interface { - ServiceInfo() (id, desc string) // only called during registration Handle(id enode.ID, address string, name string, data []byte) []byte // never called concurrently } @@ -60,9 +59,8 @@ func NewServer(delayPerRequest time.Duration) *Server { } // Register registers a Service -func (s *Server) Register(b Service) { - srv := &serviceEntry{backend: b} - srv.id, srv.desc = b.ServiceInfo() +func (s *Server) Register(b Service, id, desc string) { + srv := &serviceEntry{backend: b, id: id, desc: desc} if strings.Contains(srv.id, ":") { // srv.id + ":" will be used as a service database prefix log.Error("Service ID contains ':'", "id", srv.id) diff --git a/les/vflux/server/status.go b/les/vflux/server/status.go new file mode 100644 index 0000000000..469190777b --- /dev/null +++ b/les/vflux/server/status.go @@ -0,0 +1,59 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +package server + +import ( + "reflect" + + "github.com/ethereum/go-ethereum/p2p/nodestate" +) + +type peerWrapper struct{ clientPeer } // the NodeStateMachine type system needs this wrapper + +// serverSetup is a wrapper of the node state machine setup, which contains +// all the created flags and fields used in the vflux server side. +type serverSetup struct { + setup *nodestate.Setup + clientField nodestate.Field // Field contains the client peer handler + + // Flags and fields controlled by balance tracker. BalanceTracker + // is responsible for setting/deleting these flags or fields. 
+ priorityFlag nodestate.Flags // Flag is set if the node has a positive balance + updateFlag nodestate.Flags // Flag is set whenever the node balance is changed (priority changed) + balanceField nodestate.Field // Field contains the client balance for priority calculation + + // Flags and fields controlled by priority queue. Priority queue + // is responsible for setting/deleting these flags or fields. + activeFlag nodestate.Flags // Flag is set if the node is active + inactiveFlag nodestate.Flags // Flag is set if the node is inactive + capacityField nodestate.Field // Field contains the capacity of the node + queueField nodestate.Field // Field contains the information in the priority queue +} + +// newServerSetup initializes the setup for the state machine and returns the flags/fields group. +func newServerSetup() *serverSetup { + setup := &serverSetup{setup: &nodestate.Setup{}} + setup.clientField = setup.setup.NewField("client", reflect.TypeOf(peerWrapper{})) + setup.priorityFlag = setup.setup.NewFlag("priority") + setup.updateFlag = setup.setup.NewFlag("update") + setup.balanceField = setup.setup.NewField("balance", reflect.TypeOf(&nodeBalance{})) + setup.activeFlag = setup.setup.NewFlag("active") + setup.inactiveFlag = setup.setup.NewFlag("inactive") + setup.capacityField = setup.setup.NewField("capacity", reflect.TypeOf(uint64(0))) + setup.queueField = setup.setup.NewField("queue", reflect.TypeOf(&ppNodeInfo{})) + return setup +} diff --git a/metrics/histogram.go b/metrics/histogram.go index 46f3bbd2f1..2c54ce8b40 100644 --- a/metrics/histogram.go +++ b/metrics/histogram.go @@ -26,6 +26,15 @@ func GetOrRegisterHistogram(name string, r Registry, s Sample) Histogram { return r.GetOrRegister(name, func() Histogram { return NewHistogram(s) }).(Histogram) } +// GetOrRegisterHistogramLazy returns an existing Histogram or constructs and +// registers a new StandardHistogram. +func GetOrRegisterHistogramLazy(name string, r Registry, s func() Sample) Histogram { + if nil == r { + r = DefaultRegistry + } + return r.GetOrRegister(name, func() Histogram { return NewHistogram(s()) }).(Histogram) +} + // NewHistogram constructs a new StandardHistogram from a Sample. 
func NewHistogram(s Sample) Histogram { if !Enabled { diff --git a/metrics/influxdb/influxdb.go b/metrics/influxdb/influxdb.go index d4fc478e7b..52d0091034 100644 --- a/metrics/influxdb/influxdb.go +++ b/metrics/influxdb/influxdb.go @@ -162,26 +162,29 @@ func (r *reporter) send() error { }) case metrics.Histogram: ms := metric.Snapshot() - ps := ms.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999, 0.9999}) - pts = append(pts, client.Point{ - Measurement: fmt.Sprintf("%s%s.histogram", namespace, name), - Tags: r.tags, - Fields: map[string]interface{}{ - "count": ms.Count(), - "max": ms.Max(), - "mean": ms.Mean(), - "min": ms.Min(), - "stddev": ms.StdDev(), - "variance": ms.Variance(), - "p50": ps[0], - "p75": ps[1], - "p95": ps[2], - "p99": ps[3], - "p999": ps[4], - "p9999": ps[5], - }, - Time: now, - }) + + if ms.Count() > 0 { + ps := ms.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999, 0.9999}) + pts = append(pts, client.Point{ + Measurement: fmt.Sprintf("%s%s.histogram", namespace, name), + Tags: r.tags, + Fields: map[string]interface{}{ + "count": ms.Count(), + "max": ms.Max(), + "mean": ms.Mean(), + "min": ms.Min(), + "stddev": ms.StdDev(), + "variance": ms.Variance(), + "p50": ps[0], + "p75": ps[1], + "p95": ps[2], + "p99": ps[3], + "p999": ps[4], + "p9999": ps[5], + }, + Time: now, + }) + } case metrics.Meter: ms := metric.Snapshot() pts = append(pts, client.Point{ diff --git a/metrics/resetting_sample.go b/metrics/resetting_sample.go new file mode 100644 index 0000000000..43c1129cd0 --- /dev/null +++ b/metrics/resetting_sample.go @@ -0,0 +1,24 @@ +package metrics + +// ResettingSample converts an ordinary sample into one that resets whenever its +// snapshot is retrieved. This will break for multi-monitor systems, but when only +// a single metric is being pushed out, this ensures that low-frequency events don't +// skew the charts indefinitely. +func ResettingSample(sample Sample) Sample { + return &resettingSample{ + Sample: sample, + } +} + +// resettingSample is a simple wrapper around a sample that resets it upon the +// snapshot retrieval. +type resettingSample struct { + Sample +} + +// Snapshot returns a read-only copy of the sample with the original reset. +func (rs *resettingSample) Snapshot() Sample { + s := rs.Sample.Snapshot() + rs.Sample.Clear() + return s +} diff --git a/miner/miner.go b/miner/miner.go index ef431bfa0c..c1f58dcdc4 100644 --- a/miner/miner.go +++ b/miner/miner.go @@ -43,14 +43,15 @@ type Backend interface { // Config is the configuration parameters of mining. type Config struct { - Etherbase common.Address `toml:",omitempty"` // Public address for block mining rewards (default = first account) - Notify []string `toml:",omitempty"` // HTTP URL list to be notified of new work packages(only useful in ethash). - ExtraData hexutil.Bytes `toml:",omitempty"` // Block extra data set by the miner - GasFloor uint64 // Target gas floor for mined blocks. - GasCeil uint64 // Target gas ceiling for mined blocks. - GasPrice *big.Int // Minimum gas price for mining a transaction - Recommit time.Duration // The time interval for miner to re-create mining work. - Noverify bool // Disable remote mining solution verification(only useful in ethash). + Etherbase common.Address `toml:",omitempty"` // Public address for block mining rewards (default = first account) + Notify []string `toml:",omitempty"` // HTTP URL list to be notified of new work packages (only useful in ethash). 
+	NotifyFull bool          `toml:",omitempty"` // Notify with pending block headers instead of work packages
+	ExtraData  hexutil.Bytes `toml:",omitempty"` // Block extra data set by the miner
+	GasFloor   uint64        // Target gas floor for mined blocks.
+	GasCeil    uint64        // Target gas ceiling for mined blocks.
+	GasPrice   *big.Int      // Minimum gas price for mining a transaction
+	Recommit   time.Duration // The time interval for the miner to re-create mining work.
+	Noverify   bool          // Disable remote mining solution verification (only useful in ethash).
 }
 
 // Miner creates blocks and searches for proof-of-work values.
@@ -160,7 +161,7 @@ func (miner *Miner) Mining() bool {
 	return miner.worker.isRunning()
 }
 
-func (miner *Miner) HashRate() uint64 {
+func (miner *Miner) Hashrate() uint64 {
 	if pow, ok := miner.engine.(consensus.PoW); ok {
 		return uint64(pow.Hashrate())
 	}
diff --git a/node/api.go b/node/api.go
index 083784f4e4..be20b89d95 100644
--- a/node/api.go
+++ b/node/api.go
@@ -24,6 +24,7 @@ import (
 	"github.com/ethereum/go-ethereum/common/hexutil"
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/internal/debug"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/p2p"
 	"github.com/ethereum/go-ethereum/p2p/enode"
 	"github.com/ethereum/go-ethereum/rpc"
@@ -162,8 +163,8 @@ func (api *privateAdminAPI) PeerEvents(ctx context.Context) (*rpc.Subscription,
 	return rpcSub, nil
 }
 
-// StartRPC starts the HTTP RPC API server.
-func (api *privateAdminAPI) StartRPC(host *string, port *int, cors *string, apis *string, vhosts *string) (bool, error) {
+// StartHTTP starts the HTTP RPC API server.
+func (api *privateAdminAPI) StartHTTP(host *string, port *int, cors *string, apis *string, vhosts *string) (bool, error) {
 	api.node.lock.Lock()
 	defer api.node.lock.Unlock()
@@ -216,12 +217,26 @@ func (api *privateAdminAPI) StartRPC(host *string, port *int, cors *string, apis
 	return true, nil
 }
 
-// StopRPC shuts down the HTTP server.
-func (api *privateAdminAPI) StopRPC() (bool, error) {
+// StartRPC starts the HTTP RPC API server.
+// This method is deprecated. Use StartHTTP instead.
+func (api *privateAdminAPI) StartRPC(host *string, port *int, cors *string, apis *string, vhosts *string) (bool, error) {
+	log.Warn("Deprecation warning", "method", "admin.StartRPC", "use-instead", "admin.StartHTTP")
+	return api.StartHTTP(host, port, cors, apis, vhosts)
+}
+
+// StopHTTP shuts down the HTTP server.
+func (api *privateAdminAPI) StopHTTP() (bool, error) {
 	api.node.http.stop()
 	return true, nil
 }
 
+// StopRPC shuts down the HTTP server.
+// This method is deprecated. Use StopHTTP instead.
+func (api *privateAdminAPI) StopRPC() (bool, error) {
+	log.Warn("Deprecation warning", "method", "admin.StopRPC", "use-instead", "admin.StopHTTP")
+	return api.StopHTTP()
+}
+
 // StartWS starts the websocket RPC API server.
func (api *privateAdminAPI) StartWS(host *string, port *int, allowedOrigins *string, apis *string) (bool, error) { api.node.lock.Lock() diff --git a/node/api_test.go b/node/api_test.go index 9c3fa3a31d..9549adf9c2 100644 --- a/node/api_test.go +++ b/node/api_test.go @@ -69,7 +69,7 @@ func TestStartRPC(t *testing.T) { name: "rpc enabled through API", cfg: Config{}, fn: func(t *testing.T, n *Node, api *privateAdminAPI) { - _, err := api.StartRPC(sp("127.0.0.1"), ip(0), nil, nil, nil) + _, err := api.StartHTTP(sp("127.0.0.1"), ip(0), nil, nil, nil) assert.NoError(t, err) }, wantReachable: true, @@ -90,14 +90,14 @@ func TestStartRPC(t *testing.T) { port := listener.Addr().(*net.TCPAddr).Port // Now try to start RPC on that port. This should fail. - _, err = api.StartRPC(sp("127.0.0.1"), ip(port), nil, nil, nil) + _, err = api.StartHTTP(sp("127.0.0.1"), ip(port), nil, nil, nil) if err == nil { - t.Fatal("StartRPC should have failed on port", port) + t.Fatal("StartHTTP should have failed on port", port) } // Try again after unblocking the port. It should work this time. listener.Close() - _, err = api.StartRPC(sp("127.0.0.1"), ip(port), nil, nil, nil) + _, err = api.StartHTTP(sp("127.0.0.1"), ip(port), nil, nil, nil) assert.NoError(t, err) }, wantReachable: true, @@ -109,7 +109,7 @@ func TestStartRPC(t *testing.T) { name: "rpc stopped through API", cfg: Config{HTTPHost: "127.0.0.1"}, fn: func(t *testing.T, n *Node, api *privateAdminAPI) { - _, err := api.StopRPC() + _, err := api.StopHTTP() assert.NoError(t, err) }, wantReachable: false, @@ -121,10 +121,10 @@ func TestStartRPC(t *testing.T) { name: "rpc stopped twice", cfg: Config{HTTPHost: "127.0.0.1"}, fn: func(t *testing.T, n *Node, api *privateAdminAPI) { - _, err := api.StopRPC() + _, err := api.StopHTTP() assert.NoError(t, err) - _, err = api.StopRPC() + _, err = api.StopHTTP() assert.NoError(t, err) }, wantReachable: false, @@ -211,14 +211,14 @@ func TestStartRPC(t *testing.T) { { name: "rpc stopped with ws enabled", fn: func(t *testing.T, n *Node, api *privateAdminAPI) { - _, err := api.StartRPC(sp("127.0.0.1"), ip(0), nil, nil, nil) + _, err := api.StartHTTP(sp("127.0.0.1"), ip(0), nil, nil, nil) assert.NoError(t, err) wsport := n.http.port _, err = api.StartWS(sp("127.0.0.1"), ip(wsport), nil, nil) assert.NoError(t, err) - _, err = api.StopRPC() + _, err = api.StopHTTP() assert.NoError(t, err) }, wantReachable: false, @@ -233,7 +233,7 @@ func TestStartRPC(t *testing.T) { assert.NoError(t, err) wsport := n.http.port - _, err = api.StartRPC(sp("127.0.0.1"), ip(wsport), nil, nil, nil) + _, err = api.StartHTTP(sp("127.0.0.1"), ip(wsport), nil, nil, nil) assert.NoError(t, err) }, wantReachable: true, diff --git a/node/node.go b/node/node.go index b8c0277146..e66ef10504 100644 --- a/node/node.go +++ b/node/node.go @@ -603,7 +603,7 @@ func (n *Node) EventMux() *event.TypeMux { // OpenDatabase opens an existing database with the given name (or creates one if no // previous can be found) from within the node's instance directory. If the node is // ephemeral, a memory database is returned. 
-func (n *Node) OpenDatabase(name string, cache, handles int, namespace string) (ethdb.Database, error) { +func (n *Node) OpenDatabase(name string, cache, handles int, namespace string, readonly bool) (ethdb.Database, error) { n.lock.Lock() defer n.lock.Unlock() if n.state == closedState { @@ -615,7 +615,7 @@ func (n *Node) OpenDatabase(name string, cache, handles int, namespace string) ( if n.config.DataDir == "" { db = rawdb.NewMemoryDatabase() } else { - db, err = rawdb.NewLevelDBDatabase(n.ResolvePath(name), cache, handles, namespace) + db, err = rawdb.NewLevelDBDatabase(n.ResolvePath(name), cache, handles, namespace, readonly) } if err == nil { @@ -629,12 +629,12 @@ func (n *Node) OpenDatabase(name string, cache, handles int, namespace string) ( // also attaching a chain freezer to it that moves ancient chain data from the // database to immutable append-only files. If the node is an ephemeral one, a // memory database is returned. -func (n *Node) OpenDatabaseWithFreezerRemote(name string, cache, handles int, freezerURL string) (ethdb.Database, error) { +func (n *Node) OpenDatabaseWithFreezerRemote(name string, cache, handles int, freezerURL string, readonly bool) (ethdb.Database, error) { if n.config.DataDir == "" { return rawdb.NewMemoryDatabase(), nil } root := n.config.ResolvePath(name) - return rawdb.NewLevelDBDatabaseWithFreezerRemote(root, cache, handles, freezerURL) + return rawdb.NewLevelDBDatabaseWithFreezerRemote(root, cache, handles, freezerURL, readonly) } // OpenDatabaseWithFreezer opens an existing database with the given name (or @@ -642,7 +642,7 @@ func (n *Node) OpenDatabaseWithFreezerRemote(name string, cache, handles int, fr // also attaching a chain freezer to it that moves ancient chain data from the // database to immutable append-only files. If the node is an ephemeral one, a // memory database is returned. 
-func (n *Node) OpenDatabaseWithFreezer(name string, cache, handles int, freezer, namespace string) (ethdb.Database, error) { +func (n *Node) OpenDatabaseWithFreezer(name string, cache, handles int, freezer, namespace string, readonly bool) (ethdb.Database, error) { n.lock.Lock() defer n.lock.Unlock() if n.state == closedState { @@ -661,7 +661,7 @@ func (n *Node) OpenDatabaseWithFreezer(name string, cache, handles int, freezer, case !filepath.IsAbs(freezer): freezer = n.ResolvePath(freezer) } - db, err = rawdb.NewLevelDBDatabaseWithFreezer(root, cache, handles, freezer, namespace) + db, err = rawdb.NewLevelDBDatabaseWithFreezer(root, cache, handles, freezer, namespace, readonly) } if err == nil { diff --git a/node/node_test.go b/node/node_test.go index 1cd9c5bcd8..d758ed058f 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -352,7 +352,7 @@ func TestNodeCloseClosesDB(t *testing.T) { stack, _ := New(testNodeConfig()) defer stack.Close() - db, err := stack.OpenDatabase("mydb", 0, 0, "") + db, err := stack.OpenDatabase("mydb", 0, 0, "", false) if err != nil { t.Fatal("can't open DB:", err) } @@ -375,7 +375,7 @@ func TestNodeOpenDatabaseFromLifecycleStart(t *testing.T) { var err error stack.RegisterLifecycle(&InstrumentedService{ startHook: func() { - db, err = stack.OpenDatabase("mydb", 0, 0, "") + db, err = stack.OpenDatabase("mydb", 0, 0, "", false) if err != nil { t.Fatal("can't open DB:", err) } @@ -396,7 +396,7 @@ func TestNodeOpenDatabaseFromLifecycleStop(t *testing.T) { stack.RegisterLifecycle(&InstrumentedService{ stopHook: func() { - db, err := stack.OpenDatabase("mydb", 0, 0, "") + db, err := stack.OpenDatabase("mydb", 0, 0, "", false) if err != nil { t.Fatal("can't open DB:", err) } diff --git a/oss-fuzz.sh b/oss-fuzz.sh index ac93a5a467..f8152f0fad 100644 --- a/oss-fuzz.sh +++ b/oss-fuzz.sh @@ -102,6 +102,7 @@ compile_fuzzer tests/fuzzers/stacktrie Fuzz fuzzStackTrie compile_fuzzer tests/fuzzers/difficulty Fuzz fuzzDifficulty compile_fuzzer tests/fuzzers/abi Fuzz fuzzAbi compile_fuzzer tests/fuzzers/les Fuzz fuzzLes +compile_fuzzer tests/fuzzers/vflux FuzzClientPool fuzzClientPool compile_fuzzer tests/fuzzers/bls12381 FuzzG1Add fuzz_g1_add compile_fuzzer tests/fuzzers/bls12381 FuzzG1Mul fuzz_g1_mul diff --git a/p2p/dnsdisc/client.go b/p2p/dnsdisc/client.go index d3e8111ab5..3e4b50aadd 100644 --- a/p2p/dnsdisc/client.go +++ b/p2p/dnsdisc/client.go @@ -37,9 +37,10 @@ import ( // Client discovers nodes by querying DNS servers. type Client struct { - cfg Config - clock mclock.Clock - entries *lru.Cache + cfg Config + clock mclock.Clock + entries *lru.Cache + ratelimit *rate.Limiter } // Config holds configuration options for the client. @@ -97,8 +98,12 @@ func NewClient(cfg Config) *Client { panic(err) } rlimit := rate.NewLimiter(rate.Limit(cfg.RateLimit), 10) - cfg.Resolver = &rateLimitResolver{cfg.Resolver, rlimit} - return &Client{cfg: cfg, entries: cache, clock: mclock.System{}} + return &Client{ + cfg: cfg, + entries: cache, + clock: mclock.System{}, + ratelimit: rlimit, + } } // SyncTree downloads the entire node tree at the given URL. @@ -157,6 +162,13 @@ func parseAndVerifyRoot(txt string, loc *linkEntry) (rootEntry, error) { // resolveEntry retrieves an entry from the cache or fetches it from the network // if it isn't cached. func (c *Client) resolveEntry(ctx context.Context, domain, hash string) (entry, error) { + // The rate limit always applies, even when the result might be cached. 
This is
+	// important because it avoids hot-spinning in consumers of node iterators created on
+	// this client.
+	if err := c.ratelimit.Wait(ctx); err != nil {
+		return nil, err
+	}
+
 	cacheKey := truncateHash(hash)
 	if e, ok := c.entries.Get(cacheKey); ok {
 		return e.(entry), nil
@@ -196,19 +208,6 @@ func (c *Client) doResolveEntry(ctx context.Context, domain, hash string) (entry
 	return nil, nameError{name, errNoEntry}
 }
 
-// rateLimitResolver applies a rate limit to a Resolver.
-type rateLimitResolver struct {
-	r       Resolver
-	limiter *rate.Limiter
-}
-
-func (r *rateLimitResolver) LookupTXT(ctx context.Context, domain string) ([]string, error) {
-	if err := r.limiter.Wait(ctx); err != nil {
-		return nil, err
-	}
-	return r.r.LookupTXT(ctx, domain)
-}
-
 // randomIterator traverses a set of trees and returns nodes found in them.
 type randomIterator struct {
 	cur *enode.Node
diff --git a/p2p/dnsdisc/tree.go b/p2p/dnsdisc/tree.go
index 82a935ca41..410ec3b854 100644
--- a/p2p/dnsdisc/tree.go
+++ b/p2p/dnsdisc/tree.go
@@ -113,10 +113,41 @@ func (t *Tree) Nodes() []*enode.Node {
 	return nodes
 }
 
+/*
+We want to keep the UDP size below 512 bytes. The UDP size is roughly:
+UDP length = 8 + UDP payload length (229)
+UDP payload length:
+ - dns.id 2
+ - dns.flags 2
+ - dns.count.queries 2
+ - dns.count.answers 2
+ - dns.count.auth_rr 2
+ - dns.count.add_rr 2
+ - queries (query-size + 6)
+ - answers :
+	- dns.resp.name 2
+	- dns.resp.type 2
+	- dns.resp.class 2
+	- dns.resp.ttl 4
+	- dns.resp.len 2
+	- dns.txt.length 1
+	- dns.txt resp_data_size
+
+So the total size is roughly a fixed overhead of `39`, plus the size of the
+query (domain name) and the response.
+The query size is, for example, FVY6INQ6LZ33WLCHO3BPR3FH6Y.snap.mainnet.ethdisco.net (52 bytes)
+
+We also have some static data in the response, such as `enrtree-branch:`, and potentially
+splitting the response up with `" "`, leaving us with a size of roughly `400` that we need
+to stay below.
+
+The number `370` is used to have some margin for extra overhead (for example, the DNS query
+may be larger due to more subdomains).
+*/
 const (
-	hashAbbrev    = 16
-	maxChildren   = 300 / hashAbbrev * (13 / 8)
-	minHashLength = 12
+	hashAbbrevSize = 1 + 16*13/8          // Size of an encoded hash (plus comma)
+	maxChildren    = 370 / hashAbbrevSize // 13 children
+	minHashLength  = 12
 )
 
 // MakeTree creates a tree containing the given nodes and links.
diff --git a/p2p/enr/enr.go b/p2p/enr/enr.go
index c36ae9e3ed..05e43fd805 100644
--- a/p2p/enr/enr.go
+++ b/p2p/enr/enr.go
@@ -50,6 +50,7 @@ var (
 	errNotSorted      = errors.New("record key/value pairs are not sorted by key")
 	errDuplicateKey   = errors.New("record contains duplicate key")
 	errIncompletePair = errors.New("record contains incomplete k/v pair")
+	errIncompleteList = errors.New("record contains less than two list elements")
 	errTooBig         = fmt.Errorf("record bigger than %d bytes", SizeLimit)
 	errEncodeUnsigned = errors.New("can't encode unsigned record")
 	errNotFound       = errors.New("no such key in record")
@@ -209,9 +210,15 @@ func decodeRecord(s *rlp.Stream) (dec Record, raw []byte, err error) {
 		return dec, raw, err
 	}
 	if err = s.Decode(&dec.signature); err != nil {
+		if err == rlp.EOL {
+			err = errIncompleteList
+		}
 		return dec, raw, err
 	}
 	if err = s.Decode(&dec.seq); err != nil {
+		if err == rlp.EOL {
+			err = errIncompleteList
+		}
 		return dec, raw, err
 	}
 	// The rest of the record contains sorted k/v pairs.
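For reference, a quick sanity check of the arithmetic in the tree.go comment above. This is a standalone sketch, not part of the patch; it assumes only the standard encoding/base32 package and the `enrtree-branch:` prefix used by the tree encoding:

package main

import (
	"encoding/base32"
	"fmt"
	"strings"
)

func main() {
	// A truncated hash is 16 bytes. Base32 packs 5 bits per character, so its
	// text form is ceil(16*8/5) = 26 characters; the 13/8 factor in the new
	// hashAbbrevSize constant is the same 8/5 = 1.6 expansion ratio, rounded
	// up so the integer math never underestimates.
	abbrev := base32.StdEncoding.WithPadding(base32.NoPadding).EncodeToString(make([]byte, 16))
	fmt.Println(len(abbrev)) // 26, hence hashAbbrevSize = 1 + 26 = 27 including the comma

	// A maximally full branch entry with maxChildren = 370/27 = 13 children:
	children := make([]string, 13)
	for i := range children {
		children[i] = abbrev
	}
	entry := "enrtree-branch:" + strings.Join(children, ",")
	fmt.Println(len(entry)) // 15 + 13*26 + 12 = 365 bytes, under the 370-byte margin
}

With 27 bytes per abbreviated child hash, 13 children fill 365 of the 370-byte budget; a 14th child would overflow it (392 > 370).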
diff --git a/p2p/enr/enr_test.go b/p2p/enr/enr_test.go index 96a9ced5cf..bf3f104744 100644 --- a/p2p/enr/enr_test.go +++ b/p2p/enr/enr_test.go @@ -231,6 +231,29 @@ func TestRecordTooBig(t *testing.T) { require.NoError(t, signTest([]byte{5}, &r)) } +// This checks that incomplete RLP inputs are handled correctly. +func TestDecodeIncomplete(t *testing.T) { + type decTest struct { + input []byte + err error + } + tests := []decTest{ + {[]byte{0xC0}, errIncompleteList}, + {[]byte{0xC1, 0x1}, errIncompleteList}, + {[]byte{0xC2, 0x1, 0x2}, nil}, + {[]byte{0xC3, 0x1, 0x2, 0x3}, errIncompletePair}, + {[]byte{0xC4, 0x1, 0x2, 0x3, 0x4}, nil}, + {[]byte{0xC5, 0x1, 0x2, 0x3, 0x4, 0x5}, errIncompletePair}, + } + for _, test := range tests { + var r Record + err := rlp.DecodeBytes(test.input, &r) + if err != test.err { + t.Errorf("wrong error for %X: %v", test.input, err) + } + } +} + // TestSignEncodeAndDecodeRandom tests encoding/decoding of records containing random key/value pairs. func TestSignEncodeAndDecodeRandom(t *testing.T) { var r Record diff --git a/p2p/metrics.go b/p2p/metrics.go index 44946473fa..be0d2f495e 100644 --- a/p2p/metrics.go +++ b/p2p/metrics.go @@ -25,8 +25,17 @@ import ( ) const ( + // ingressMeterName is the prefix of the per-packet inbound metrics. ingressMeterName = "p2p/ingress" - egressMeterName = "p2p/egress" + + // egressMeterName is the prefix of the per-packet outbound metrics. + egressMeterName = "p2p/egress" + + // HandleHistName is the prefix of the per-packet serving time histograms. + HandleHistName = "p2p/handle" + + // WaitHistName is the prefix of the per-packet (req only) waiting time histograms. + WaitHistName = "p2p/wait" ) var ( diff --git a/p2p/nodestate/nodestate.go b/p2p/nodestate/nodestate.go index d3166f1d87..9323d53cbd 100644 --- a/p2p/nodestate/nodestate.go +++ b/p2p/nodestate/nodestate.go @@ -858,6 +858,23 @@ func (ns *NodeStateMachine) GetField(n *enode.Node, field Field) interface{} { return nil } +// GetState retrieves the current state of the given node. Note that when used in a +// subscription callback the result can be out of sync with the state change represented +// by the callback parameters so extra safety checks might be necessary. 
+func (ns *NodeStateMachine) GetState(n *enode.Node) Flags { + ns.lock.Lock() + defer ns.lock.Unlock() + + ns.checkStarted() + if ns.closed { + return Flags{} + } + if _, node := ns.updateEnode(n); node != nil { + return Flags{mask: node.state, setup: ns.setup} + } + return Flags{} +} + // SetField sets the given field of the given node and blocks until the operation is finished func (ns *NodeStateMachine) SetField(n *enode.Node, field Field, value interface{}) error { ns.lock.Lock() diff --git a/p2p/server.go b/p2p/server.go index fc71548554..f70ebf7216 100644 --- a/p2p/server.go +++ b/p2p/server.go @@ -876,8 +876,8 @@ func (srv *Server) listenLoop() { } remoteIP := netutil.AddrIP(fd.RemoteAddr()) - if err := srv.checkInboundConn(fd, remoteIP); err != nil { - srv.log.Debug("Rejected inbound connnection", "addr", fd.RemoteAddr(), "err", err) + if err := srv.checkInboundConn(remoteIP); err != nil { + srv.log.Debug("Rejected inbound connection", "addr", fd.RemoteAddr(), "err", err) fd.Close() slots <- struct{}{} continue @@ -897,7 +897,7 @@ func (srv *Server) listenLoop() { } } -func (srv *Server) checkInboundConn(fd net.Conn, remoteIP net.IP) error { +func (srv *Server) checkInboundConn(remoteIP net.IP) error { if remoteIP == nil { return nil } diff --git a/params/config.go b/params/config.go index 3db77498f5..7f90e8e283 100644 --- a/params/config.go +++ b/params/config.go @@ -73,10 +73,10 @@ var ( // MainnetTrustedCheckpoint contains the light client trusted checkpoint for the main network. MainnetTrustedCheckpoint = &ctypes.TrustedCheckpoint{ - SectionIndex: 364, - SectionHead: common.HexToHash("0x3fd20ff221f5e962bb66f57a61973bfc2ba959879a6509384a80a45d208b5afc"), - CHTRoot: common.HexToHash("0xe35b3b807f4e9427fb4e2929961c78a9dc10f503a538319031cc7d00946a0591"), - BloomRoot: common.HexToHash("0x340553b378b2db214b898be15c80ac5be7caffc2e6448fd6f7aff23290d89296"), + SectionIndex: 371, + SectionHead: common.HexToHash("0x50fd3cec5376ede90ef9129772022690cd1467f22c18abb7faa11e793c51e9c9"), + CHTRoot: common.HexToHash("0xb57b4b22a77b5930847b1ca9f62daa11eae6578948cb7b18997f2c0fe5757025"), + BloomRoot: common.HexToHash("0xa338f8a868a194fa90327d0f5877f656a9f3640c618d2a01a01f2e76ef9ef954"), } // MainnetCheckpointOracle contains a set of configs for the main network oracle. @@ -158,10 +158,10 @@ var ( // RinkebyTrustedCheckpoint contains the light client trusted checkpoint for the Rinkeby test network. RinkebyTrustedCheckpoint = &ctypes.TrustedCheckpoint{ - SectionIndex: 248, - SectionHead: common.HexToHash("0x26874cf023695778cc3175d1bec19894204d8d0b756b587e81e35f300dc5b33c"), - CHTRoot: common.HexToHash("0xc129d1ed6673c5d3e1068e9d97244e72952b7ca08acbd7b3bfa58bc3085c442c"), - BloomRoot: common.HexToHash("0x1dafe79dcd7d348782aa834a4a4397890d9ad90643736791132ed5c16879a037"), + SectionIndex: 254, + SectionHead: common.HexToHash("0x0cba01dd71baa22ac8fa0b105bc908e94f9ecfbc79b4eb97427fe07b5851dd10"), + CHTRoot: common.HexToHash("0x5673d8fc49c9c7d8729068640e4b392d46952a5a38798973bac1cf1d0d27ad7d"), + BloomRoot: common.HexToHash("0x70e01232b66df9a7778ae3291c9217afb9a2d9f799f32d7b912bd37e7bce83a8"), } // RinkebyCheckpointOracle contains a set of configs for the Rinkeby test network oracle. @@ -201,10 +201,10 @@ var ( // GoerliTrustedCheckpoint contains the light client trusted checkpoint for the Görli test network. 
GoerliTrustedCheckpoint = &ctypes.TrustedCheckpoint{ - SectionIndex: 132, - SectionHead: common.HexToHash("0x29fa240c97b47ecbfef3fea8b3cff035d93154d1d48b25e3333cf2f7067c5324"), - CHTRoot: common.HexToHash("0x85e5c59e5b202284291405dadc40dc36ab6417bd189fb18be24f6dcab6b80511"), - BloomRoot: common.HexToHash("0x0b7afdd200477f46e982e2cabc822ac454424986fa50d899685dfaeede1f882d"), + SectionIndex: 138, + SectionHead: common.HexToHash("0xb7ea0566abd7d0def5b3c9afa3431debb7bb30b65af35f106ca93a59e6c859a7"), + CHTRoot: common.HexToHash("0x378c7ea9081242beb982e2e39567ba12f2ed3e59e5aba3f9db1d595646d7c9f4"), + BloomRoot: common.HexToHash("0x523c169286cfca52e8a6579d8c35dc8bf093412d8a7478163bfa81ae91c2492d"), } // GoerliCheckpointOracle contains a set of configs for the Goerli test network oracle. diff --git a/rlp/iterator.go b/rlp/iterator.go index c28866dbc1..559e03a868 100644 --- a/rlp/iterator.go +++ b/rlp/iterator.go @@ -23,6 +23,7 @@ type listIterator struct { } // NewListIterator creates an iterator for the (list) represented by data +// TODO: Consider removing this implementation, as it is no longer used. func NewListIterator(data RawValue) (*listIterator, error) { k, t, c, err := readKind(data) if err != nil { diff --git a/rpc/client_test.go b/rpc/client_test.go index 5d301a07a7..224eb0c5c8 100644 --- a/rpc/client_test.go +++ b/rpc/client_test.go @@ -18,6 +18,7 @@ package rpc import ( "context" + "encoding/json" "fmt" "math/rand" "net" @@ -376,6 +377,93 @@ func TestClientCloseUnsubscribeRace(t *testing.T) { } } +// unsubscribeRecorder collects the subscription IDs of *_unsubscribe calls. +type unsubscribeRecorder struct { + ServerCodec + unsubscribes map[string]bool +} + +func (r *unsubscribeRecorder) readBatch() ([]*jsonrpcMessage, bool, error) { + if r.unsubscribes == nil { + r.unsubscribes = make(map[string]bool) + } + + msgs, batch, err := r.ServerCodec.readBatch() + for _, msg := range msgs { + if msg.isUnsubscribe() { + var params []string + if err := json.Unmarshal(msg.Params, ¶ms); err != nil { + panic("unsubscribe decode error: " + err.Error()) + } + r.unsubscribes[params[0]] = true + } + } + return msgs, batch, err +} + +// This checks that Client calls the _unsubscribe method on the server when Unsubscribe is +// called on a subscription. +func TestClientSubscriptionUnsubscribeServer(t *testing.T) { + t.Parallel() + + // Create the server. + srv := NewServer() + srv.RegisterName("nftest", new(notificationTestService)) + p1, p2 := net.Pipe() + recorder := &unsubscribeRecorder{ServerCodec: NewCodec(p1)} + go srv.ServeCodec(recorder, OptionMethodInvocation|OptionSubscriptions) + defer srv.Stop() + + // Create the client on the other end of the pipe. + client, _ := newClient(context.Background(), func(context.Context) (ServerCodec, error) { + return NewCodec(p2), nil + }) + defer client.Close() + + // Create the subscription. + ch := make(chan int) + sub, err := client.Subscribe(context.Background(), "nftest", ch, "someSubscription", 1, 1) + if err != nil { + t.Fatal(err) + } + + // Unsubscribe and check that unsubscribe was called. + sub.Unsubscribe() + if !recorder.unsubscribes[sub.subid] { + t.Fatal("client did not call unsubscribe method") + } + if _, open := <-sub.Err(); open { + t.Fatal("subscription error channel not closed after unsubscribe") + } +} + +// This checks that the subscribed channel can be closed after Unsubscribe. 
+// It is the reproducer for https://github.com/ethereum/go-ethereum/issues/22322 +func TestClientSubscriptionChannelClose(t *testing.T) { + t.Parallel() + + var ( + srv = NewServer() + httpsrv = httptest.NewServer(srv.WebsocketHandler(nil)) + wsURL = "ws:" + strings.TrimPrefix(httpsrv.URL, "http:") + ) + defer srv.Stop() + defer httpsrv.Close() + + srv.RegisterName("nftest", new(notificationTestService)) + client, _ := Dial(wsURL) + + for i := 0; i < 100; i++ { + ch := make(chan int, 100) + sub, err := client.Subscribe(context.Background(), "nftest", ch, "someSubscription", maxClientSubscriptionBuffer-1, 1) + if err != nil { + t.Fatal(err) + } + sub.Unsubscribe() + close(ch) + } +} + // This test checks that Client doesn't lock up when a single subscriber // doesn't read subscription events. func TestClientNotificationStorm(t *testing.T) { diff --git a/rpc/handler.go b/rpc/handler.go index 23023eaca1..85f774a1b7 100644 --- a/rpc/handler.go +++ b/rpc/handler.go @@ -189,7 +189,7 @@ func (h *handler) cancelAllRequests(err error, inflightReq *requestOp) { } for id, sub := range h.clientSubs { delete(h.clientSubs, id) - sub.quitWithError(false, err) + sub.close(err) } } @@ -281,7 +281,7 @@ func (h *handler) handleResponse(msg *jsonrpcMessage) { return } if op.err = json.Unmarshal(msg.Result, &op.sub.subid); op.err == nil { - go op.sub.start() + go op.sub.run() h.clientSubs[op.sub.subid] = op.sub } } diff --git a/rpc/subscription.go b/rpc/subscription.go index 233215d792..942e764e5d 100644 --- a/rpc/subscription.go +++ b/rpc/subscription.go @@ -208,23 +208,37 @@ type ClientSubscription struct { channel reflect.Value namespace string subid string - in chan json.RawMessage - quitOnce sync.Once // ensures quit is closed once - quit chan struct{} // quit is closed when the subscription exits - errOnce sync.Once // ensures err is closed once - err chan error + // The in channel receives notification values from client dispatcher. + in chan json.RawMessage + + // The error channel receives the error from the forwarding loop. + // It is closed by Unsubscribe. + err chan error + errOnce sync.Once + + // Closing of the subscription is requested by sending on 'quit'. This is handled by + // the forwarding loop, which closes 'forwardDone' when it has stopped sending to + // sub.channel. Finally, 'unsubDone' is closed after unsubscribing on the server side. + quit chan error + forwardDone chan struct{} + unsubDone chan struct{} } +// This is the sentinel value sent on sub.quit when Unsubscribe is called. +var errUnsubscribed = errors.New("unsubscribed") + func newClientSubscription(c *Client, namespace string, channel reflect.Value) *ClientSubscription { sub := &ClientSubscription{ - client: c, - namespace: namespace, - etype: channel.Type().Elem(), - channel: channel, - quit: make(chan struct{}), - err: make(chan error, 1), - in: make(chan json.RawMessage), + client: c, + namespace: namespace, + etype: channel.Type().Elem(), + channel: channel, + in: make(chan json.RawMessage), + quit: make(chan error), + forwardDone: make(chan struct{}), + unsubDone: make(chan struct{}), + err: make(chan error, 1), } return sub } @@ -232,9 +246,9 @@ func newClientSubscription(c *Client, namespace string, channel reflect.Value) * // Err returns the subscription error channel. The intended use of Err is to schedule // resubscription when the client connection is closed unexpectedly. // -// The error channel receives a value when the subscription has ended due -// to an error. 
The received error is nil if Close has been called -// on the underlying client and no other error has occurred. +// The error channel receives a value when the subscription has ended due to an error. The +// received error is nil if Close has been called on the underlying client and no other +// error has occurred. // // The error channel is closed when Unsubscribe is called on the subscription. func (sub *ClientSubscription) Err() <-chan error { @@ -244,41 +258,63 @@ func (sub *ClientSubscription) Err() <-chan error { // Unsubscribe unsubscribes the notification and closes the error channel. // It can safely be called more than once. func (sub *ClientSubscription) Unsubscribe() { - sub.quitWithError(true, nil) - sub.errOnce.Do(func() { close(sub.err) }) -} - -func (sub *ClientSubscription) quitWithError(unsubscribeServer bool, err error) { - sub.quitOnce.Do(func() { - // The dispatch loop won't be able to execute the unsubscribe call - // if it is blocked on deliver. Close sub.quit first because it - // unblocks deliver. - close(sub.quit) - if unsubscribeServer { - sub.requestUnsubscribe() - } - if err != nil { - if err == ErrClientQuit { - err = nil // Adhere to subscription semantics. - } - sub.err <- err + sub.errOnce.Do(func() { + select { + case sub.quit <- errUnsubscribed: + <-sub.unsubDone + case <-sub.unsubDone: } + close(sub.err) }) } +// deliver is called by the client's message dispatcher to send a notification value. func (sub *ClientSubscription) deliver(result json.RawMessage) (ok bool) { select { case sub.in <- result: return true - case <-sub.quit: + case <-sub.forwardDone: return false } } -func (sub *ClientSubscription) start() { - sub.quitWithError(sub.forward()) +// close is called by the client's message dispatcher when the connection is closed. +func (sub *ClientSubscription) close(err error) { + select { + case sub.quit <- err: + case <-sub.forwardDone: + } +} + +// run is the forwarding loop of the subscription. It runs in its own goroutine and +// is launched by the client's handler after the subscription has been created. +func (sub *ClientSubscription) run() { + defer close(sub.unsubDone) + + unsubscribe, err := sub.forward() + + // The client's dispatch loop won't be able to execute the unsubscribe call if it is + // blocked in sub.deliver() or sub.close(). Closing forwardDone unblocks them. + close(sub.forwardDone) + + // Call the unsubscribe method on the server. + if unsubscribe { + sub.requestUnsubscribe() + } + + // Send the error. + if err != nil { + if err == ErrClientQuit { + // ErrClientQuit gets here when Client.Close is called. This is reported as a + // nil error because it's not an error, but we can't close sub.err here. + err = nil + } + sub.err <- err + } } +// forward is the forwarding loop. It takes in RPC notifications and sends them +// on the subscription channel. 
func (sub *ClientSubscription) forward() (unsubscribeServer bool, err error) { cases := []reflect.SelectCase{ {Dir: reflect.SelectRecv, Chan: reflect.ValueOf(sub.quit)}, @@ -286,7 +322,7 @@ func (sub *ClientSubscription) forward() (unsubscribeServer bool, err error) { {Dir: reflect.SelectSend, Chan: sub.channel}, } buffer := list.New() - defer buffer.Init() + for { var chosen int var recv reflect.Value @@ -301,7 +337,15 @@ func (sub *ClientSubscription) forward() (unsubscribeServer bool, err error) { switch chosen { case 0: // <-sub.quit - return false, nil + if !recv.IsNil() { + err = recv.Interface().(error) + } + if err == errUnsubscribed { + // Exiting because Unsubscribe was called, unsubscribe on server. + return true, nil + } + return false, err + case 1: // <-sub.in val, err := sub.unmarshal(recv.Interface().(json.RawMessage)) if err != nil { @@ -311,6 +355,7 @@ func (sub *ClientSubscription) forward() (unsubscribeServer bool, err error) { return true, ErrSubscriptionQueueOverflow } buffer.PushBack(val) + case 2: // sub.channel<- cases[2].Send = reflect.Value{} // Don't hold onto the value. buffer.Remove(buffer.Front()) diff --git a/rpc/websocket_test.go b/rpc/websocket_test.go index 37ed19476f..4976853baf 100644 --- a/rpc/websocket_test.go +++ b/rpc/websocket_test.go @@ -129,11 +129,15 @@ func TestClientWebsocketPing(t *testing.T) { if err != nil { t.Fatalf("client dial error: %v", err) } + defer client.Close() + resultChan := make(chan int) sub, err := client.EthSubscribe(ctx, resultChan, "foo") if err != nil { t.Fatalf("client subscribe error: %v", err) } + // Note: Unsubscribe is not called on this subscription because the mockup + // server can't handle the request. // Wait for the context's deadline to be reached before proceeding. // This is important for reproducing https://github.com/ethereum/go-ethereum/issues/19798 diff --git a/tests/fuzzers/les/les-fuzzer.go b/tests/fuzzers/les/les-fuzzer.go index 46ed6228f8..f8fe8c78c4 100644 --- a/tests/fuzzers/les/les-fuzzer.go +++ b/tests/fuzzers/les/les-fuzzer.go @@ -263,18 +263,18 @@ func (d dummyMsg) Decode(val interface{}) error { } func (f *fuzzer) doFuzz(msgCode uint64, packet interface{}) { - version := f.randomInt(3) + 2 // [LES2, LES3, LES4] - peer := l.NewFuzzerPeer(version) enc, err := rlp.EncodeToBytes(packet) if err != nil { panic(err) } + version := f.randomInt(3) + 2 // [LES2, LES3, LES4] + peer, closeFn := l.NewFuzzerPeer(version) + defer closeFn() fn, _, _, err := l.Les3[msgCode].Handle(dummyMsg{enc}) if err != nil { panic(err) } fn(f, peer, func() bool { return true }) - } func Fuzz(input []byte) int { diff --git a/tests/fuzzers/vflux/clientpool-fuzzer.go b/tests/fuzzers/vflux/clientpool-fuzzer.go new file mode 100644 index 0000000000..41b8627348 --- /dev/null +++ b/tests/fuzzers/vflux/clientpool-fuzzer.go @@ -0,0 +1,289 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. 
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package vflux
+
+import (
+	"bytes"
+	"encoding/binary"
+	"io"
+	"math"
+	"math/big"
+	"time"
+
+	"github.com/ethereum/go-ethereum/common/mclock"
+	"github.com/ethereum/go-ethereum/ethdb/memorydb"
+	"github.com/ethereum/go-ethereum/les/vflux"
+	vfs "github.com/ethereum/go-ethereum/les/vflux/server"
+	"github.com/ethereum/go-ethereum/p2p/enode"
+	"github.com/ethereum/go-ethereum/p2p/enr"
+	"github.com/ethereum/go-ethereum/rlp"
+)
+
+type fuzzer struct {
+	peers                  [256]*clientPeer
+	disconnectList         []*clientPeer
+	input                  io.Reader
+	exhausted              bool
+	activeCount, activeCap uint64
+	maxCount, maxCap       uint64
+}
+
+type clientPeer struct {
+	fuzzer  *fuzzer
+	node    *enode.Node
+	freeID  string
+	timeout time.Duration
+
+	balance  vfs.ConnectedBalance
+	capacity uint64
+}
+
+func (p *clientPeer) Node() *enode.Node {
+	return p.node
+}
+
+func (p *clientPeer) FreeClientId() string {
+	return p.freeID
+}
+
+func (p *clientPeer) InactiveAllowance() time.Duration {
+	return p.timeout
+}
+
+func (p *clientPeer) UpdateCapacity(newCap uint64, requested bool) {
+	p.fuzzer.activeCap -= p.capacity
+	if p.capacity != 0 {
+		p.fuzzer.activeCount--
+	}
+	p.capacity = newCap
+	p.fuzzer.activeCap += p.capacity
+	if p.capacity != 0 {
+		p.fuzzer.activeCount++
+	}
+}
+
+func (p *clientPeer) Disconnect() {
+	p.fuzzer.disconnectList = append(p.fuzzer.disconnectList, p)
+	p.fuzzer.activeCap -= p.capacity
+	if p.capacity != 0 {
+		p.fuzzer.activeCount--
+	}
+	p.capacity = 0
+	p.balance = nil
+}
+
+func newFuzzer(input []byte) *fuzzer {
+	f := &fuzzer{
+		input: bytes.NewReader(input),
+	}
+	for i := range f.peers {
+		f.peers[i] = &clientPeer{
+			fuzzer:  f,
+			node:    enode.SignNull(new(enr.Record), enode.ID{byte(i)}),
+			freeID:  string([]byte{byte(i)}),
+			timeout: f.randomDelay(),
+		}
+	}
+	return f
+}
+
+func (f *fuzzer) read(size int) []byte {
+	out := make([]byte, size)
+	if _, err := f.input.Read(out); err != nil {
+		f.exhausted = true
+	}
+	return out
+}
+
+func (f *fuzzer) randomByte() byte {
+	d := f.read(1)
+	return d[0]
+}
+
+func (f *fuzzer) randomBool() bool {
+	d := f.read(1)
+	return d[0]&1 == 1
+}
+
+func (f *fuzzer) randomInt(max int) int {
+	if max == 0 {
+		return 0
+	}
+	if max <= 256 {
+		return int(f.randomByte()) % max
+	}
+	var a uint16
+	if err := binary.Read(f.input, binary.LittleEndian, &a); err != nil {
+		f.exhausted = true
+	}
+	return int(a % uint16(max))
+}
+
+func (f *fuzzer) randomTokenAmount(signed bool) int64 {
+	x := uint64(f.randomInt(65000))
+	x = x * x * x * x
+
+	if signed && (x&1) == 1 {
+		if x <= math.MaxInt64 {
+			return -int64(x)
+		}
+		return math.MinInt64
+	}
+	if x <= math.MaxInt64 {
+		return int64(x)
+	}
+	return math.MaxInt64
+}
+
+func (f *fuzzer) randomDelay() time.Duration {
+	delay := f.randomByte()
+	if delay < 128 {
+		return time.Duration(delay) * time.Second
+	}
+	return 0
+}
+
+func (f *fuzzer) randomFactors() vfs.PriceFactors {
+	return vfs.PriceFactors{
+		TimeFactor:     float64(f.randomByte()) / 25500,
+		CapacityFactor: float64(f.randomByte()) / 255,
+		RequestFactor:  float64(f.randomByte()) / 255,
+	}
+}
+
+func (f *fuzzer) connectedBalanceOp(balance vfs.ConnectedBalance) {
+	switch f.randomInt(3) {
+	case 0:
+		balance.RequestServed(uint64(f.randomTokenAmount(false)))
+	case 1:
+		balance.SetPriceFactors(f.randomFactors(), f.randomFactors())
+	case 2:
+		balance.GetBalance()
+		balance.GetRawBalance()
+		balance.GetPriceFactors()
+	}
+}
+
+func (f
*fuzzer) atomicBalanceOp(balance vfs.AtomicBalanceOperator) { + switch f.randomInt(3) { + case 0: + balance.AddBalance(f.randomTokenAmount(true)) + case 1: + balance.SetBalance(uint64(f.randomTokenAmount(false)), uint64(f.randomTokenAmount(false))) + case 2: + balance.GetBalance() + balance.GetRawBalance() + balance.GetPriceFactors() + } +} + +func FuzzClientPool(input []byte) int { + if len(input) > 10000 { + return -1 + } + f := newFuzzer(input) + if f.exhausted { + return 0 + } + clock := &mclock.Simulated{} + db := memorydb.New() + pool := vfs.NewClientPool(db, 10, f.randomDelay(), clock, func() bool { return true }) + pool.Start() + defer pool.Stop() + + count := 0 + for !f.exhausted && count < 1000 { + count++ + switch f.randomInt(11) { + case 0: + i := int(f.randomByte()) + f.peers[i].balance = pool.Register(f.peers[i]) + case 1: + i := int(f.randomByte()) + f.peers[i].Disconnect() + case 2: + f.maxCount = uint64(f.randomByte()) + f.maxCap = uint64(f.randomByte()) + f.maxCap *= f.maxCap + pool.SetLimits(f.maxCount, f.maxCap) + case 3: + pool.SetConnectedBias(f.randomDelay()) + case 4: + pool.SetDefaultFactors(f.randomFactors(), f.randomFactors()) + case 5: + pool.SetExpirationTCs(uint64(f.randomInt(50000)), uint64(f.randomInt(50000))) + case 6: + if _, err := pool.SetCapacity(f.peers[f.randomByte()].node, uint64(f.randomByte()), f.randomDelay(), f.randomBool()); err == vfs.ErrCantFindMaximum { + panic(nil) + } + case 7: + if balance := f.peers[f.randomByte()].balance; balance != nil { + f.connectedBalanceOp(balance) + } + case 8: + pool.BalanceOperation(f.peers[f.randomByte()].node.ID(), f.peers[f.randomByte()].freeID, func(balance vfs.AtomicBalanceOperator) { + count := f.randomInt(4) + for i := 0; i < count; i++ { + f.atomicBalanceOp(balance) + } + }) + case 9: + pool.TotalTokenAmount() + pool.GetExpirationTCs() + pool.Active() + pool.Limits() + pool.GetPosBalanceIDs(f.peers[f.randomByte()].node.ID(), f.peers[f.randomByte()].node.ID(), f.randomInt(100)) + case 10: + req := vflux.CapacityQueryReq{ + Bias: uint64(f.randomByte()), + AddTokens: make([]vflux.IntOrInf, f.randomInt(vflux.CapacityQueryMaxLen+1)), + } + for i := range req.AddTokens { + v := vflux.IntOrInf{Type: uint8(f.randomInt(4))} + if v.Type < 2 { + v.Value = *big.NewInt(f.randomTokenAmount(false)) + } + req.AddTokens[i] = v + } + reqEnc, err := rlp.EncodeToBytes(&req) + if err != nil { + panic(err) + } + p := int(f.randomByte()) + if p < len(reqEnc) { + reqEnc[p] = f.randomByte() + } + pool.Handle(f.peers[f.randomByte()].node.ID(), f.peers[f.randomByte()].freeID, vflux.CapacityQueryName, reqEnc) + } + + for _, peer := range f.disconnectList { + pool.Unregister(peer) + } + f.disconnectList = nil + if d := f.randomDelay(); d > 0 { + clock.Run(d) + } + //fmt.Println(f.activeCount, f.maxCount, f.activeCap, f.maxCap) + if activeCount, activeCap := pool.Active(); activeCount != f.activeCount || activeCap != f.activeCap { + panic(nil) + } + if f.activeCount > f.maxCount || f.activeCap > f.maxCap { + panic(nil) + } + } + return 0 +} diff --git a/tests/fuzzers/vflux/debug/main.go b/tests/fuzzers/vflux/debug/main.go new file mode 100644 index 0000000000..de0b5d4124 --- /dev/null +++ b/tests/fuzzers/vflux/debug/main.go @@ -0,0 +1,41 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. 
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package main
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+
+	"github.com/ethereum/go-ethereum/tests/fuzzers/vflux"
+)
+
+func main() {
+	if len(os.Args) != 2 {
+		fmt.Fprintf(os.Stderr, "Usage: debug <file>\n")
+		fmt.Fprintf(os.Stderr, "Example\n")
+		fmt.Fprintf(os.Stderr, "    $ debug ../crashers/4bbef6857c733a87ecf6fd8b9e7238f65eb9862a\n")
+		os.Exit(1)
+	}
+	crasher := os.Args[1]
+	data, err := ioutil.ReadFile(crasher)
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "error loading crasher %v: %v", crasher, err)
+		os.Exit(1)
+	}
+	vflux.FuzzClientPool(data)
+}
diff --git a/trie/trie_test.go b/trie/trie_test.go
index 87bce9abca..3aa4098d14 100644
--- a/trie/trie_test.go
+++ b/trie/trie_test.go
@@ -1060,7 +1060,7 @@ func tempDB() (string, *Database) {
 	if err != nil {
 		panic(fmt.Sprintf("can't create temporary directory: %v", err))
 	}
-	diskdb, err := leveldb.New(dir, 256, 0, "")
+	diskdb, err := leveldb.New(dir, 256, 0, "", false)
 	if err != nil {
 		panic(fmt.Sprintf("can't create temporary database: %v", err))
 	}
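Finally, the readonly flag introduced above threads from node.OpenDatabase through the rawdb constructors down to leveldb.New, as the trie_test.go hunk shows. Below is a minimal sketch of how a caller might use it; the datadir path and cache/handle sizes are illustrative assumptions, and only the post-patch OpenDatabase signature is taken from the hunks above:

package main

import (
	"log"

	"github.com/ethereum/go-ethereum/node"
)

func main() {
	// Stand up a throwaway node instance; a real inspection tool would point
	// DataDir at an existing datadir instead. The path here is illustrative.
	stack, err := node.New(&node.Config{DataDir: "/tmp/example-datadir"})
	if err != nil {
		log.Fatal(err)
	}
	defer stack.Close()

	// The new trailing flag: open the store without write access, so the
	// database can be examined alongside a running node without mutating it.
	db, err := stack.OpenDatabase("chaindata", 512, 512, "eth/db/chaindata/", true)
	if err != nil {
		log.Fatalf("can't open database read-only: %v", err)
	}
	defer db.Close()
}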