From 935f10ef573193bdd6be30ead5da176e71940526 Mon Sep 17 00:00:00 2001
From: Dmytro
Date: Wed, 8 May 2024 18:01:34 +0100
Subject: [PATCH 01/48] diagnostics: serve local UI (#10210)

Added a command to run the UI locally on the node machine. It allows
skipping the PIN step when connecting to a local node.

Run `make diag` => `./build/bin/diag ui`, which will serve the UI on
127.0.0.1:6060 by default.

OPTIONS:
--debug.addr value  URL of the debug endpoint (default: "127.0.0.1:6060").
                    Needs to be set if Erigon is running with custom
                    `--diagnostics.endpoint.addr` and
                    `--diagnostics.endpoint.port` flags.
--ui.addr value     Address to serve the UI on (default: "127.0.0.1:6060")
---
 cmd/diag/main.go  |   2 +
 cmd/diag/ui/ui.go | 137 ++++++++++++++++++++++++++++++++++++++++++++++
 go.mod            |   7 ++-
 go.sum            |  11 ++--
 4 files changed, 150 insertions(+), 7 deletions(-)
 create mode 100644 cmd/diag/ui/ui.go

diff --git a/cmd/diag/main.go b/cmd/diag/main.go
index a6bff652ea0..f805b75d8b1 100644
--- a/cmd/diag/main.go
+++ b/cmd/diag/main.go
@@ -14,6 +14,7 @@ import (
 	"github.com/ledgerwatch/erigon/cmd/diag/db"
 	"github.com/ledgerwatch/erigon/cmd/diag/downloader"
 	"github.com/ledgerwatch/erigon/cmd/diag/stages"
+	"github.com/ledgerwatch/erigon/cmd/diag/ui"
 	"github.com/ledgerwatch/erigon/cmd/snapshots/sync"
 	"github.com/ledgerwatch/erigon/cmd/utils"
 	"github.com/ledgerwatch/erigon/params"
@@ -33,6 +34,7 @@ func main() {
 		&downloader.Command,
 		&stages.Command,
 		&db.Command,
+		&ui.Command,
 	}
 	app.Flags = []cli.Flag{}
diff --git a/cmd/diag/ui/ui.go b/cmd/diag/ui/ui.go
new file mode 100644
index 00000000000..1620747b5d9
--- /dev/null
+++ b/cmd/diag/ui/ui.go
@@ -0,0 +1,137 @@
+package ui
+
+import (
+	"encoding/json"
+	"fmt"
+	"log"
+	"net/http"
+	"sync"
+	"time"
+
+	"github.com/ledgerwatch/erigonwatch"
+
+	"github.com/go-chi/chi/v5"
+	"github.com/go-chi/chi/v5/middleware"
+	"github.com/go-chi/cors"
+	"github.com/jedib0t/go-pretty/v6/text"
+	"github.com/ledgerwatch/erigon/cmd/diag/flags"
+	"github.com/urfave/cli/v2"
+)
+
+var (
+	UIURLFlag = cli.StringFlag{
+		Name:     "ui.addr",
+		Usage:    "URL to serve UI web application",
+		Required: false,
+		Value:    "127.0.0.1:6060",
+	}
+)
+
+var Command = cli.Command{
+	Name:      "ui",
+	Action:    runUI,
+	Aliases:   []string{"u"},
+	Usage:     "run local ui",
+	ArgsUsage: "",
+	Flags: []cli.Flag{
+		&flags.DebugURLFlag,
+		&UIURLFlag,
+	},
+	Description: ``,
+}
+
+func runUI(cli *cli.Context) error {
+	supportedSubpaths := []string{
+		"sentry-network",
+		"sentinel-network",
+		"downloader",
+		"logs",
+		"chain",
+		"data",
+		"debug",
+		"testing",
+		"performance",
+		"documentation",
+		"issues",
+		"admin",
+	}
+
+	listenUrl := cli.String(UIURLFlag.Name)
+
+	assets, _ := erigonwatch.UIFiles()
+	fs := http.FileServer(http.FS(assets))
+
+	r := chi.NewRouter()
+	r.Use(middleware.Logger)
+	r.Use(middleware.Recoverer)
+	r.Use(middleware.RouteHeaders().
+		Route("Origin", "*", cors.Handler(cors.Options{
+			AllowedOrigins:   []string{"*"},
+			AllowedMethods:   []string{"GET", "POST", "PUT", "DELETE", "OPTIONS"},
+			AllowedHeaders:   []string{"Accept", "Content-Type", "session-id"},
+			AllowCredentials: false, // <----------<<< do not allow credentials
+		})).
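+		// Note: RouteHeaders only applies the permissive CORS handler above to
+		// requests that actually carry an Origin header; credentials stay disabled.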
+ Handler) + + r.Mount("/", fs) + + for _, subpath := range supportedSubpaths { + addhandler(r, "/"+subpath, fs) + } + + // Use the file system to serve static files + url := "http://" + cli.String(flags.DebugURLFlag.Name) + addr := DiagAddress{ + Address: url, + } + + //r.Get("/diagaddr", writeDiagAdderss(addr)) + r.Handle("/data", http.StripPrefix("/data", fs)) + + r.HandleFunc("/diagaddr", func(w http.ResponseWriter, r *http.Request) { + writeDiagAdderss(w, addr) + }) + + srv := &http.Server{ + Addr: listenUrl, + Handler: r, + MaxHeaderBytes: 1 << 20, + ReadHeaderTimeout: 1 * time.Minute, + } + + var wg sync.WaitGroup + wg.Add(1) + + go func() { + defer wg.Done() // Signal that the goroutine has completed + err := srv.ListenAndServe() + + if err != nil { + log.Fatal(err) + } + }() + + uiUrl := fmt.Sprintf("http://%s", listenUrl) + fmt.Println(text.Hyperlink(uiUrl, fmt.Sprintf("UI running on %s", uiUrl))) + + wg.Wait() // Wait for the server goroutine to finish + return nil +} + +func addhandler(r *chi.Mux, path string, handler http.Handler) { + r.Handle(path, http.StripPrefix(path, handler)) +} + +type DiagAddress struct { + Address string `json:"address"` +} + +func writeDiagAdderss(w http.ResponseWriter, addr DiagAddress) { + w.Header().Set("Access-Control-Allow-Origin", "*") + w.Header().Set("Content-Type", "application/json") + + if err := json.NewEncoder(w).Encode(addr); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } + +} diff --git a/go.mod b/go.mod index 542c7d25533..d5de28c063b 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/ledgerwatch/erigon -go 1.21 +go 1.21.5 require ( github.com/erigontech/mdbx-go v0.38.0 @@ -161,7 +161,7 @@ require ( github.com/elastic/gosigar v0.14.2 // indirect github.com/flynn/noise v1.0.0 // indirect github.com/francoispqt/gojay v1.2.13 // indirect - github.com/fsnotify/fsnotify v1.5.4 // indirect + github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/garslo/gogen v0.0.0-20170307003452-d6ebae628c7c // indirect github.com/go-llsqlite/adapter v0.0.0-20230927005056-7f5ce7f0c916 // indirect github.com/go-llsqlite/crawshaw v0.4.0 // indirect @@ -187,6 +187,7 @@ require ( github.com/kr/pretty v0.3.1 // indirect github.com/kr/text v0.2.0 // indirect github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240505022337-08dce201593b // indirect + github.com/ledgerwatch/erigonwatch v0.1.0 github.com/libp2p/go-buffer-pool v0.1.0 // indirect github.com/libp2p/go-cidranger v1.1.0 // indirect github.com/libp2p/go-flow-metrics v0.1.0 // indirect @@ -266,7 +267,7 @@ require ( github.com/sirupsen/logrus v1.9.3 // indirect github.com/sosodev/duration v1.1.0 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect - github.com/spf13/cast v1.3.1 // indirect + github.com/spf13/cast v1.5.0 // indirect github.com/stoewer/go-strcase v1.2.0 // indirect github.com/supranational/blst v0.3.11 // indirect github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 // indirect diff --git a/go.sum b/go.sum index d48bd3cf332..3e097d5f3da 100644 --- a/go.sum +++ b/go.sum @@ -288,8 +288,8 @@ github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7z github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= -github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= 
-github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= +github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= +github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/garslo/gogen v0.0.0-20170307003452-d6ebae628c7c h1:uYNKzPntb8c6DKvP9EfrBjkLkU7pM4lM+uuHSIa8UtU= github.com/garslo/gogen v0.0.0-20170307003452-d6ebae628c7c/go.mod h1:Q0X6pkwTILDlzrGEckF6HKjXe48EgsY/l7K7vhY4MW8= github.com/gballet/go-verkle v0.0.0-20221121182333-31427a1f2d35 h1:I8QswD9gf3VEpr7bpepKKOm7ChxFITIG+oc1I5/S0no= @@ -541,6 +541,8 @@ github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7 github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240505022337-08dce201593b h1:lfllTgrcwFzFXX7c/L4i/xAj/8noP/yHNSmC8dDi08s= github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240505022337-08dce201593b/go.mod h1:3AuPxZc85jkehh/HA9h8gabv5MSi3kb/ddtzBsTVJFo= +github.com/ledgerwatch/erigonwatch v0.1.0 h1:TrCjklOu9ZI9/uiMigo1Jnknnk1I/dXUxXymA3xHfzo= +github.com/ledgerwatch/erigonwatch v0.1.0/go.mod h1:uYq4hs3RL1OtIYRXAxYq02tpdGkx6rtXlpzdazDDbWI= github.com/ledgerwatch/log/v3 v3.9.0 h1:iDwrXe0PVwBC68Dd94YSsHbMgQ3ufsgjzXtFNFVZFRk= github.com/ledgerwatch/log/v3 v3.9.0/go.mod h1:EiAY6upmI/6LkNhOVxb4eVsmsP11HZCnZ3PlJMjYiqE= github.com/ledgerwatch/secp256k1 v1.0.0 h1:Usvz87YoTG0uePIV8woOof5cQnLXGYa162rFf3YnwaQ= @@ -866,8 +868,9 @@ github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0b github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM= github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= -github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= +github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= @@ -1185,12 +1188,12 @@ golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211020174200-9d6173849985/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220608164250-635b8c9b7f68/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= From 5777df7123fe31ebf68c3c3fb96bb7ef8e316225 Mon Sep 17 00:00:00 2001 From: Dmytro Date: Wed, 8 May 2024 19:03:43 +0100 Subject: [PATCH 02/48] dvovk/downloaderd cmd (#10249) Added downloader command which prints snapshot download status. It allows to see: > 1. Status of your snapshot downloads. > 2. A list of files in the snapshot and their download status. You can filter files if needed. > 3. Detailed information about a specific file download, including the peers and web seeds involved. Usage: 1. `make diag` 2. run `./build/bin/diag [command] [subcommand] [--flags]` Available commands: - `downloader ` - prints snapshot download status - `downloader files` - prints a list of files in the snapshot Available flags (in addition to global flags): - `--downloader.file.filter` - filter value for `downloader files` command. Possible values `[ all | active | inactive | downloaded | queued ]` - `--downloader.file.name` - name of the file to print download details Examples: - `downloader ` >![Screenshot 2024-05-08 at 16 10 03](https://github.com/ledgerwatch/erigon/assets/29065143/5cdb542a-a74f-42cc-8d75-644b6615d7b2) --- -`downloader files` >![Screenshot 2024-05-08 at 16 11 11](https://github.com/ledgerwatch/erigon/assets/29065143/2f8667c8-5527-4124-aaab-2e90d43997bc) --- - `downloader files --downloader.file.filter=active` >![Screenshot 2024-05-08 at 17 28 04](https://github.com/ledgerwatch/erigon/assets/29065143/a9c1e58f-0a65-4809-8d73-9f59fb4d7f24) --- - `downloader files --downloader.file.name=v1-002500-003000-transactions.seg` >![Screenshot 2024-05-08 at 17 29 23](https://github.com/ledgerwatch/erigon/assets/29065143/1f35e25f-3ec4-4359-b231-7366534a9ecb) --- cmd/diag/downloader/diag_downloader.go | 333 ++++++++++++++++++++++--- cmd/diag/util/util.go | 37 +++ 2 files changed, 338 insertions(+), 32 deletions(-) diff --git a/cmd/diag/downloader/diag_downloader.go b/cmd/diag/downloader/diag_downloader.go index db6b784dcb2..99ef13e4aa4 100644 --- a/cmd/diag/downloader/diag_downloader.go +++ b/cmd/diag/downloader/diag_downloader.go @@ -1,9 +1,12 @@ package downloader import ( - "encoding/json" "fmt" + "time" + + "github.com/jedib0t/go-pretty/v6/table" + "github.com/jedib0t/go-pretty/v6/text" "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/diagnostics" "github.com/ledgerwatch/erigon/cmd/diag/flags" @@ -11,67 +14,333 @@ import ( "github.com/urfave/cli/v2" ) +var ( + FileFilterFlag = cli.StringFlag{ + Name: "downloader.file.filter", + Aliases: []string{"dff"}, + Usage: "Filter files list [all|active|inactive|downloaded|queued], dafault value is all", + Required: false, + Value: "all", + } + + FileNameFlag = cli.StringFlag{ + Name: "downloader.file.name", + Aliases: []string{"dfn"}, + Usage: "File name to print details about.", + Required: false, + Value: "", + } +) + var Command = cli.Command{ - Action: print, + Action: printDownloadStatus, Name: "downloader", Aliases: []string{"dl"}, - Usage: "print snapshot download stats", + Usage: "Print snapshot download status", ArgsUsage: "", Flags: []cli.Flag{ &flags.DebugURLFlag, &flags.OutputFlag, }, + Subcommands: []*cli.Command{ + { + Name: "files", + Aliases: []string{"fls"}, + Action: printFiles, + Usage: "Print snapshot download files status", 
+ ArgsUsage: "", + Flags: []cli.Flag{ + &flags.DebugURLFlag, + &flags.OutputFlag, + &FileFilterFlag, + &FileNameFlag, + }, + }, + }, Description: ``, } -func print(cliCtx *cli.Context) error { - var data diagnostics.SyncStatistics - url := "http://" + cliCtx.String(flags.DebugURLFlag.Name) + "/debug/diag/snapshot-sync" +func printDownloadStatus(cliCtx *cli.Context) error { + data, err := getData(cliCtx) - err := util.MakeHttpGetCall(cliCtx.Context, url, &data) + if err != nil { + + return err + } + + snapshotDownloadStatus := getSnapshotStatusRow(data.SnapshotDownload) + + switch cliCtx.String(flags.OutputFlag.Name) { + case "json": + util.RenderJson(snapshotDownloadStatus) + + case "text": + util.RenderTableWithHeader( + "Snapshot download info:", + table.Row{"Status", "Progress", "Downloaded", "Total", "Time Left", "Total Time", "Download Rate", "Upload Rate", "Peers", "Files", "Connections", "Alloc", "Sys"}, + []table.Row{snapshotDownloadStatus}, + ) + } + + return nil +} + +func printFiles(cliCtx *cli.Context) error { + if cliCtx.String(FileNameFlag.Name) != "" { + return printFile(cliCtx) + } + + data, err := getData(cliCtx) if err != nil { + txt := text.Colors{text.FgWhite, text.BgRed} + fmt.Printf("%s %s", txt.Sprint("[ERROR]"), "Failed to connect to Erigon node.") return err } + snapshotDownloadStatus := getSnapshotStatusRow(data.SnapshotDownload) + + snapDownload := data.SnapshotDownload + + files := snapDownload.SegmentsDownloading + rows := []table.Row{} + + for _, file := range files { + rows = append(rows, getFileRow(file)) + } + + filteredRows := filterRows(rows, cliCtx.String(FileFilterFlag.Name)) + switch cliCtx.String(flags.OutputFlag.Name) { case "json": - bytes, err := json.Marshal(data.SnapshotDownload) + util.RenderJson(snapshotDownloadStatus) + util.RenderJson(filteredRows) + case "text": + //Print overall status + util.RenderTableWithHeader( + "Snapshot download info:", + table.Row{"Status", "Progress", "Downloaded", "Total", "Time Left", "Total Time", "Download Rate", "Upload Rate", "Peers", "Files", "Connections", "Alloc", "Sys"}, + []table.Row{snapshotDownloadStatus}, + ) - if err != nil { - return err + //Print files status + util.RenderTableWithHeader( + "Files download info:", + table.Row{"File", "Progress", "Total", "Downloaded", "Peers", "Peers Download Rate", "Webseeds", "Webseeds Download Rate", "Time Left", "Active"}, + filteredRows, + ) + } + + return nil +} + +func printFile(cliCtx *cli.Context) error { + data, err := getData(cliCtx) + + if err != nil { + return err + } + + snapDownload := data.SnapshotDownload + + if file, ok := snapDownload.SegmentsDownloading[cliCtx.String(FileNameFlag.Name)]; ok { + fileRow := getFileRow(file) + filePeers := getPeersRows(file.Peers) + fileWebseeds := getPeersRows(file.Webseeds) + + switch cliCtx.String(flags.OutputFlag.Name) { + case "json": + util.RenderJson(fileRow) + util.RenderJson(filePeers) + util.RenderJson(fileWebseeds) + case "text": + //Print file status + util.RenderTableWithHeader( + "file download info:", + table.Row{"File", "Progress", "Total", "Downloaded", "Peers", "Peers Download Rate", "Webseeds", "Webseeds Download Rate", "Time Left", "Active"}, + []table.Row{fileRow}, + ) + + //Print peers and webseeds status + util.RenderTableWithHeader( + "", + table.Row{"Peer", "Download Rate"}, + filePeers, + ) + + util.RenderTableWithHeader( + "", + table.Row{"Webseed", "Download Rate"}, + fileWebseeds, + ) } + } else { + txt := text.Colors{text.FgWhite, text.BgRed} + fmt.Printf("%s %s", 
txt.Sprint("[ERROR]"), "File with name: "+cliCtx.String(FileNameFlag.Name)+" does not exist.") + } + + return nil +} - fmt.Println(string(bytes)) +func getSnapshotStatusRow(snapDownload diagnostics.SnapshotDownloadStatistics) table.Row { + status := "Downloading" + if snapDownload.DownloadFinished { + status = "Finished" + } - case "text": - fmt.Println("-------------------Snapshot Download-------------------") - - snapDownload := data.SnapshotDownload - var remainingBytes uint64 - percent := 50 - if snapDownload.Total > snapDownload.Downloaded { - remainingBytes = snapDownload.Total - snapDownload.Downloaded - percent = int((snapDownload.Downloaded*100)/snapDownload.Total) / 2 + downloadedPercent := float32(snapDownload.Downloaded) / float32(snapDownload.Total/100) + + remainingBytes := snapDownload.Total - snapDownload.Downloaded + downloadTimeLeft := util.CalculateTime(remainingBytes, snapDownload.DownloadRate) + + totalDownloadTimeString := time.Duration(snapDownload.TotalTime * float64(time.Second)).String() + + rowObj := table.Row{ + status, // Status + fmt.Sprintf("%.2f%%", downloadedPercent), // Progress + common.ByteCount(snapDownload.Downloaded), // Downloaded + common.ByteCount(snapDownload.Total), // Total + downloadTimeLeft, // Time Left + totalDownloadTimeString, // Total Time + common.ByteCount(snapDownload.DownloadRate) + "/s", // Download Rate + common.ByteCount(snapDownload.UploadRate) + "/s", // Upload Rate + snapDownload.Peers, // Peers + snapDownload.Files, // Files + snapDownload.Connections, // Connections + common.ByteCount(snapDownload.Alloc), // Alloc + common.ByteCount(snapDownload.Sys), // Sys + } + + return rowObj +} + +func getFileRow(file diagnostics.SegmentDownloadStatistics) table.Row { + peersDownloadRate := getFileDownloadRate(file.Peers) + webseedsDownloadRate := getFileDownloadRate(file.Webseeds) + totalDownloadRate := peersDownloadRate + webseedsDownloadRate + downloadedPercent := float32(file.DownloadedBytes) / float32(file.TotalBytes/100) + remainingBytes := file.TotalBytes - file.DownloadedBytes + downloadTimeLeft := util.CalculateTime(remainingBytes, totalDownloadRate) + isActive := "false" + if totalDownloadRate > 0 { + isActive = "true" + } + + row := table.Row{ + file.Name, + fmt.Sprintf("%.2f%%", downloadedPercent), + common.ByteCount(file.TotalBytes), + common.ByteCount(file.DownloadedBytes), + len(file.Peers), + common.ByteCount(peersDownloadRate) + "/s", + len(file.Webseeds), + common.ByteCount(webseedsDownloadRate) + "/s", + downloadTimeLeft, + isActive, + } + + return row +} + +func getPeersRows(peers []diagnostics.SegmentPeer) []table.Row { + rows := make([]table.Row, 0) + + for _, peer := range peers { + row := table.Row{ + peer.Url, + common.ByteCount(peer.DownloadRate) + "/s", } - logstr := "[" + rows = append(rows, row) + } + + return rows +} + +func getFileDownloadRate(peers []diagnostics.SegmentPeer) uint64 { + var downloadRate uint64 + + for _, peer := range peers { + downloadRate += peer.DownloadRate + } + + return downloadRate +} + +func getData(cliCtx *cli.Context) (diagnostics.SyncStatistics, error) { + var data diagnostics.SyncStatistics + url := "http://" + cliCtx.String(flags.DebugURLFlag.Name) + "/debug/diag/snapshot-sync" + + err := util.MakeHttpGetCall(cliCtx.Context, url, &data) - for i := 1; i < 50; i++ { - if i < percent { - logstr += "#" - } else { - logstr += "." 
- } + if err != nil { + return data, err + } + + return data, nil +} + +func filterRows(rows []table.Row, filter string) []table.Row { + switch filter { + case "all": + return rows + case "active": + return filterActive(rows) + case "inactive": + return filterInactive(rows) + case "downloaded": + return filterDownloaded(rows) + case "queued": + return filterQueued(rows) + } + + return rows +} + +func filterActive(rows []table.Row) []table.Row { + filtered := []table.Row{} + + for _, row := range rows { + if row[len(row)-1] == "true" { + filtered = append(filtered, row) + } + } + + return filtered +} + +func filterInactive(rows []table.Row) []table.Row { + filtered := []table.Row{} + + for _, row := range rows { + if row[len(row)-1] == "false" { + filtered = append(filtered, row) } + } - logstr += "]" + return filtered +} - fmt.Println("Download:", logstr, common.ByteCount(snapDownload.Downloaded), "/", common.ByteCount(snapDownload.Total)) - downloadTimeLeft := util.CalculateTime(remainingBytes, snapDownload.DownloadRate) +func filterDownloaded(rows []table.Row) []table.Row { + filtered := []table.Row{} - fmt.Println("Time left:", downloadTimeLeft) + for _, row := range rows { + if row[1] == "100.00%" { + filtered = append(filtered, row) + } } - return nil + return filtered +} + +func filterQueued(rows []table.Row) []table.Row { + filtered := []table.Row{} + + for _, row := range rows { + if row[1] == "0.00%" { + filtered = append(filtered, row) + } + } + + return filtered } diff --git a/cmd/diag/util/util.go b/cmd/diag/util/util.go index f6c9e6184e2..92e2a25d0e1 100644 --- a/cmd/diag/util/util.go +++ b/cmd/diag/util/util.go @@ -6,7 +6,11 @@ import ( "fmt" "io" "net/http" + "os" "time" + + "github.com/jedib0t/go-pretty/v6/table" + "github.com/jedib0t/go-pretty/v6/text" ) func MakeHttpGetCall(ctx context.Context, url string, data interface{}) error { @@ -49,3 +53,36 @@ func CalculateTime(amountLeft, rate uint64) string { return fmt.Sprintf("%dhrs:%dm", hours, minutes) } + +func RenderJson(data interface{}) { + bytes, err := json.Marshal(data) + + if err == nil { + fmt.Println(string(bytes)) + fmt.Print("\n") + } +} + +func RenderTableWithHeader(title string, header table.Row, rows []table.Row) { + if title != "" { + txt := text.Colors{text.FgBlue, text.Bold} + fmt.Println(txt.Sprint(title)) + } + + t := table.NewWriter() + t.SetOutputMirror(os.Stdout) + + t.AppendHeader(header) + if len(rows) > 0 { + t.AppendRows(rows) + } + + t.AppendSeparator() + t.Render() + fmt.Print("\n") +} + +func RenderUseDiagUI() { + txt := text.Colors{text.BgGreen, text.Bold} + fmt.Println(txt.Sprint("To get detailed info about Erigon node state use 'diag ui' command.")) +} From 850585e54712b1950f67b1deb007ae2e7629d2dd Mon Sep 17 00:00:00 2001 From: Willian Mitsuda Date: Wed, 8 May 2024 21:01:51 -0300 Subject: [PATCH 03/48] Fix typo in externalcl flag (#10255) --- cmd/utils/flags.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index caddd73cbe5..9ffdf618a00 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -137,7 +137,7 @@ var ( Usage: "Lock memory maps for recent ethash mining DAGs", } ExternalConsensusFlag = cli.BoolFlag{ - Name: "exeternal", + Name: "externalcl", Usage: "Enables the external consensus layer", } // Transaction pool settings From b2e581cbefc09360f80131cb1c2431a465dfc7c7 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Thu, 9 May 2024 09:55:09 +0700 Subject: [PATCH 04/48] e3: mainnet - add blocks files to 19.8M, remove state 
files - because (#10240) for https://github.com/ledgerwatch/erigon-snapshot/pull/157 Seems 1 state file is broken. ``` panic: runtime error: slice bounds out of range [20140373016:20140373015] [recovered] panic: file: v1-commitment.1280-1408.kv, runtime error: slice bounds out of range [20140373016:20140373015], [decompress.go:713 panic.go:770 panic.go:160 decompress.go:732 archive.go:62 btree_index.go:796 domain.go:1074 errgroup.go:78 asm_amd64.s:1695] ``` I will re-gen it. --- cmd/diag/downloader/diag_downloader.go | 1 - erigon-lib/go.mod | 2 +- erigon-lib/go.sum | 4 ++-- go.mod | 2 +- go.sum | 4 ++-- 5 files changed, 6 insertions(+), 7 deletions(-) diff --git a/cmd/diag/downloader/diag_downloader.go b/cmd/diag/downloader/diag_downloader.go index 99ef13e4aa4..2f8584dfc2c 100644 --- a/cmd/diag/downloader/diag_downloader.go +++ b/cmd/diag/downloader/diag_downloader.go @@ -2,7 +2,6 @@ package downloader import ( "fmt" - "time" "github.com/jedib0t/go-pretty/v6/table" diff --git a/erigon-lib/go.mod b/erigon-lib/go.mod index e98345a3288..d4d1be91a55 100644 --- a/erigon-lib/go.mod +++ b/erigon-lib/go.mod @@ -4,7 +4,7 @@ go 1.21 require ( github.com/erigontech/mdbx-go v0.38.0 - github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240505022337-08dce201593b + github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240509021536-cfec520d992e github.com/ledgerwatch/interfaces v0.0.0-20240502103437-1e84e26799a6 github.com/ledgerwatch/log/v3 v3.9.0 github.com/ledgerwatch/secp256k1 v1.0.0 diff --git a/erigon-lib/go.sum b/erigon-lib/go.sum index f072d1bfc54..ae314e3d662 100644 --- a/erigon-lib/go.sum +++ b/erigon-lib/go.sum @@ -270,8 +270,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240505022337-08dce201593b h1:lfllTgrcwFzFXX7c/L4i/xAj/8noP/yHNSmC8dDi08s= -github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240505022337-08dce201593b/go.mod h1:3AuPxZc85jkehh/HA9h8gabv5MSi3kb/ddtzBsTVJFo= +github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240509021536-cfec520d992e h1:DH8LIA9AsMLFeGzKjsiU9d6NjR5oA5JANNqWU9bJifg= +github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240509021536-cfec520d992e/go.mod h1:3AuPxZc85jkehh/HA9h8gabv5MSi3kb/ddtzBsTVJFo= github.com/ledgerwatch/interfaces v0.0.0-20240502103437-1e84e26799a6 h1:snFpr1kpUlT/ffEa29S9tGgu2uIaLJqA2wv9PuOlBvU= github.com/ledgerwatch/interfaces v0.0.0-20240502103437-1e84e26799a6/go.mod h1:ugQv1QllJzBny3cKZKxUrSnykkjkBgm27eQM6dnGAcc= github.com/ledgerwatch/log/v3 v3.9.0 h1:iDwrXe0PVwBC68Dd94YSsHbMgQ3ufsgjzXtFNFVZFRk= diff --git a/go.mod b/go.mod index d5de28c063b..4aae9024ef4 100644 --- a/go.mod +++ b/go.mod @@ -186,7 +186,7 @@ require ( github.com/koron/go-ssdp v0.0.4 // indirect github.com/kr/pretty v0.3.1 // indirect github.com/kr/text v0.2.0 // indirect - github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240505022337-08dce201593b // indirect + github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240509021536-cfec520d992e // indirect github.com/ledgerwatch/erigonwatch v0.1.0 github.com/libp2p/go-buffer-pool v0.1.0 // indirect github.com/libp2p/go-cidranger v1.1.0 // indirect diff --git a/go.sum b/go.sum index 3e097d5f3da..697467d97d2 100644 --- a/go.sum +++ b/go.sum @@ -539,8 +539,8 @@ github.com/kylelemons/godebug 
v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3P github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240505022337-08dce201593b h1:lfllTgrcwFzFXX7c/L4i/xAj/8noP/yHNSmC8dDi08s= -github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240505022337-08dce201593b/go.mod h1:3AuPxZc85jkehh/HA9h8gabv5MSi3kb/ddtzBsTVJFo= +github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240509021536-cfec520d992e h1:DH8LIA9AsMLFeGzKjsiU9d6NjR5oA5JANNqWU9bJifg= +github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240509021536-cfec520d992e/go.mod h1:3AuPxZc85jkehh/HA9h8gabv5MSi3kb/ddtzBsTVJFo= github.com/ledgerwatch/erigonwatch v0.1.0 h1:TrCjklOu9ZI9/uiMigo1Jnknnk1I/dXUxXymA3xHfzo= github.com/ledgerwatch/erigonwatch v0.1.0/go.mod h1:uYq4hs3RL1OtIYRXAxYq02tpdGkx6rtXlpzdazDDbWI= github.com/ledgerwatch/log/v3 v3.9.0 h1:iDwrXe0PVwBC68Dd94YSsHbMgQ3ufsgjzXtFNFVZFRk= From 0ea45b72c6f3eea9ef7fe79b4af046975e873501 Mon Sep 17 00:00:00 2001 From: Somnath Date: Thu, 9 May 2024 03:25:13 -0400 Subject: [PATCH 05/48] EIP-2935 Historical block hashes (#9991) Ref https://eips.ethereum.org/EIPS/eip-2935 --- consensus/merge/merge.go | 3 + consensus/misc/eip2935.go | 42 +++++++++++ core/vm/eips.go | 12 +++ core/vm/instructions.go | 48 ++++++++++-- core/vm/jump_table.go | 1 + core/vm/operations_acl.go | 19 +++++ core/vm/runtime/runtime.go | 2 +- core/vm/runtime/runtime_test.go | 127 ++++++++++++++++++++++++++++++-- params/protocol_params.go | 7 ++ 9 files changed, 246 insertions(+), 15 deletions(-) create mode 100644 consensus/misc/eip2935.go diff --git a/consensus/merge/merge.go b/consensus/merge/merge.go index c5544ef33bd..823be69a219 100644 --- a/consensus/merge/merge.go +++ b/consensus/merge/merge.go @@ -282,6 +282,9 @@ func (s *Merge) Initialize(config *chain.Config, chain consensus.ChainHeaderRead return syscall(addr, data, state, header, false /* constCall */) }) } + if chain.Config().IsPrague(header.Time) { + misc.StoreBlockHashesEip2935(header, state, config, chain) + } } func (s *Merge) APIs(chain consensus.ChainHeaderReader) []rpc.API { diff --git a/consensus/misc/eip2935.go b/consensus/misc/eip2935.go new file mode 100644 index 00000000000..64d4bef1586 --- /dev/null +++ b/consensus/misc/eip2935.go @@ -0,0 +1,42 @@ +package misc + +import ( + "github.com/holiman/uint256" + + "github.com/ledgerwatch/erigon-lib/chain" + libcommon "github.com/ledgerwatch/erigon-lib/common" + + "github.com/ledgerwatch/erigon/consensus" + "github.com/ledgerwatch/erigon/core/state" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/params" +) + +func StoreBlockHashesEip2935(header *types.Header, state *state.IntraBlockState, config *chain.Config, headerReader consensus.ChainHeaderReader) { + headerNum := header.Number.Uint64() + if headerNum == 0 { // Activation of fork at Genesis + return + } + storeHash(headerNum-1, header.ParentHash, state) + // If this is the fork block, add the parent's direct `HISTORY_SERVE_WINDOW - 1` ancestors as well + parent := headerReader.GetHeader(header.ParentHash, headerNum-1) + if parent.Time < config.PragueTime.Uint64() { + p := headerNum - 1 + window := params.BlockHashHistoryServeWindow - 1 + if p < window { + window = p + } + for i := window; i > 0; i-- { + p = p - 1 + storeHash(p, parent.ParentHash, state) 
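+			// move to the ancestor just recorded, so the next iteration stores its ParentHash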
+ parent = headerReader.GetHeader(parent.ParentHash, p) + } + } +} + +func storeHash(num uint64, hash libcommon.Hash, state *state.IntraBlockState) { + slotNum := num % params.BlockHashHistoryServeWindow + storageSlot := libcommon.BytesToHash(uint256.NewInt(slotNum).Bytes()) + parentHashInt := uint256.NewInt(0).SetBytes32(hash.Bytes()) + state.SetState(params.HistoryStorageAddress, &storageSlot, *parentHashInt) +} diff --git a/core/vm/eips.go b/core/vm/eips.go index 8d48f1a7b33..c05c41006fb 100644 --- a/core/vm/eips.go +++ b/core/vm/eips.go @@ -29,6 +29,7 @@ import ( ) var activators = map[int]func(*JumpTable){ + 2935: enable2935, 7516: enable7516, 6780: enable6780, 5656: enable5656, @@ -327,3 +328,14 @@ func enable7516(jt *JumpTable) { numPush: 1, } } + +// enable2935 applies EIP-2935 (Historical block hashes in state) +func enable2935(jt *JumpTable) { + jt[BLOCKHASH] = &operation{ + execute: opBlockhash2935, + constantGas: GasExtStep, + dynamicGas: gasOpBlockhashEIP2935, + numPop: 1, + numPush: 1, + } +} diff --git a/core/vm/instructions.go b/core/vm/instructions.go index b35de6adee6..21f9bf24d15 100644 --- a/core/vm/instructions.go +++ b/core/vm/instructions.go @@ -464,28 +464,60 @@ func opGasprice(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([ return nil, nil } +// opBlockhash executes the BLOCKHASH opcode pre-EIP-2935 func opBlockhash(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { - num := scope.Stack.Peek() - num64, overflow := num.Uint64WithOverflow() + arg := scope.Stack.Peek() + arg64, overflow := arg.Uint64WithOverflow() if overflow { - num.Clear() + arg.Clear() return nil, nil } var upper, lower uint64 upper = interpreter.evm.Context.BlockNumber - if upper < 257 { + if upper <= params.BlockHashOldWindow { lower = 0 } else { - lower = upper - 256 + lower = upper - params.BlockHashOldWindow } - if num64 >= lower && num64 < upper { - num.SetBytes(interpreter.evm.Context.GetHash(num64).Bytes()) + if arg64 >= lower && arg64 < upper { + arg.SetBytes(interpreter.evm.Context.GetHash(arg64).Bytes()) } else { - num.Clear() + arg.Clear() } return nil, nil } +// opBlockhash2935 executes for the BLOCKHASH opcode post EIP-2935 by returning the +// corresponding hash for the blocknumber from the state, if within range. 
+// The range is defined by [head - params.BlockHashHistoryServeWindow - 1, head - 1] +// This should not be used without activating EIP-2935 +func opBlockhash2935(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + arg := scope.Stack.Peek() + arg64, overflow := arg.Uint64WithOverflow() + if overflow { + arg.Clear() + return nil, nil + } + + // Check if arg is within allowed window + var upper uint64 + upper = interpreter.evm.Context.BlockNumber + if arg64 >= upper || arg64+params.BlockHashHistoryServeWindow < upper { + arg.Clear() + return nil, nil + } + + // Return state read value from the slot + storageSlot := libcommon.BytesToHash(uint256.NewInt(arg64 % params.BlockHashHistoryServeWindow).Bytes()) + interpreter.evm.intraBlockState.GetState( + params.HistoryStorageAddress, + &storageSlot, + arg, + ) + + return nil, nil +} + func opCoinbase(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { scope.Stack.Push(new(uint256.Int).SetBytes(interpreter.evm.Context.Coinbase.Bytes())) return nil, nil diff --git a/core/vm/jump_table.go b/core/vm/jump_table.go index 806ae494133..82c43dd3167 100644 --- a/core/vm/jump_table.go +++ b/core/vm/jump_table.go @@ -92,6 +92,7 @@ func validateAndFillMaxStack(jt *JumpTable) { // cancun, and prague instructions. func newPragueInstructionSet() JumpTable { instructionSet := newCancunInstructionSet() + enable2935(&instructionSet) validateAndFillMaxStack(&instructionSet) return instructionSet } diff --git a/core/vm/operations_acl.go b/core/vm/operations_acl.go index 6256ae5740b..1e1b68c6995 100644 --- a/core/vm/operations_acl.go +++ b/core/vm/operations_acl.go @@ -235,3 +235,22 @@ func makeSelfdestructGasFn(refundsEnabled bool) gasFunc { } return gasFunc } + +// gasOpBlockhashEIP2935 returns the gas for the new BLOCKHASH operation post EIP-2935 +// If arg is outside of the params.BlockHashHistoryServeWindow, zero dynamic gas is returned +// EIP-2929 Cold/Warm storage read cost is applicable here similar to SLOAD +func gasOpBlockhashEIP2935(evm *EVM, contract *Contract, stack *stack.Stack, mem *Memory, memorySize uint64) (uint64, error) { + arg := stack.Peek() + arg64, overflow := arg.Uint64WithOverflow() + if overflow { + return 0, nil + } + if arg64 >= evm.Context.BlockNumber || arg64+params.BlockHashHistoryServeWindow < evm.Context.BlockNumber { + return 0, nil + } + storageSlot := libcommon.BytesToHash(uint256.NewInt(arg64 % params.BlockHashHistoryServeWindow).Bytes()) + if _, slotMod := evm.IntraBlockState().AddSlotToAccessList(params.HistoryStorageAddress, storageSlot); slotMod { + return params.ColdSloadCostEIP2929, nil + } + return params.WarmStorageReadCostEIP2929, nil +} diff --git a/core/vm/runtime/runtime.go b/core/vm/runtime/runtime.go index 032d1b2e4d9..cec1e7078b1 100644 --- a/core/vm/runtime/runtime.go +++ b/core/vm/runtime/runtime.go @@ -111,8 +111,8 @@ func setDefaults(cfg *Config) { func Execute(code, input []byte, cfg *Config, bn uint64) ([]byte, *state.IntraBlockState, error) { if cfg == nil { cfg = new(Config) + setDefaults(cfg) } - setDefaults(cfg) externalState := cfg.State != nil var tx kv.RwTx diff --git a/core/vm/runtime/runtime_test.go b/core/vm/runtime/runtime_test.go index 1e326eea237..8553064707c 100644 --- a/core/vm/runtime/runtime_test.go +++ b/core/vm/runtime/runtime_test.go @@ -30,12 +30,15 @@ import ( "github.com/ledgerwatch/erigon/accounts/abi" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/consensus" + 
"github.com/ledgerwatch/erigon/consensus/misc" "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/core/asm" "github.com/ledgerwatch/erigon/core/state" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/core/vm" "github.com/ledgerwatch/erigon/eth/tracers/logger" + "github.com/ledgerwatch/erigon/params" + "github.com/ledgerwatch/erigon/rlp" ) func TestDefaults(t *testing.T) { @@ -235,7 +238,7 @@ func fakeHeader(n uint64, parentHash libcommon.Hash) *types.Header { Coinbase: libcommon.HexToAddress("0x00000000000000000000000000000000deadbeef"), Number: big.NewInt(int64(n)), ParentHash: parentHash, - Time: 1000, + Time: n, Nonce: types.BlockNonce{0x1}, Extra: []byte{}, Difficulty: big.NewInt(0), @@ -244,6 +247,45 @@ func fakeHeader(n uint64, parentHash libcommon.Hash) *types.Header { return &header } +// FakeChainHeaderReader implements consensus.ChainHeaderReader interface +type FakeChainHeaderReader struct{} + +func (cr *FakeChainHeaderReader) GetHeaderByHash(hash libcommon.Hash) *types.Header { + return nil +} +func (cr *FakeChainHeaderReader) GetHeaderByNumber(number uint64) *types.Header { + return cr.GetHeaderByHash(libcommon.BigToHash(big.NewInt(int64(number)))) +} +func (cr *FakeChainHeaderReader) Config() *chain.Config { return nil } +func (cr *FakeChainHeaderReader) CurrentHeader() *types.Header { return nil } + +// GetHeader returns a fake header with the parentHash equal to the number - 1 +func (cr *FakeChainHeaderReader) GetHeader(hash libcommon.Hash, number uint64) *types.Header { + return &types.Header{ + Coinbase: libcommon.HexToAddress("0x00000000000000000000000000000000deadbeef"), + Number: big.NewInt(int64(number)), + ParentHash: libcommon.BigToHash(big.NewInt(int64(number - 1))), + Time: number, + Nonce: types.BlockNonce{0x1}, + Extra: []byte{}, + Difficulty: big.NewInt(0), + GasLimit: 100000, + } +} +func (cr *FakeChainHeaderReader) GetBlock(hash libcommon.Hash, number uint64) *types.Block { + return nil +} +func (cr *FakeChainHeaderReader) HasBlock(hash libcommon.Hash, number uint64) bool { return false } +func (cr *FakeChainHeaderReader) GetTd(hash libcommon.Hash, number uint64) *big.Int { return nil } +func (cr *FakeChainHeaderReader) FrozenBlocks() uint64 { return 0 } +func (cr *FakeChainHeaderReader) BorEventsByBlock(hash libcommon.Hash, number uint64) []rlp.RawValue { + return nil +} +func (cr *FakeChainHeaderReader) BorStartEventID(hash libcommon.Hash, number uint64) uint64 { + return 0 +} +func (cr *FakeChainHeaderReader) BorSpan(spanId uint64) []byte { return nil } + type dummyChain struct { counter int } @@ -313,10 +355,14 @@ func TestBlockhash(t *testing.T) { // The method call to 'test()' input := libcommon.Hex2Bytes("f8a8fd6d") chain := &dummyChain{} - ret, _, err := Execute(data, input, &Config{ + cfg := &Config{ GetHashFn: core.GetHashFn(header, chain.GetHeader), BlockNumber: new(big.Int).Set(header.Number), - }, header.Number.Uint64()) + Time: new(big.Int), + } + setDefaults(cfg) + cfg.ChainConfig.PragueTime = big.NewInt(1) + ret, _, err := Execute(data, input, cfg, header.Number.Uint64()) if err != nil { t.Fatalf("expected no error, got %v", err) } @@ -341,6 +387,73 @@ func TestBlockhash(t *testing.T) { } } +func TestBlockHashEip2935(t *testing.T) { + t.Parallel() + + // This is the contract we're using. 
It requests the blockhash for current num (should be all zeroes), We are fetching BlockHash for current block (should be zer0), parent block, last block which is supposed to be there (head - HISTORY_SERVE_WINDOW) and also one block before that (should be zero) + + /* + pragma solidity ^0.8.25; + contract BlockHashTestPrague{ + function test() public view returns (bytes32, bytes32, bytes32, bytes32){ + uint256 head = block.number; + bytes32 zero = blockhash(head); + bytes32 first = blockhash(head-1); + bytes32 last = blockhash(head - 8192); + bytes32 beyond = blockhash(head - 8193); + return (zero, first, last, beyond); + } + } + */ + // The contract above + data := libcommon.Hex2Bytes("608060405234801561000f575f80fd5b5060043610610029575f3560e01c8063f8a8fd6d1461002d575b5f80fd5b61003561004e565b60405161004594939291906100bf565b60405180910390f35b5f805f805f4390505f814090505f6001836100699190610138565b4090505f6120008461007b9190610138565b4090505f6120018561008d9190610138565b409050838383839850985098509850505050505090919293565b5f819050919050565b6100b9816100a7565b82525050565b5f6080820190506100d25f8301876100b0565b6100df60208301866100b0565b6100ec60408301856100b0565b6100f960608301846100b0565b95945050505050565b5f819050919050565b7f4e487b71000000000000000000000000000000000000000000000000000000005f52601160045260245ffd5b5f61014282610102565b915061014d83610102565b92508282039050818111156101655761016461010b565b5b9291505056fea2646970667358221220bac67d00c05154c1dca13fe3c1493172d44692d312cb3fd72a3d7457874d595464736f6c63430008190033") + // The method call to 'test()' + input := libcommon.Hex2Bytes("f8a8fd6d") + + // Current head + n := uint64(10000) + parentHash := libcommon.Hash{} + s := common.LeftPadBytes(big.NewInt(int64(n-1)).Bytes(), 32) + copy(parentHash[:], s) + fakeHeaderReader := &FakeChainHeaderReader{} + header := fakeHeaderReader.GetHeader(libcommon.BigToHash(big.NewInt(int64(n))), n) + + chain := &dummyChain{} + cfg := &Config{ + GetHashFn: core.GetHashFn(header, chain.GetHeader), + BlockNumber: new(big.Int).Set(header.Number), + Time: big.NewInt(10000), + } + setDefaults(cfg) + cfg.ChainConfig.PragueTime = big.NewInt(10000) + _, tx := memdb.NewTestTx(t) + cfg.State = state.New(state.NewPlainStateReader(tx)) + cfg.State.CreateAccount(params.HistoryStorageAddress, true) + misc.StoreBlockHashesEip2935(header, cfg.State, cfg.ChainConfig, &FakeChainHeaderReader{}) + + ret, _, err := Execute(data, input, cfg, header.Number.Uint64()) + if err != nil { + t.Fatalf("expected no error, got %v", err) + } + if len(ret) != 128 { + t.Fatalf("expected returndata to be 128 bytes, got %d", len(ret)) + } + + zero := new(big.Int).SetBytes(ret[0:32]) + first := new(big.Int).SetBytes(ret[32:64]) + last := new(big.Int).SetBytes(ret[64:96]) + beyond := new(big.Int).SetBytes(ret[96:128]) + if zero.Sign() != 0 || beyond.Sign() != 0 { + t.Fatalf("expected zeroes, got %x %x", ret[0:32], ret[96:128]) + } + if first.Uint64() != 9999 { + t.Fatalf("first block should be 9999, got %d (%x)", first, ret[32:64]) + } + if last.Uint64() != 1808 { + t.Fatalf("last block should be 1808, got %d (%x)", last, ret[64:96]) + } +} + // benchmarkNonModifyingCode benchmarks code, but if the code modifies the // state, this should not be used, since it does not reset the state between runs. 
func benchmarkNonModifyingCode(b *testing.B, gas uint64, code []byte, name string) { //nolint:unparam @@ -521,14 +634,16 @@ func TestEip2929Cases(t *testing.T) { fmt.Printf("%v\n\nBytecode: \n```\n0x%x\n```\nOperations: \n```\n%v\n```\n\n", comment, code, ops) - //nolint:errcheck - Execute(code, nil, &Config{ + cfg := &Config{ EVMConfig: vm.Config{ Debug: true, Tracer: logger.NewMarkdownLogger(nil, os.Stdout), ExtraEips: []int{2929}, }, - }, 0) + } + setDefaults(cfg) + //nolint:errcheck + Execute(code, nil, cfg, 0) } { // First eip testcase diff --git a/params/protocol_params.go b/params/protocol_params.go index d760de8658d..05e4fe52d9f 100644 --- a/params/protocol_params.go +++ b/params/protocol_params.go @@ -171,11 +171,18 @@ const ( // PIP-27: secp256r1 elliptic curve signature verifier gas price P256VerifyGas uint64 = 3450 + + // EIP-2935: Historical block hashes in state + BlockHashHistoryServeWindow uint64 = 8192 + BlockHashOldWindow uint64 = 256 ) // EIP-4788: Beacon block root in the EVM var BeaconRootsAddress = common.HexToAddress("0x000F3df6D732807Ef1319fB7B8bB8522d0Beac02") +// EIP-2935: Historical block hashes in state +var HistoryStorageAddress = common.HexToAddress("0x25a219378dad9b3503c8268c9ca836a52427a4fb") + // Gas discount table for BLS12-381 G1 and G2 multi exponentiation operations var Bls12381MultiExpDiscountTable = [128]uint64{1200, 888, 764, 641, 594, 547, 500, 453, 438, 423, 408, 394, 379, 364, 349, 334, 330, 326, 322, 318, 314, 310, 306, 302, 298, 294, 289, 285, 281, 277, 273, 269, 268, 266, 265, 263, 262, 260, 259, 257, 256, 254, 253, 251, 250, 248, 247, 245, 244, 242, 241, 239, 238, 236, 235, 233, 232, 231, 229, 228, 226, 225, 223, 222, 221, 220, 219, 219, 218, 217, 216, 216, 215, 214, 213, 213, 212, 211, 211, 210, 209, 208, 208, 207, 206, 205, 205, 204, 203, 202, 202, 201, 200, 199, 199, 198, 197, 196, 196, 195, 194, 193, 193, 192, 191, 191, 190, 189, 188, 188, 187, 186, 185, 185, 184, 183, 182, 182, 181, 180, 179, 179, 178, 177, 176, 176, 175, 174} From b12053cece1028307caeb0bae64cc7328022a6da Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Thu, 9 May 2024 16:17:47 +0700 Subject: [PATCH 06/48] e3: bor-mainnet fix broken v1-054600-054700-borspans.seg (#10242) Pick https://github.com/ledgerwatch/erigon-snapshot/pull/159 --- erigon-lib/go.mod | 2 +- erigon-lib/go.sum | 4 ++-- go.mod | 2 +- go.sum | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/erigon-lib/go.mod b/erigon-lib/go.mod index d4d1be91a55..13f49b82879 100644 --- a/erigon-lib/go.mod +++ b/erigon-lib/go.mod @@ -4,7 +4,7 @@ go 1.21 require ( github.com/erigontech/mdbx-go v0.38.0 - github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240509021536-cfec520d992e + github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240509080801-633688230242 github.com/ledgerwatch/interfaces v0.0.0-20240502103437-1e84e26799a6 github.com/ledgerwatch/log/v3 v3.9.0 github.com/ledgerwatch/secp256k1 v1.0.0 diff --git a/erigon-lib/go.sum b/erigon-lib/go.sum index ae314e3d662..8e1fc9c5c5f 100644 --- a/erigon-lib/go.sum +++ b/erigon-lib/go.sum @@ -270,8 +270,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240509021536-cfec520d992e h1:DH8LIA9AsMLFeGzKjsiU9d6NjR5oA5JANNqWU9bJifg= 
-github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240509021536-cfec520d992e/go.mod h1:3AuPxZc85jkehh/HA9h8gabv5MSi3kb/ddtzBsTVJFo= +github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240509080801-633688230242 h1:EvaRNRzZc8OXCWutZnq93kdSSP9AiVuozwI8nTjeEYA= +github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240509080801-633688230242/go.mod h1:3AuPxZc85jkehh/HA9h8gabv5MSi3kb/ddtzBsTVJFo= github.com/ledgerwatch/interfaces v0.0.0-20240502103437-1e84e26799a6 h1:snFpr1kpUlT/ffEa29S9tGgu2uIaLJqA2wv9PuOlBvU= github.com/ledgerwatch/interfaces v0.0.0-20240502103437-1e84e26799a6/go.mod h1:ugQv1QllJzBny3cKZKxUrSnykkjkBgm27eQM6dnGAcc= github.com/ledgerwatch/log/v3 v3.9.0 h1:iDwrXe0PVwBC68Dd94YSsHbMgQ3ufsgjzXtFNFVZFRk= diff --git a/go.mod b/go.mod index 4aae9024ef4..ef7eb01df5c 100644 --- a/go.mod +++ b/go.mod @@ -186,7 +186,7 @@ require ( github.com/koron/go-ssdp v0.0.4 // indirect github.com/kr/pretty v0.3.1 // indirect github.com/kr/text v0.2.0 // indirect - github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240509021536-cfec520d992e // indirect + github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240509080801-633688230242 // indirect github.com/ledgerwatch/erigonwatch v0.1.0 github.com/libp2p/go-buffer-pool v0.1.0 // indirect github.com/libp2p/go-cidranger v1.1.0 // indirect diff --git a/go.sum b/go.sum index 697467d97d2..13d2c7d6f87 100644 --- a/go.sum +++ b/go.sum @@ -539,8 +539,8 @@ github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3P github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240509021536-cfec520d992e h1:DH8LIA9AsMLFeGzKjsiU9d6NjR5oA5JANNqWU9bJifg= -github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240509021536-cfec520d992e/go.mod h1:3AuPxZc85jkehh/HA9h8gabv5MSi3kb/ddtzBsTVJFo= +github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240509080801-633688230242 h1:EvaRNRzZc8OXCWutZnq93kdSSP9AiVuozwI8nTjeEYA= +github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240509080801-633688230242/go.mod h1:3AuPxZc85jkehh/HA9h8gabv5MSi3kb/ddtzBsTVJFo= github.com/ledgerwatch/erigonwatch v0.1.0 h1:TrCjklOu9ZI9/uiMigo1Jnknnk1I/dXUxXymA3xHfzo= github.com/ledgerwatch/erigonwatch v0.1.0/go.mod h1:uYq4hs3RL1OtIYRXAxYq02tpdGkx6rtXlpzdazDDbWI= github.com/ledgerwatch/log/v3 v3.9.0 h1:iDwrXe0PVwBC68Dd94YSsHbMgQ3ufsgjzXtFNFVZFRk= From fb716e155560f2bf1b90c56d6e744511882c39f9 Mon Sep 17 00:00:00 2001 From: seayyyy <163325936+seay404@users.noreply.github.com> Date: Thu, 9 May 2024 18:13:11 +0800 Subject: [PATCH 07/48] core/vm: remove redundant error check (#10246) --- core/vm/contracts.go | 6 ------ 1 file changed, 6 deletions(-) diff --git a/core/vm/contracts.go b/core/vm/contracts.go index 0542a10952b..d1a9d4c809c 100644 --- a/core/vm/contracts.go +++ b/core/vm/contracts.go @@ -1121,9 +1121,6 @@ func (c *bls12381MapFpToG1) Run(input []byte) ([]byte, error) { // Compute mapping r := bls12381.MapToG1(fe) - if err != nil { - return nil, err - } // Encode the G1 point to 128 bytes return encodePointG1(&r), nil @@ -1157,9 +1154,6 @@ func (c *bls12381MapFp2ToG2) Run(input []byte) ([]byte, error) { // Compute mapping r := bls12381.MapToG2(bls12381.E2{A0: c0, A1: c1}) - if err != nil { - return nil, err - } // Encode the G2 point to 256 bytes return encodePointG2(&r), nil From 
bd6f31c5da60e0ed79d2f0feb9709813f4705978 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Thu, 9 May 2024 23:38:17 +0700 Subject: [PATCH 08/48] e3: reduce prune collector size - to allow background sort (#10258) --- erigon-lib/state/domain.go | 4 ++-- erigon-lib/state/history.go | 6 +++--- erigon-lib/state/inverted_index.go | 9 +++++---- 3 files changed, 10 insertions(+), 9 deletions(-) diff --git a/erigon-lib/state/domain.go b/erigon-lib/state/domain.go index 2239b990405..e82e77bbf63 100644 --- a/erigon-lib/state/domain.go +++ b/erigon-lib/state/domain.go @@ -456,8 +456,8 @@ func (dt *DomainRoTx) newWriter(tmpdir string, discard bool) *domainBufferedWrit aux: make([]byte, 0, 128), keysTable: dt.d.keysTable, valsTable: dt.d.valsTable, - keys: etl.NewCollector(dt.d.keysTable, tmpdir, etl.NewSortableBuffer(WALCollectorRAM), dt.d.logger), - values: etl.NewCollector(dt.d.valsTable, tmpdir, etl.NewSortableBuffer(WALCollectorRAM), dt.d.logger), + keys: etl.NewCollector("flush "+dt.d.keysTable, tmpdir, etl.NewSortableBuffer(WALCollectorRAM), dt.d.logger), + values: etl.NewCollector("flush "+dt.d.valsTable, tmpdir, etl.NewSortableBuffer(WALCollectorRAM), dt.d.logger), h: dt.ht.newWriter(tmpdir, discardHistory), } diff --git a/erigon-lib/state/history.go b/erigon-lib/state/history.go index 9c7f77a147a..71987862344 100644 --- a/erigon-lib/state/history.go +++ b/erigon-lib/state/history.go @@ -515,7 +515,7 @@ func (ht *HistoryRoTx) newWriter(tmpdir string, discard bool) *historyBufferedWr historyKey: make([]byte, 128), largeValues: ht.h.historyLargeValues, historyValsTable: ht.h.historyValsTable, - historyVals: etl.NewCollector(ht.h.historyValsTable, tmpdir, etl.NewSortableBuffer(WALCollectorRAM), ht.h.logger), + historyVals: etl.NewCollector("flush "+ht.h.historyValsTable, tmpdir, etl.NewSortableBuffer(WALCollectorRAM), ht.h.logger), ii: ht.iit.newWriter(tmpdir, discard), } @@ -598,7 +598,7 @@ func (h *History) collate(ctx context.Context, step, txFrom, txTo uint64, roTx k defer keysCursor.Close() binary.BigEndian.PutUint64(txKey[:], txFrom) - collector := etl.NewCollector(h.historyValsTable, h.iiCfg.dirs.Tmp, etl.NewSortableBuffer(CollateETLRAM), h.logger) + collector := etl.NewCollector("collate "+h.historyValsTable, h.iiCfg.dirs.Tmp, etl.NewSortableBuffer(CollateETLRAM), h.logger) defer collector.Close() for txnmb, k, err := keysCursor.Seek(txKey[:]); err == nil && txnmb != nil; txnmb, k, err = keysCursor.Next() { @@ -636,7 +636,7 @@ func (h *History) collate(ctx context.Context, step, txFrom, txTo uint64, roTx k defer cd.Close() } - efComp, err := seg.NewCompressor(ctx, "ef history", efHistoryPath, h.dirs.Tmp, seg.MinPatternScore, h.compressWorkers, log.LvlTrace, h.logger) + efComp, err := seg.NewCompressor(ctx, "collate ef history", efHistoryPath, h.dirs.Tmp, seg.MinPatternScore, h.compressWorkers, log.LvlTrace, h.logger) if err != nil { return HistoryCollation{}, fmt.Errorf("create %s ef history compressor: %w", h.filenameBase, err) } diff --git a/erigon-lib/state/inverted_index.go b/erigon-lib/state/inverted_index.go index 781121116e9..7e10eef076a 100644 --- a/erigon-lib/state/inverted_index.go +++ b/erigon-lib/state/inverted_index.go @@ -514,8 +514,8 @@ func (iit *InvertedIndexRoTx) newWriter(tmpdir string, discard bool) *invertedIn indexKeysTable: iit.ii.indexKeysTable, indexTable: iit.ii.indexTable, // etl collector doesn't fsync: means if have enough ram, all files produced by all collectors will be in ram - indexKeys: etl.NewCollector(iit.ii.indexKeysTable, tmpdir, 
etl.NewSortableBuffer(WALCollectorRAM), iit.ii.logger),
-		index:     etl.NewCollector(iit.ii.indexTable, tmpdir, etl.NewSortableBuffer(WALCollectorRAM), iit.ii.logger),
+		indexKeys: etl.NewCollector("flush "+iit.ii.indexKeysTable, tmpdir, etl.NewSortableBuffer(WALCollectorRAM), iit.ii.logger),
+		index:     etl.NewCollector("flush "+iit.ii.indexTable, tmpdir, etl.NewSortableBuffer(WALCollectorRAM), iit.ii.logger),
 	}
 	w.indexKeys.LogLvl(log.LvlTrace)
 	w.index.LogLvl(log.LvlTrace)
@@ -895,7 +895,7 @@ func (iit *InvertedIndexRoTx) Prune(ctx context.Context, rwTx kv.RwTx, txFrom, t
 		return nil, nil
 	}

-	collector := etl.NewCollector("snapshots", ii.dirs.Tmp, etl.NewOldestEntryBuffer(etl.BufferOptimalSize), ii.logger)
+	collector := etl.NewCollector("snapshots", ii.dirs.Tmp, etl.NewOldestEntryBuffer(etl.BufferOptimalSize/8), ii.logger)
 	defer collector.Close()
 	collector.LogLvl(log.LvlDebug)
 	collector.SortAndFlushInBackground(true)
@@ -1490,8 +1490,9 @@ func (ii *InvertedIndex) collate(ctx context.Context, step uint64, roTx kv.Tx) (
 	}
 	defer keysCursor.Close()

-	collector := etl.NewCollector(ii.indexKeysTable, ii.iiCfg.dirs.Tmp, etl.NewSortableBuffer(CollateETLRAM), ii.logger)
+	collector := etl.NewCollector("collate "+ii.indexKeysTable, ii.iiCfg.dirs.Tmp, etl.NewSortableBuffer(CollateETLRAM), ii.logger)
 	defer collector.Close()
+	collector.LogLvl(log.LvlTrace)

 	var txKey [8]byte
 	binary.BigEndian.PutUint64(txKey[:], txFrom)

From fd7537ec9681523473c8f40aacece20843d5efba Mon Sep 17 00:00:00 2001
From: Willian Mitsuda
Date: Thu, 9 May 2024 23:10:01 -0300
Subject: [PATCH 09/48] Simplify WrapKV in order to prevent breaking KV
 invariant 2 (#10264)

This is similar to https://github.com/ledgerwatch/erigon/pull/10206,
but I think this code is not actually exercised, because it seems it
would only be used if the first iter from `MergeKVS` is `nil`.

Anyway, since the code exists, it could be prone to the same double
caching issue, and can be simplified by turning it into a pass-through
adapter.

Also removed the `nil` check on `Close()` from both iterators, since
the `nil` check is done in the constructor and the iterator doesn't
keep internal state anymore.
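For illustration, a pass-through adapter in this shape delegates every
call directly to the wrapped iterator, so no element can be buffered
between `HasNext` and `Next` (a minimal sketch of the pattern with
stand-in types, not the exact erigon-lib interfaces; see the real
change in the diff below):

```go
// kvStream is a stand-in for the kv iterator interface used here.
type kvStream interface {
	HasNext() bool
	Next() ([]byte, []byte, error)
	Close()
}

// passThroughKVS adds a zero timestamp column to a KV stream without
// caching: every call is answered by the wrapped iterator itself, so
// the wrapper can never get out of sync with it.
type passThroughKVS struct{ y kvStream }

func (m *passThroughKVS) HasNext() bool { return m.y.HasNext() }

func (m *passThroughKVS) Next() ([]byte, []byte, uint64, error) {
	k, v, err := m.y.Next()
	return k, v, 0, err
}

func (m *passThroughKVS) Close() { m.y.Close() }
```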
--- erigon-lib/kv/iter/iter_exact.go | 39 ++++++-------------------------- 1 file changed, 7 insertions(+), 32 deletions(-) diff --git a/erigon-lib/kv/iter/iter_exact.go b/erigon-lib/kv/iter/iter_exact.go index 3eb3d8ecc9d..8261a39dc82 100644 --- a/erigon-lib/kv/iter/iter_exact.go +++ b/erigon-lib/kv/iter/iter_exact.go @@ -173,48 +173,27 @@ func (m *UnionKVIter) Close() { } type WrapKVSIter struct { - y KV - yHasNext bool - yNextK, yNextV []byte - err error + y KV } func WrapKVS(y KV) KVS { if y == nil { return EmptyKVS } - m := &WrapKVSIter{y: y} - m.advance() - return m + return &WrapKVSIter{y: y} } func (m *WrapKVSIter) HasNext() bool { - return m.err != nil || m.yHasNext -} -func (m *WrapKVSIter) advance() { - if m.err != nil { - return - } - m.yHasNext = m.y.HasNext() - if m.yHasNext { - m.yNextK, m.yNextV, m.err = m.y.Next() - } + return m.y.HasNext() } + func (m *WrapKVSIter) Next() ([]byte, []byte, uint64, error) { - if m.err != nil { - return nil, nil, 0, m.err - } - k, v, err := m.yNextK, m.yNextV, m.err - m.advance() + k, v, err := m.y.Next() return k, v, 0, err } -// func (m *WrapKVSIter) ToArray() (keys, values [][]byte, err error) { return ToArrayKV(m) } func (m *WrapKVSIter) Close() { - if m.y != nil { - m.y.Close() - m.y = nil - } + m.y.Close() } type WrapKVIter struct { @@ -237,12 +216,8 @@ func (m *WrapKVIter) Next() ([]byte, []byte, error) { return k, v, err } -// func (m *WrapKVIter) ToArray() (keys, values [][]byte, err error) { return ToArrayKV(m) } func (m *WrapKVIter) Close() { - if m.x != nil { - m.x.Close() - m.x = nil - } + m.x.Close() } // MergedKV - merge 2 kv.Pairs streams (without replacements, or "shadowing", From 65d5b8001183f51c902f1038aef5ffbcc7e1663e Mon Sep 17 00:00:00 2001 From: Dmytro Date: Fri, 10 May 2024 03:19:42 +0100 Subject: [PATCH 10/48] diagnostics: refactor api url (#10262) --- cmd/diag/db/db.go | 4 ++-- cmd/diag/downloader/diag_downloader.go | 2 +- cmd/diag/flags/flags.go | 2 ++ cmd/diag/stages/stages.go | 2 +- 4 files changed, 6 insertions(+), 4 deletions(-) diff --git a/cmd/diag/db/db.go b/cmd/diag/db/db.go index 6600b126ba7..412902505e2 100644 --- a/cmd/diag/db/db.go +++ b/cmd/diag/db/db.go @@ -173,7 +173,7 @@ func DBsInfo(cliCtx *cli.Context) ([]DBInfo, error) { func getAllDbsNames(cliCtx *cli.Context) ([]string, error) { var data []string - url := "http://" + cliCtx.String(flags.DebugURLFlag.Name) + "/debug/diag/dbs" + url := "http://" + cliCtx.String(flags.DebugURLFlag.Name) + flags.ApiPath + "/dbs" err := util.MakeHttpGetCall(cliCtx.Context, url, &data) if err != nil { @@ -185,7 +185,7 @@ func getAllDbsNames(cliCtx *cli.Context) ([]string, error) { func getDb(cliCtx *cli.Context, dbName string) ([]BDTableInfo, error) { var data []BDTableInfo - url := "http://" + cliCtx.String(flags.DebugURLFlag.Name) + "/debug/diag/dbs/" + dbName + "/tables" + url := "http://" + cliCtx.String(flags.DebugURLFlag.Name) + flags.ApiPath + "/dbs/" + dbName + "/tables" err := util.MakeHttpGetCall(cliCtx.Context, url, &data) if err != nil { diff --git a/cmd/diag/downloader/diag_downloader.go b/cmd/diag/downloader/diag_downloader.go index 2f8584dfc2c..ab5aaf496bc 100644 --- a/cmd/diag/downloader/diag_downloader.go +++ b/cmd/diag/downloader/diag_downloader.go @@ -268,7 +268,7 @@ func getFileDownloadRate(peers []diagnostics.SegmentPeer) uint64 { func getData(cliCtx *cli.Context) (diagnostics.SyncStatistics, error) { var data diagnostics.SyncStatistics - url := "http://" + cliCtx.String(flags.DebugURLFlag.Name) + "/debug/diag/snapshot-sync" + url := "http://" + 
---
 cmd/diag/db/db.go                      | 4 ++--
 cmd/diag/downloader/diag_downloader.go | 2 +-
 cmd/diag/flags/flags.go                | 2 ++
 cmd/diag/stages/stages.go              | 2 +-
 4 files changed, 6 insertions(+), 4 deletions(-)

diff --git a/cmd/diag/db/db.go b/cmd/diag/db/db.go
index 6600b126ba7..412902505e2 100644
--- a/cmd/diag/db/db.go
+++ b/cmd/diag/db/db.go
@@ -173,7 +173,7 @@ func DBsInfo(cliCtx *cli.Context) ([]DBInfo, error) {
 
 func getAllDbsNames(cliCtx *cli.Context) ([]string, error) {
 	var data []string
-	url := "http://" + cliCtx.String(flags.DebugURLFlag.Name) + "/debug/diag/dbs"
+	url := "http://" + cliCtx.String(flags.DebugURLFlag.Name) + flags.ApiPath + "/dbs"
 
 	err := util.MakeHttpGetCall(cliCtx.Context, url, &data)
 	if err != nil {
@@ -185,7 +185,7 @@ func getAllDbsNames(cliCtx *cli.Context) ([]string, error) {
 
 func getDb(cliCtx *cli.Context, dbName string) ([]BDTableInfo, error) {
 	var data []BDTableInfo
-	url := "http://" + cliCtx.String(flags.DebugURLFlag.Name) + "/debug/diag/dbs/" + dbName + "/tables"
+	url := "http://" + cliCtx.String(flags.DebugURLFlag.Name) + flags.ApiPath + "/dbs/" + dbName + "/tables"
 
 	err := util.MakeHttpGetCall(cliCtx.Context, url, &data)
 	if err != nil {
diff --git a/cmd/diag/downloader/diag_downloader.go b/cmd/diag/downloader/diag_downloader.go
index 2f8584dfc2c..ab5aaf496bc 100644
--- a/cmd/diag/downloader/diag_downloader.go
+++ b/cmd/diag/downloader/diag_downloader.go
@@ -268,7 +268,7 @@ func getFileDownloadRate(peers []diagnostics.SegmentPeer) uint64 {
 
 func getData(cliCtx *cli.Context) (diagnostics.SyncStatistics, error) {
 	var data diagnostics.SyncStatistics
-	url := "http://" + cliCtx.String(flags.DebugURLFlag.Name) + "/debug/diag/snapshot-sync"
+	url := "http://" + cliCtx.String(flags.DebugURLFlag.Name) + flags.ApiPath + "/snapshot-sync"
 
 	err := util.MakeHttpGetCall(cliCtx.Context, url, &data)
 
diff --git a/cmd/diag/flags/flags.go b/cmd/diag/flags/flags.go
index a172bfb3f3e..c8ecdc0f0ae 100644
--- a/cmd/diag/flags/flags.go
+++ b/cmd/diag/flags/flags.go
@@ -3,6 +3,8 @@ package flags
 import "github.com/urfave/cli/v2"
 
 var (
+	ApiPath = "/debug/diag"
+
 	DebugURLFlag = cli.StringFlag{
 		Name:    "debug.addr",
 		Aliases: []string{"da"},
diff --git a/cmd/diag/stages/stages.go b/cmd/diag/stages/stages.go
index 9addc0ff585..efbf9d39f91 100644
--- a/cmd/diag/stages/stages.go
+++ b/cmd/diag/stages/stages.go
@@ -32,7 +32,7 @@ var Command = cli.Command{
 
 func printCurentStage(cliCtx *cli.Context) error {
 	var data diagnostics.SyncStatistics
-	url := "http://" + cliCtx.String(flags.DebugURLFlag.Name) + "/debug/diag/snapshot-sync"
+	url := "http://" + cliCtx.String(flags.DebugURLFlag.Name) + flags.ApiPath + "/snapshot-sync"
 
 	err := util.MakeHttpGetCall(cliCtx.Context, url, &data)
 	if err != nil {

From e75d359248380718f6f2671eea62e43bdce7c8c7 Mon Sep 17 00:00:00 2001
From: Giulio rebuffo
Date: Fri, 10 May 2024 04:20:20 +0200
Subject: [PATCH 11/48] Fixed E3 <-> Caplin on Gnosis, Holesky and Chiado
 (#10265)

---
 eth/backend.go | 29 +++++++++++++++--------------
 1 file changed, 15 insertions(+), 14 deletions(-)

diff --git a/eth/backend.go b/eth/backend.go
index e870aee9f51..1d3ebecb2d7 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -897,23 +897,11 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger
 	backend.pipelineStagedSync = stagedsync.New(config.Sync, pipelineStages, stagedsync.PipelineUnwindOrder, stagedsync.PipelinePruneOrder, logger)
 	backend.eth1ExecutionServer = eth1.NewEthereumExecutionModule(blockReader, backend.chainDB, backend.pipelineStagedSync, backend.forkValidator, chainConfig, assembleBlockPOS, hook, backend.notifications.Accumulator, backend.notifications.StateChangesConsumer, logger, backend.engine, config.HistoryV3, config.Sync, ctx)
 	executionRpc := direct.NewExecutionClientDirect(backend.eth1ExecutionServer)
-	engineBackendRPC := engineapi.NewEngineServer(
-		logger,
-		chainConfig,
-		executionRpc,
-		backend.sentriesClient.Hd,
-		engine_block_downloader.NewEngineBlockDownloader(ctx,
-			logger, backend.sentriesClient.Hd, executionRpc,
-			backend.sentriesClient.Bd, backend.sentriesClient.BroadcastNewBlock, backend.sentriesClient.SendBodyRequest, blockReader,
-			backend.chainDB, chainConfig, tmpdir, config.Sync),
-		config.InternalCL,
-		false,
-		config.Miner.EnabledPOS)
-	backend.engineBackendRPC = engineBackendRPC
 
 	var executionEngine executionclient.ExecutionEngine
 
+	caplinUseEngineAPI := config.NetworkID == uint64(clparams.GnosisNetwork) || config.NetworkID == uint64(clparams.HoleskyNetwork) || config.NetworkID == uint64(clparams.GoerliNetwork) // Gnosis has too few blocks on its network for phase2 to work. Once we have proper snapshot automation, it can go back to normal.
-	if config.NetworkID == uint64(clparams.GnosisNetwork) || config.NetworkID == uint64(clparams.HoleskyNetwork) || config.NetworkID == uint64(clparams.GoerliNetwork) {
+	if caplinUseEngineAPI {
 		// Read the jwt secret
 		jwtSecret, err := cli.ObtainJWTSecret(&stack.Config().Http, logger)
 		if err != nil {
@@ -929,6 +917,19 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger
 			return nil, err
 		}
 	}
+	engineBackendRPC := engineapi.NewEngineServer(
+		logger,
+		chainConfig,
+		executionRpc,
+		backend.sentriesClient.Hd,
+		engine_block_downloader.NewEngineBlockDownloader(ctx,
+			logger, backend.sentriesClient.Hd, executionRpc,
+			backend.sentriesClient.Bd, backend.sentriesClient.BroadcastNewBlock, backend.sentriesClient.SendBodyRequest, blockReader,
+			backend.chainDB, chainConfig, tmpdir, config.Sync),
+		config.InternalCL && !caplinUseEngineAPI, // If the chain supports the engine API, then we should not make the server fail.
+		false,
+		config.Miner.EnabledPOS)
+	backend.engineBackendRPC = engineBackendRPC
 
 	// If we choose not to run a consensus layer, run our embedded.
 	if config.InternalCL && clparams.EmbeddedSupported(config.NetworkID) {

From 683537e0e7ea609b51943199ab51c53f27b86dc6 Mon Sep 17 00:00:00 2001
From: Alex Sharov
Date: Fri, 10 May 2024 11:34:06 +0700
Subject: [PATCH 12/48] e3: rename HistoryGet to HistorySeek (#10270)

- rename HistoryGet to HistorySeek
- ii.Seek to ii.seekInFiles
- h.GetNoState to h.historySeekInFiles
- h.GetNoStateWithRecent to h.HistorySeek
- h.getNoStateFromDB to h.historySeekInDB
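
For API consumers this is a pure rename; the method signatures are
unchanged. A rough before/after sketch of a call site (illustrative
only, with hypothetical variable names; the real changes are in the
diff below):

    // before
    v, ok, err := ttx.HistoryGet(kv.AccountsHistory, key, ts)

    // after
    v, ok, err := ttx.HistorySeek(kv.AccountsHistory, key, ts)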
---
 Makefile                                          |   2 +-
 erigon-lib/go.mod                                 |   2 +-
 erigon-lib/go.sum                                 |   4 +-
 erigon-lib/gointerfaces/remoteproto/kv.pb.go      | 331 +++++++++---------
 .../remoteproto/kv_client_mock.go                 |  52 +--
 erigon-lib/gointerfaces/remoteproto/kv_grpc.pb.go |  30 +-
 erigon-lib/kv/kv_interface.go                     |  10 +-
 erigon-lib/kv/mdbx/kv_abstract_test.go            |   2 +-
 erigon-lib/kv/mdbx/kv_mdbx.go                     |   2 +-
 .../kv/membatchwithdb/memory_mutation.go          |   4 +-
 erigon-lib/kv/remotedb/kv_remote.go               |   6 +-
 .../kv/remotedbserver/remotedbserver.go           |   8 +-
 erigon-lib/kv/tables.go                           |   6 +-
 erigon-lib/kv/temporal/kv_temporal.go             |   4 +-
 erigon-lib/state/aggregator.go                    |  12 +-
 erigon-lib/state/bps_tree.go                      |   3 +-
 erigon-lib/state/btree_index.go                   |   4 +-
 erigon-lib/state/domain.go                        |   2 +-
 erigon-lib/state/domain_shared.go                 |   2 +-
 erigon-lib/state/domain_shared_bench_test.go      |   2 +-
 erigon-lib/state/history.go                       | 154 ++++----
 erigon-lib/state/history_test.go                  |  14 +-
 erigon-lib/state/inverted_index.go                |   2 +-
 erigon-lib/types/txn.go                           |   2 +-
 eth/stagedsync/stage_custom_trace.go              |   2 +-
 ethdb/walk.go                                     |   2 +-
 p2p/enode/nodedb.go                               |   2 +-
 polygon/p2p/service_mock.go                       |   8 +-
 turbo/jsonrpc/otterscan_contract_creator.go       |   4 +-
 ...terscan_transaction_by_sender_and_nonce.go     |   4 +-
 turbo/trie/trie_root.go                           |   2 +-
 31 files changed, 346 insertions(+), 338 deletions(-)

diff --git a/Makefile b/Makefile
index 44afb3bac80..0dcbf5f9df5 100644
--- a/Makefile
+++ b/Makefile
@@ -211,7 +211,7 @@ mocks: mocks-clean
 
 ## mocks-clean: cleans all generated test mocks
 mocks-clean:
-	grep -r -l --exclude-dir="erigon-lib" --exclude-dir="*$(GOBINREL)*" "^// Code generated by MockGen. DO NOT EDIT.$$" . | xargs rm -r
+	grep -r -l --exclude-dir="erigon-lib" --exclude-dir="tests" --exclude-dir="*$(GOBINREL)*" "^// Code generated by MockGen. DO NOT EDIT.$$" . | xargs rm -r
 
 ## solc: generate all solidity contracts
 solc:
diff --git a/erigon-lib/go.mod b/erigon-lib/go.mod
index 13f49b82879..2d2a49177b6 100644
--- a/erigon-lib/go.mod
+++ b/erigon-lib/go.mod
@@ -5,7 +5,7 @@ go 1.21
 require (
 	github.com/erigontech/mdbx-go v0.38.0
 	github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240509080801-633688230242
-	github.com/ledgerwatch/interfaces v0.0.0-20240502103437-1e84e26799a6
+	github.com/ledgerwatch/interfaces v0.0.0-20240510032129-13f644ca2307
 	github.com/ledgerwatch/log/v3 v3.9.0
 	github.com/ledgerwatch/secp256k1 v1.0.0
 )
diff --git a/erigon-lib/go.sum b/erigon-lib/go.sum
index 8e1fc9c5c5f..c02a381b093 100644
--- a/erigon-lib/go.sum
+++ b/erigon-lib/go.sum
@@ -272,8 +272,8 @@ github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7
 github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8=
 github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240509080801-633688230242 h1:EvaRNRzZc8OXCWutZnq93kdSSP9AiVuozwI8nTjeEYA=
 github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240509080801-633688230242/go.mod h1:3AuPxZc85jkehh/HA9h8gabv5MSi3kb/ddtzBsTVJFo=
-github.com/ledgerwatch/interfaces v0.0.0-20240502103437-1e84e26799a6 h1:snFpr1kpUlT/ffEa29S9tGgu2uIaLJqA2wv9PuOlBvU=
-github.com/ledgerwatch/interfaces v0.0.0-20240502103437-1e84e26799a6/go.mod h1:ugQv1QllJzBny3cKZKxUrSnykkjkBgm27eQM6dnGAcc=
+github.com/ledgerwatch/interfaces v0.0.0-20240510032129-13f644ca2307 h1:v2syJaHSCTSEnzwFUW4F6FL92ZAnKEoyBesnm2E/IEU=
+github.com/ledgerwatch/interfaces v0.0.0-20240510032129-13f644ca2307/go.mod h1:ugQv1QllJzBny3cKZKxUrSnykkjkBgm27eQM6dnGAcc=
 github.com/ledgerwatch/log/v3 v3.9.0 h1:iDwrXe0PVwBC68Dd94YSsHbMgQ3ufsgjzXtFNFVZFRk=
 github.com/ledgerwatch/log/v3 v3.9.0/go.mod h1:EiAY6upmI/6LkNhOVxb4eVsmsP11HZCnZ3PlJMjYiqE=
 github.com/ledgerwatch/secp256k1 v1.0.0 h1:Usvz87YoTG0uePIV8woOof5cQnLXGYa162rFf3YnwaQ=
diff --git a/erigon-lib/gointerfaces/remoteproto/kv.pb.go b/erigon-lib/gointerfaces/remoteproto/kv.pb.go
index edd6463bf1d..b88e9200724 100644
--- a/erigon-lib/gointerfaces/remoteproto/kv.pb.go
+++ b/erigon-lib/gointerfaces/remoteproto/kv.pb.go
@@ -1085,7 +1085,7 @@ func (x *DomainGetReply) GetOk() bool {
 	return false
 }
 
-type HistoryGetReq struct {
+type HistorySeekReq struct {
 	state         protoimpl.MessageState
 	sizeCache     protoimpl.SizeCache
 	unknownFields protoimpl.UnknownFields
@@ -1096,8 +1096,8 @@ type HistoryGetReq struct {
 	Ts    uint64 `protobuf:"varint,4,opt,name=ts,proto3" json:"ts,omitempty"`
 }
 
-func (x *HistoryGetReq) Reset() {
-	*x = HistoryGetReq{}
+func (x *HistorySeekReq) Reset() {
+	*x = HistorySeekReq{}
 	if protoimpl.UnsafeEnabled {
 		mi := &file_remote_kv_proto_msgTypes[12]
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@@ -1105,13 +1105,13 @@ func (x *HistoryGetReq) Reset() {
 	}
 }
 
-func (x *HistoryGetReq) String() string {
+func (x *HistorySeekReq) String() string {
 	return protoimpl.X.MessageStringOf(x)
 }
 
-func (*HistoryGetReq) ProtoMessage() {}
+func (*HistorySeekReq) ProtoMessage() {}
 
-func (x *HistoryGetReq) ProtoReflect() protoreflect.Message {
+func (x *HistorySeekReq) ProtoReflect() protoreflect.Message {
 	mi := &file_remote_kv_proto_msgTypes[12]
 	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@@ -1123,40 +1123,40 @@ func (x *HistoryGetReq) ProtoReflect() protoreflect.Message {
 	return mi.MessageOf(x)
 }
 
-// Deprecated: Use HistoryGetReq.ProtoReflect.Descriptor instead.
-func (*HistoryGetReq) Descriptor() ([]byte, []int) { +// Deprecated: Use HistorySeekReq.ProtoReflect.Descriptor instead. +func (*HistorySeekReq) Descriptor() ([]byte, []int) { return file_remote_kv_proto_rawDescGZIP(), []int{12} } -func (x *HistoryGetReq) GetTxId() uint64 { +func (x *HistorySeekReq) GetTxId() uint64 { if x != nil { return x.TxId } return 0 } -func (x *HistoryGetReq) GetTable() string { +func (x *HistorySeekReq) GetTable() string { if x != nil { return x.Table } return "" } -func (x *HistoryGetReq) GetK() []byte { +func (x *HistorySeekReq) GetK() []byte { if x != nil { return x.K } return nil } -func (x *HistoryGetReq) GetTs() uint64 { +func (x *HistorySeekReq) GetTs() uint64 { if x != nil { return x.Ts } return 0 } -type HistoryGetReply struct { +type HistorySeekReply struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields @@ -1165,8 +1165,8 @@ type HistoryGetReply struct { Ok bool `protobuf:"varint,2,opt,name=ok,proto3" json:"ok,omitempty"` } -func (x *HistoryGetReply) Reset() { - *x = HistoryGetReply{} +func (x *HistorySeekReply) Reset() { + *x = HistorySeekReply{} if protoimpl.UnsafeEnabled { mi := &file_remote_kv_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -1174,13 +1174,13 @@ func (x *HistoryGetReply) Reset() { } } -func (x *HistoryGetReply) String() string { +func (x *HistorySeekReply) String() string { return protoimpl.X.MessageStringOf(x) } -func (*HistoryGetReply) ProtoMessage() {} +func (*HistorySeekReply) ProtoMessage() {} -func (x *HistoryGetReply) ProtoReflect() protoreflect.Message { +func (x *HistorySeekReply) ProtoReflect() protoreflect.Message { mi := &file_remote_kv_proto_msgTypes[13] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -1192,19 +1192,19 @@ func (x *HistoryGetReply) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use HistoryGetReply.ProtoReflect.Descriptor instead. -func (*HistoryGetReply) Descriptor() ([]byte, []int) { +// Deprecated: Use HistorySeekReply.ProtoReflect.Descriptor instead. 
+func (*HistorySeekReply) Descriptor() ([]byte, []int) { return file_remote_kv_proto_rawDescGZIP(), []int{13} } -func (x *HistoryGetReply) GetV() []byte { +func (x *HistorySeekReply) GetV() []byte { if x != nil { return x.V } return nil } -func (x *HistoryGetReply) GetOk() bool { +func (x *HistorySeekReply) GetOk() bool { if x != nil { return x.Ok } @@ -1891,143 +1891,144 @@ var file_remote_kv_proto_rawDesc = []byte{ 0x74, 0x65, 0x73, 0x74, 0x22, 0x2e, 0x0a, 0x0e, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x0c, 0x0a, 0x01, 0x76, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x01, 0x76, 0x12, 0x0e, 0x0a, 0x02, 0x6f, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x02, 0x6f, 0x6b, 0x22, 0x58, 0x0a, 0x0d, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x47, - 0x65, 0x74, 0x52, 0x65, 0x71, 0x12, 0x13, 0x0a, 0x05, 0x74, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x74, 0x78, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x12, 0x0c, 0x0a, 0x01, 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x01, 0x6b, 0x12, 0x0e, - 0x0a, 0x02, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x74, 0x73, 0x22, 0x2f, - 0x0a, 0x0f, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, - 0x79, 0x12, 0x0c, 0x0a, 0x01, 0x76, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x01, 0x76, 0x12, - 0x0e, 0x0a, 0x02, 0x6f, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x02, 0x6f, 0x6b, 0x22, - 0xeb, 0x01, 0x0a, 0x0d, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, - 0x71, 0x12, 0x13, 0x0a, 0x05, 0x74, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, - 0x52, 0x04, 0x74, 0x78, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x0c, 0x0a, 0x01, - 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x01, 0x6b, 0x12, 0x17, 0x0a, 0x07, 0x66, 0x72, - 0x6f, 0x6d, 0x5f, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x12, 0x52, 0x06, 0x66, 0x72, 0x6f, - 0x6d, 0x54, 0x73, 0x12, 0x13, 0x0a, 0x05, 0x74, 0x6f, 0x5f, 0x74, 0x73, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x12, 0x52, 0x04, 0x74, 0x6f, 0x54, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x72, 0x64, 0x65, - 0x72, 0x5f, 0x61, 0x73, 0x63, 0x65, 0x6e, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, - 0x6f, 0x72, 0x64, 0x65, 0x72, 0x41, 0x73, 0x63, 0x65, 0x6e, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x6c, - 0x69, 0x6d, 0x69, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x12, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, - 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x08, - 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, - 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x09, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x59, 0x0a, - 0x0f, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, - 0x12, 0x1e, 0x0a, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x04, 0x52, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, - 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, - 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, - 
0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xdf, 0x01, 0x0a, 0x0f, 0x48, 0x69, 0x73, - 0x74, 0x6f, 0x72, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x12, 0x13, 0x0a, 0x05, - 0x74, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x74, 0x78, 0x49, - 0x64, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x66, 0x72, 0x6f, 0x6d, 0x5f, - 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x12, 0x52, 0x06, 0x66, 0x72, 0x6f, 0x6d, 0x54, 0x73, - 0x12, 0x13, 0x0a, 0x05, 0x74, 0x6f, 0x5f, 0x74, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x12, 0x52, - 0x04, 0x74, 0x6f, 0x54, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x5f, 0x61, - 0x73, 0x63, 0x65, 0x6e, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x6f, 0x72, 0x64, - 0x65, 0x72, 0x41, 0x73, 0x63, 0x65, 0x6e, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, - 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x12, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x1b, - 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, - 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, - 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x8a, 0x02, 0x0a, 0x0e, 0x44, - 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x12, 0x13, 0x0a, - 0x05, 0x74, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x74, 0x78, - 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x66, 0x72, 0x6f, 0x6d, - 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x66, 0x72, 0x6f, 0x6d, - 0x4b, 0x65, 0x79, 0x12, 0x15, 0x0a, 0x06, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x05, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x74, 0x73, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x74, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x61, - 0x74, 0x65, 0x73, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x6c, 0x61, 0x74, 0x65, - 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x5f, 0x61, 0x73, 0x63, 0x65, - 0x6e, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x41, - 0x73, 0x63, 0x65, 0x6e, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x08, - 0x20, 0x01, 0x28, 0x12, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, - 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, - 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, - 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, - 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x5b, 0x0a, 0x05, 0x50, 0x61, 0x69, 0x72, 0x73, - 0x12, 0x12, 0x0a, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x04, - 0x6b, 0x65, 0x79, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x02, - 0x20, 0x03, 0x28, 0x0c, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, - 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, - 0x03, 0x20, 0x01, 0x28, 
0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, - 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x42, 0x0a, 0x0f, 0x50, 0x61, 0x72, 0x69, 0x73, 0x50, 0x61, 0x67, - 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, 0x08, 0x6e, 0x65, 0x78, 0x74, 0x5f, - 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6e, 0x65, 0x78, 0x74, 0x4b, - 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x12, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x22, 0x4f, 0x0a, 0x0f, 0x49, 0x6e, 0x64, 0x65, - 0x78, 0x50, 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x26, 0x0a, 0x0f, 0x6e, - 0x65, 0x78, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x12, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x74, - 0x61, 0x6d, 0x70, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x12, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x2a, 0x86, 0x02, 0x0a, 0x02, 0x4f, 0x70, - 0x12, 0x09, 0x0a, 0x05, 0x46, 0x49, 0x52, 0x53, 0x54, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x46, - 0x49, 0x52, 0x53, 0x54, 0x5f, 0x44, 0x55, 0x50, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x53, 0x45, - 0x45, 0x4b, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x53, 0x45, 0x45, 0x4b, 0x5f, 0x42, 0x4f, 0x54, - 0x48, 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x55, 0x52, 0x52, 0x45, 0x4e, 0x54, 0x10, 0x04, - 0x12, 0x08, 0x0a, 0x04, 0x4c, 0x41, 0x53, 0x54, 0x10, 0x06, 0x12, 0x0c, 0x0a, 0x08, 0x4c, 0x41, - 0x53, 0x54, 0x5f, 0x44, 0x55, 0x50, 0x10, 0x07, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x45, 0x58, 0x54, - 0x10, 0x08, 0x12, 0x0c, 0x0a, 0x08, 0x4e, 0x45, 0x58, 0x54, 0x5f, 0x44, 0x55, 0x50, 0x10, 0x09, - 0x12, 0x0f, 0x0a, 0x0b, 0x4e, 0x45, 0x58, 0x54, 0x5f, 0x4e, 0x4f, 0x5f, 0x44, 0x55, 0x50, 0x10, - 0x0b, 0x12, 0x08, 0x0a, 0x04, 0x50, 0x52, 0x45, 0x56, 0x10, 0x0c, 0x12, 0x0c, 0x0a, 0x08, 0x50, - 0x52, 0x45, 0x56, 0x5f, 0x44, 0x55, 0x50, 0x10, 0x0d, 0x12, 0x0f, 0x0a, 0x0b, 0x50, 0x52, 0x45, - 0x56, 0x5f, 0x4e, 0x4f, 0x5f, 0x44, 0x55, 0x50, 0x10, 0x0e, 0x12, 0x0e, 0x0a, 0x0a, 0x53, 0x45, - 0x45, 0x4b, 0x5f, 0x45, 0x58, 0x41, 0x43, 0x54, 0x10, 0x0f, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x45, - 0x45, 0x4b, 0x5f, 0x42, 0x4f, 0x54, 0x48, 0x5f, 0x45, 0x58, 0x41, 0x43, 0x54, 0x10, 0x10, 0x12, - 0x08, 0x0a, 0x04, 0x4f, 0x50, 0x45, 0x4e, 0x10, 0x1e, 0x12, 0x09, 0x0a, 0x05, 0x43, 0x4c, 0x4f, - 0x53, 0x45, 0x10, 0x1f, 0x12, 0x11, 0x0a, 0x0d, 0x4f, 0x50, 0x45, 0x4e, 0x5f, 0x44, 0x55, 0x50, - 0x5f, 0x53, 0x4f, 0x52, 0x54, 0x10, 0x20, 0x12, 0x09, 0x0a, 0x05, 0x43, 0x4f, 0x55, 0x4e, 0x54, - 0x10, 0x21, 0x2a, 0x48, 0x0a, 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0b, 0x0a, 0x07, - 0x53, 0x54, 0x4f, 0x52, 0x41, 0x47, 0x45, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x55, 0x50, 0x53, - 0x45, 0x52, 0x54, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x44, 0x45, 0x10, 0x02, 0x12, - 0x0f, 0x0a, 0x0b, 0x55, 0x50, 0x53, 0x45, 0x52, 0x54, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x10, 0x03, - 0x12, 0x0a, 0x0a, 0x06, 0x52, 0x45, 0x4d, 0x4f, 0x56, 0x45, 0x10, 0x04, 0x2a, 0x24, 0x0a, 0x09, - 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0b, 0x0a, 0x07, 0x46, 0x4f, 0x52, - 0x57, 0x41, 0x52, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x55, 0x4e, 0x57, 0x49, 0x4e, 0x44, - 0x10, 0x01, 0x32, 0xba, 0x04, 0x0a, 0x02, 0x4b, 0x56, 0x12, 0x36, 0x0a, 0x07, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 
0x6d, 0x70, 0x74, 0x79, 0x1a, 0x13, 0x2e, 0x74, - 0x79, 0x70, 0x65, 0x73, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x70, 0x6c, - 0x79, 0x12, 0x26, 0x0a, 0x02, 0x54, 0x78, 0x12, 0x0e, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, - 0x2e, 0x43, 0x75, 0x72, 0x73, 0x6f, 0x72, 0x1a, 0x0c, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, - 0x2e, 0x50, 0x61, 0x69, 0x72, 0x28, 0x01, 0x30, 0x01, 0x12, 0x46, 0x0a, 0x0c, 0x53, 0x74, 0x61, - 0x74, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x1a, 0x2e, 0x72, 0x65, 0x6d, 0x6f, - 0x74, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x53, - 0x74, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x42, 0x61, 0x74, 0x63, 0x68, 0x30, - 0x01, 0x12, 0x3d, 0x0a, 0x09, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x12, 0x18, - 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, - 0x65, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, - 0x12, 0x28, 0x0a, 0x05, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x10, 0x2e, 0x72, 0x65, 0x6d, 0x6f, - 0x74, 0x65, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x1a, 0x0d, 0x2e, 0x72, 0x65, - 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x50, 0x61, 0x69, 0x72, 0x73, 0x12, 0x39, 0x0a, 0x09, 0x44, 0x6f, - 0x6d, 0x61, 0x69, 0x6e, 0x47, 0x65, 0x74, 0x12, 0x14, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, - 0x2e, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x71, 0x1a, 0x16, 0x2e, - 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x47, 0x65, 0x74, - 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x3c, 0x0a, 0x0a, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, - 0x47, 0x65, 0x74, 0x12, 0x15, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x48, 0x69, 0x73, - 0x74, 0x6f, 0x72, 0x79, 0x47, 0x65, 0x74, 0x52, 0x65, 0x71, 0x1a, 0x17, 0x2e, 0x72, 0x65, 0x6d, - 0x6f, 0x74, 0x65, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x47, 0x65, 0x74, 0x52, 0x65, - 0x70, 0x6c, 0x79, 0x12, 0x3c, 0x0a, 0x0a, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x61, 0x6e, 0x67, - 0x65, 0x12, 0x15, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x49, 0x6e, 0x64, 0x65, 0x78, - 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x1a, 0x17, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, - 0x65, 0x2e, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x70, 0x6c, - 0x79, 0x12, 0x36, 0x0a, 0x0c, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x61, 0x6e, 0x67, - 0x65, 0x12, 0x17, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, - 0x72, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x1a, 0x0d, 0x2e, 0x72, 0x65, 0x6d, - 0x6f, 0x74, 0x65, 0x2e, 0x50, 0x61, 0x69, 0x72, 0x73, 0x12, 0x34, 0x0a, 0x0b, 0x44, 0x6f, 0x6d, - 0x61, 0x69, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x16, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, - 0x65, 0x2e, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, - 0x1a, 0x0d, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x50, 0x61, 0x69, 0x72, 0x73, 0x42, - 0x16, 0x5a, 0x14, 0x2e, 0x2f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x3b, 0x72, 0x65, 0x6d, 0x6f, - 0x74, 0x65, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x52, 0x02, 0x6f, 0x6b, 0x22, 0x59, 0x0a, 0x0e, 0x48, 0x69, 0x73, 0x74, 0x6f, 
0x72, 0x79, 0x53, + 0x65, 0x65, 0x6b, 0x52, 0x65, 0x71, 0x12, 0x13, 0x0a, 0x05, 0x74, 0x78, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x74, 0x78, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x12, 0x0c, 0x0a, 0x01, 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x01, 0x6b, 0x12, + 0x0e, 0x0a, 0x02, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x74, 0x73, 0x22, + 0x30, 0x0a, 0x10, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x53, 0x65, 0x65, 0x6b, 0x52, 0x65, + 0x70, 0x6c, 0x79, 0x12, 0x0c, 0x0a, 0x01, 0x76, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x01, + 0x76, 0x12, 0x0e, 0x0a, 0x02, 0x6f, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x02, 0x6f, + 0x6b, 0x22, 0xeb, 0x01, 0x0a, 0x0d, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x61, 0x6e, 0x67, 0x65, + 0x52, 0x65, 0x71, 0x12, 0x13, 0x0a, 0x05, 0x74, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x04, 0x74, 0x78, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x0c, + 0x0a, 0x01, 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x01, 0x6b, 0x12, 0x17, 0x0a, 0x07, + 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x12, 0x52, 0x06, 0x66, + 0x72, 0x6f, 0x6d, 0x54, 0x73, 0x12, 0x13, 0x0a, 0x05, 0x74, 0x6f, 0x5f, 0x74, 0x73, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x12, 0x52, 0x04, 0x74, 0x6f, 0x54, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x72, + 0x64, 0x65, 0x72, 0x5f, 0x61, 0x73, 0x63, 0x65, 0x6e, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0b, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x41, 0x73, 0x63, 0x65, 0x6e, 0x64, 0x12, 0x14, 0x0a, + 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x12, 0x52, 0x05, 0x6c, 0x69, + 0x6d, 0x69, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, + 0x18, 0x08, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, + 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x09, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, + 0x59, 0x0a, 0x0f, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x70, + 0x6c, 0x79, 0x12, 0x1e, 0x0a, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x04, 0x52, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, + 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, + 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xdf, 0x01, 0x0a, 0x0f, 0x48, + 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x12, 0x13, + 0x0a, 0x05, 0x74, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x74, + 0x78, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x66, 0x72, 0x6f, + 0x6d, 0x5f, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x12, 0x52, 0x06, 0x66, 0x72, 0x6f, 0x6d, + 0x54, 0x73, 0x12, 0x13, 0x0a, 0x05, 0x74, 0x6f, 0x5f, 0x74, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x12, 0x52, 0x04, 0x74, 0x6f, 0x54, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x72, 0x64, 0x65, 0x72, + 0x5f, 
0x61, 0x73, 0x63, 0x65, 0x6e, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x6f, + 0x72, 0x64, 0x65, 0x72, 0x41, 0x73, 0x63, 0x65, 0x6e, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, + 0x6d, 0x69, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x12, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, + 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, + 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x8a, 0x02, 0x0a, + 0x0e, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x12, + 0x13, 0x0a, 0x05, 0x74, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, + 0x74, 0x78, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x66, 0x72, + 0x6f, 0x6d, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x66, 0x72, + 0x6f, 0x6d, 0x4b, 0x65, 0x79, 0x12, 0x15, 0x0a, 0x06, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x12, 0x0e, 0x0a, 0x02, + 0x74, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x74, 0x73, 0x12, 0x16, 0x0a, 0x06, + 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x6c, 0x61, + 0x74, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x5f, 0x61, 0x73, + 0x63, 0x65, 0x6e, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x6f, 0x72, 0x64, 0x65, + 0x72, 0x41, 0x73, 0x63, 0x65, 0x6e, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, + 0x18, 0x08, 0x20, 0x01, 0x28, 0x12, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x1b, 0x0a, + 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, + 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x5b, 0x0a, 0x05, 0x50, 0x61, 0x69, + 0x72, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, + 0x52, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x26, + 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, + 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, + 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x42, 0x0a, 0x0f, 0x50, 0x61, 0x72, 0x69, 0x73, 0x50, + 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, 0x08, 0x6e, 0x65, 0x78, + 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6e, 0x65, 0x78, + 0x74, 0x4b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x12, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x22, 0x4f, 0x0a, 0x0f, 0x49, 0x6e, + 0x64, 0x65, 0x78, 0x50, 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x26, 0x0a, + 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x18, 0x01, 0x20, 0x01, 0x28, 
0x12, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x54, 0x69, 0x6d, 0x65, + 0x53, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x12, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x2a, 0x86, 0x02, 0x0a, 0x02, + 0x4f, 0x70, 0x12, 0x09, 0x0a, 0x05, 0x46, 0x49, 0x52, 0x53, 0x54, 0x10, 0x00, 0x12, 0x0d, 0x0a, + 0x09, 0x46, 0x49, 0x52, 0x53, 0x54, 0x5f, 0x44, 0x55, 0x50, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, + 0x53, 0x45, 0x45, 0x4b, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x53, 0x45, 0x45, 0x4b, 0x5f, 0x42, + 0x4f, 0x54, 0x48, 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x55, 0x52, 0x52, 0x45, 0x4e, 0x54, + 0x10, 0x04, 0x12, 0x08, 0x0a, 0x04, 0x4c, 0x41, 0x53, 0x54, 0x10, 0x06, 0x12, 0x0c, 0x0a, 0x08, + 0x4c, 0x41, 0x53, 0x54, 0x5f, 0x44, 0x55, 0x50, 0x10, 0x07, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x45, + 0x58, 0x54, 0x10, 0x08, 0x12, 0x0c, 0x0a, 0x08, 0x4e, 0x45, 0x58, 0x54, 0x5f, 0x44, 0x55, 0x50, + 0x10, 0x09, 0x12, 0x0f, 0x0a, 0x0b, 0x4e, 0x45, 0x58, 0x54, 0x5f, 0x4e, 0x4f, 0x5f, 0x44, 0x55, + 0x50, 0x10, 0x0b, 0x12, 0x08, 0x0a, 0x04, 0x50, 0x52, 0x45, 0x56, 0x10, 0x0c, 0x12, 0x0c, 0x0a, + 0x08, 0x50, 0x52, 0x45, 0x56, 0x5f, 0x44, 0x55, 0x50, 0x10, 0x0d, 0x12, 0x0f, 0x0a, 0x0b, 0x50, + 0x52, 0x45, 0x56, 0x5f, 0x4e, 0x4f, 0x5f, 0x44, 0x55, 0x50, 0x10, 0x0e, 0x12, 0x0e, 0x0a, 0x0a, + 0x53, 0x45, 0x45, 0x4b, 0x5f, 0x45, 0x58, 0x41, 0x43, 0x54, 0x10, 0x0f, 0x12, 0x13, 0x0a, 0x0f, + 0x53, 0x45, 0x45, 0x4b, 0x5f, 0x42, 0x4f, 0x54, 0x48, 0x5f, 0x45, 0x58, 0x41, 0x43, 0x54, 0x10, + 0x10, 0x12, 0x08, 0x0a, 0x04, 0x4f, 0x50, 0x45, 0x4e, 0x10, 0x1e, 0x12, 0x09, 0x0a, 0x05, 0x43, + 0x4c, 0x4f, 0x53, 0x45, 0x10, 0x1f, 0x12, 0x11, 0x0a, 0x0d, 0x4f, 0x50, 0x45, 0x4e, 0x5f, 0x44, + 0x55, 0x50, 0x5f, 0x53, 0x4f, 0x52, 0x54, 0x10, 0x20, 0x12, 0x09, 0x0a, 0x05, 0x43, 0x4f, 0x55, + 0x4e, 0x54, 0x10, 0x21, 0x2a, 0x48, 0x0a, 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0b, + 0x0a, 0x07, 0x53, 0x54, 0x4f, 0x52, 0x41, 0x47, 0x45, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x55, + 0x50, 0x53, 0x45, 0x52, 0x54, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x44, 0x45, 0x10, + 0x02, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x50, 0x53, 0x45, 0x52, 0x54, 0x5f, 0x43, 0x4f, 0x44, 0x45, + 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x52, 0x45, 0x4d, 0x4f, 0x56, 0x45, 0x10, 0x04, 0x2a, 0x24, + 0x0a, 0x09, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0b, 0x0a, 0x07, 0x46, + 0x4f, 0x52, 0x57, 0x41, 0x52, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x55, 0x4e, 0x57, 0x49, + 0x4e, 0x44, 0x10, 0x01, 0x32, 0xbd, 0x04, 0x0a, 0x02, 0x4b, 0x56, 0x12, 0x36, 0x0a, 0x07, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x13, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x70, 0x6c, 0x79, 0x12, 0x26, 0x0a, 0x02, 0x54, 0x78, 0x12, 0x0e, 0x2e, 0x72, 0x65, 0x6d, 0x6f, + 0x74, 0x65, 0x2e, 0x43, 0x75, 0x72, 0x73, 0x6f, 0x72, 0x1a, 0x0c, 0x2e, 0x72, 0x65, 0x6d, 0x6f, + 0x74, 0x65, 0x2e, 0x50, 0x61, 0x69, 0x72, 0x28, 0x01, 0x30, 0x01, 0x12, 0x46, 0x0a, 0x0c, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x1a, 0x2e, 0x72, 0x65, + 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, + 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 
0x6e, 0x67, 0x65, 0x42, 0x61, 0x74, 0x63, + 0x68, 0x30, 0x01, 0x12, 0x3d, 0x0a, 0x09, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, + 0x12, 0x18, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, + 0x6f, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x72, 0x65, 0x6d, + 0x6f, 0x74, 0x65, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x70, + 0x6c, 0x79, 0x12, 0x28, 0x0a, 0x05, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x10, 0x2e, 0x72, 0x65, + 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x1a, 0x0d, 0x2e, + 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x50, 0x61, 0x69, 0x72, 0x73, 0x12, 0x39, 0x0a, 0x09, + 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x47, 0x65, 0x74, 0x12, 0x14, 0x2e, 0x72, 0x65, 0x6d, 0x6f, + 0x74, 0x65, 0x2e, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x71, 0x1a, + 0x16, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x47, + 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x3f, 0x0a, 0x0b, 0x48, 0x69, 0x73, 0x74, 0x6f, + 0x72, 0x79, 0x53, 0x65, 0x65, 0x6b, 0x12, 0x16, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, + 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x53, 0x65, 0x65, 0x6b, 0x52, 0x65, 0x71, 0x1a, 0x18, + 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x53, + 0x65, 0x65, 0x6b, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x3c, 0x0a, 0x0a, 0x49, 0x6e, 0x64, 0x65, + 0x78, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x15, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, + 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x1a, 0x17, 0x2e, + 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x61, 0x6e, 0x67, + 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x36, 0x0a, 0x0c, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, + 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x17, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, + 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x1a, + 0x0d, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x50, 0x61, 0x69, 0x72, 0x73, 0x12, 0x34, + 0x0a, 0x0b, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x16, 0x2e, + 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x52, 0x61, 0x6e, + 0x67, 0x65, 0x52, 0x65, 0x71, 0x1a, 0x0d, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x50, + 0x61, 0x69, 0x72, 0x73, 0x42, 0x16, 0x5a, 0x14, 0x2e, 0x2f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, + 0x3b, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -2060,8 +2061,8 @@ var file_remote_kv_proto_goTypes = []interface{}{ (*RangeReq)(nil), // 12: remote.RangeReq (*DomainGetReq)(nil), // 13: remote.DomainGetReq (*DomainGetReply)(nil), // 14: remote.DomainGetReply - (*HistoryGetReq)(nil), // 15: remote.HistoryGetReq - (*HistoryGetReply)(nil), // 16: remote.HistoryGetReply + (*HistorySeekReq)(nil), // 15: remote.HistorySeekReq + (*HistorySeekReply)(nil), // 16: remote.HistorySeekReply (*IndexRangeReq)(nil), // 17: remote.IndexRangeReq (*IndexRangeReply)(nil), // 18: remote.IndexRangeReply (*HistoryRangeReq)(nil), // 19: remote.HistoryRangeReq @@ -2090,7 +2091,7 @@ var file_remote_kv_proto_depIdxs = []int32{ 10, // 12: remote.KV.Snapshots:input_type -> remote.SnapshotsRequest 12, // 13: remote.KV.Range:input_type -> remote.RangeReq 13, // 14: 
remote.KV.DomainGet:input_type -> remote.DomainGetReq - 15, // 15: remote.KV.HistoryGet:input_type -> remote.HistoryGetReq + 15, // 15: remote.KV.HistorySeek:input_type -> remote.HistorySeekReq 17, // 16: remote.KV.IndexRange:input_type -> remote.IndexRangeReq 19, // 17: remote.KV.HistoryRange:input_type -> remote.HistoryRangeReq 20, // 18: remote.KV.DomainRange:input_type -> remote.DomainRangeReq @@ -2100,7 +2101,7 @@ var file_remote_kv_proto_depIdxs = []int32{ 11, // 22: remote.KV.Snapshots:output_type -> remote.SnapshotsReply 21, // 23: remote.KV.Range:output_type -> remote.Pairs 14, // 24: remote.KV.DomainGet:output_type -> remote.DomainGetReply - 16, // 25: remote.KV.HistoryGet:output_type -> remote.HistoryGetReply + 16, // 25: remote.KV.HistorySeek:output_type -> remote.HistorySeekReply 18, // 26: remote.KV.IndexRange:output_type -> remote.IndexRangeReply 21, // 27: remote.KV.HistoryRange:output_type -> remote.Pairs 21, // 28: remote.KV.DomainRange:output_type -> remote.Pairs @@ -2262,7 +2263,7 @@ func file_remote_kv_proto_init() { } } file_remote_kv_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HistoryGetReq); i { + switch v := v.(*HistorySeekReq); i { case 0: return &v.state case 1: @@ -2274,7 +2275,7 @@ func file_remote_kv_proto_init() { } } file_remote_kv_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HistoryGetReply); i { + switch v := v.(*HistorySeekReply); i { case 0: return &v.state case 1: diff --git a/erigon-lib/gointerfaces/remoteproto/kv_client_mock.go b/erigon-lib/gointerfaces/remoteproto/kv_client_mock.go index 73d1f3e9373..75176766b2f 100644 --- a/erigon-lib/gointerfaces/remoteproto/kv_client_mock.go +++ b/erigon-lib/gointerfaces/remoteproto/kv_client_mock.go @@ -130,90 +130,90 @@ func (c *MockKVClientDomainRangeCall) DoAndReturn(f func(context.Context, *Domai return c } -// HistoryGet mocks base method. -func (m *MockKVClient) HistoryGet(arg0 context.Context, arg1 *HistoryGetReq, arg2 ...grpc.CallOption) (*HistoryGetReply, error) { +// HistoryRange mocks base method. +func (m *MockKVClient) HistoryRange(arg0 context.Context, arg1 *HistoryRangeReq, arg2 ...grpc.CallOption) (*Pairs, error) { m.ctrl.T.Helper() varargs := []any{arg0, arg1} for _, a := range arg2 { varargs = append(varargs, a) } - ret := m.ctrl.Call(m, "HistoryGet", varargs...) - ret0, _ := ret[0].(*HistoryGetReply) + ret := m.ctrl.Call(m, "HistoryRange", varargs...) + ret0, _ := ret[0].(*Pairs) ret1, _ := ret[1].(error) return ret0, ret1 } -// HistoryGet indicates an expected call of HistoryGet. -func (mr *MockKVClientMockRecorder) HistoryGet(arg0, arg1 any, arg2 ...any) *MockKVClientHistoryGetCall { +// HistoryRange indicates an expected call of HistoryRange. +func (mr *MockKVClientMockRecorder) HistoryRange(arg0, arg1 any, arg2 ...any) *MockKVClientHistoryRangeCall { mr.mock.ctrl.T.Helper() varargs := append([]any{arg0, arg1}, arg2...) - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HistoryGet", reflect.TypeOf((*MockKVClient)(nil).HistoryGet), varargs...) - return &MockKVClientHistoryGetCall{Call: call} + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HistoryRange", reflect.TypeOf((*MockKVClient)(nil).HistoryRange), varargs...) 
+ return &MockKVClientHistoryRangeCall{Call: call} } -// MockKVClientHistoryGetCall wrap *gomock.Call -type MockKVClientHistoryGetCall struct { +// MockKVClientHistoryRangeCall wrap *gomock.Call +type MockKVClientHistoryRangeCall struct { *gomock.Call } // Return rewrite *gomock.Call.Return -func (c *MockKVClientHistoryGetCall) Return(arg0 *HistoryGetReply, arg1 error) *MockKVClientHistoryGetCall { +func (c *MockKVClientHistoryRangeCall) Return(arg0 *Pairs, arg1 error) *MockKVClientHistoryRangeCall { c.Call = c.Call.Return(arg0, arg1) return c } // Do rewrite *gomock.Call.Do -func (c *MockKVClientHistoryGetCall) Do(f func(context.Context, *HistoryGetReq, ...grpc.CallOption) (*HistoryGetReply, error)) *MockKVClientHistoryGetCall { +func (c *MockKVClientHistoryRangeCall) Do(f func(context.Context, *HistoryRangeReq, ...grpc.CallOption) (*Pairs, error)) *MockKVClientHistoryRangeCall { c.Call = c.Call.Do(f) return c } // DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockKVClientHistoryGetCall) DoAndReturn(f func(context.Context, *HistoryGetReq, ...grpc.CallOption) (*HistoryGetReply, error)) *MockKVClientHistoryGetCall { +func (c *MockKVClientHistoryRangeCall) DoAndReturn(f func(context.Context, *HistoryRangeReq, ...grpc.CallOption) (*Pairs, error)) *MockKVClientHistoryRangeCall { c.Call = c.Call.DoAndReturn(f) return c } -// HistoryRange mocks base method. -func (m *MockKVClient) HistoryRange(arg0 context.Context, arg1 *HistoryRangeReq, arg2 ...grpc.CallOption) (*Pairs, error) { +// HistorySeek mocks base method. +func (m *MockKVClient) HistorySeek(arg0 context.Context, arg1 *HistorySeekReq, arg2 ...grpc.CallOption) (*HistorySeekReply, error) { m.ctrl.T.Helper() varargs := []any{arg0, arg1} for _, a := range arg2 { varargs = append(varargs, a) } - ret := m.ctrl.Call(m, "HistoryRange", varargs...) - ret0, _ := ret[0].(*Pairs) + ret := m.ctrl.Call(m, "HistorySeek", varargs...) + ret0, _ := ret[0].(*HistorySeekReply) ret1, _ := ret[1].(error) return ret0, ret1 } -// HistoryRange indicates an expected call of HistoryRange. -func (mr *MockKVClientMockRecorder) HistoryRange(arg0, arg1 any, arg2 ...any) *MockKVClientHistoryRangeCall { +// HistorySeek indicates an expected call of HistorySeek. +func (mr *MockKVClientMockRecorder) HistorySeek(arg0, arg1 any, arg2 ...any) *MockKVClientHistorySeekCall { mr.mock.ctrl.T.Helper() varargs := append([]any{arg0, arg1}, arg2...) - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HistoryRange", reflect.TypeOf((*MockKVClient)(nil).HistoryRange), varargs...) - return &MockKVClientHistoryRangeCall{Call: call} + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HistorySeek", reflect.TypeOf((*MockKVClient)(nil).HistorySeek), varargs...) 
+ return &MockKVClientHistorySeekCall{Call: call} } -// MockKVClientHistoryRangeCall wrap *gomock.Call -type MockKVClientHistoryRangeCall struct { +// MockKVClientHistorySeekCall wrap *gomock.Call +type MockKVClientHistorySeekCall struct { *gomock.Call } // Return rewrite *gomock.Call.Return -func (c *MockKVClientHistoryRangeCall) Return(arg0 *Pairs, arg1 error) *MockKVClientHistoryRangeCall { +func (c *MockKVClientHistorySeekCall) Return(arg0 *HistorySeekReply, arg1 error) *MockKVClientHistorySeekCall { c.Call = c.Call.Return(arg0, arg1) return c } // Do rewrite *gomock.Call.Do -func (c *MockKVClientHistoryRangeCall) Do(f func(context.Context, *HistoryRangeReq, ...grpc.CallOption) (*Pairs, error)) *MockKVClientHistoryRangeCall { +func (c *MockKVClientHistorySeekCall) Do(f func(context.Context, *HistorySeekReq, ...grpc.CallOption) (*HistorySeekReply, error)) *MockKVClientHistorySeekCall { c.Call = c.Call.Do(f) return c } // DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockKVClientHistoryRangeCall) DoAndReturn(f func(context.Context, *HistoryRangeReq, ...grpc.CallOption) (*Pairs, error)) *MockKVClientHistoryRangeCall { +func (c *MockKVClientHistorySeekCall) DoAndReturn(f func(context.Context, *HistorySeekReq, ...grpc.CallOption) (*HistorySeekReply, error)) *MockKVClientHistorySeekCall { c.Call = c.Call.DoAndReturn(f) return c } diff --git a/erigon-lib/gointerfaces/remoteproto/kv_grpc.pb.go b/erigon-lib/gointerfaces/remoteproto/kv_grpc.pb.go index 5bf34438ffd..5478e361d44 100644 --- a/erigon-lib/gointerfaces/remoteproto/kv_grpc.pb.go +++ b/erigon-lib/gointerfaces/remoteproto/kv_grpc.pb.go @@ -27,7 +27,7 @@ const ( KV_Snapshots_FullMethodName = "/remote.KV/Snapshots" KV_Range_FullMethodName = "/remote.KV/Range" KV_DomainGet_FullMethodName = "/remote.KV/DomainGet" - KV_HistoryGet_FullMethodName = "/remote.KV/HistoryGet" + KV_HistorySeek_FullMethodName = "/remote.KV/HistorySeek" KV_IndexRange_FullMethodName = "/remote.KV/IndexRange" KV_HistoryRange_FullMethodName = "/remote.KV/HistoryRange" KV_DomainRange_FullMethodName = "/remote.KV/DomainRange" @@ -55,7 +55,7 @@ type KVClient interface { Range(ctx context.Context, in *RangeReq, opts ...grpc.CallOption) (*Pairs, error) // Temporal methods DomainGet(ctx context.Context, in *DomainGetReq, opts ...grpc.CallOption) (*DomainGetReply, error) - HistoryGet(ctx context.Context, in *HistoryGetReq, opts ...grpc.CallOption) (*HistoryGetReply, error) + HistorySeek(ctx context.Context, in *HistorySeekReq, opts ...grpc.CallOption) (*HistorySeekReply, error) IndexRange(ctx context.Context, in *IndexRangeReq, opts ...grpc.CallOption) (*IndexRangeReply, error) HistoryRange(ctx context.Context, in *HistoryRangeReq, opts ...grpc.CallOption) (*Pairs, error) DomainRange(ctx context.Context, in *DomainRangeReq, opts ...grpc.CallOption) (*Pairs, error) @@ -168,9 +168,9 @@ func (c *kVClient) DomainGet(ctx context.Context, in *DomainGetReq, opts ...grpc return out, nil } -func (c *kVClient) HistoryGet(ctx context.Context, in *HistoryGetReq, opts ...grpc.CallOption) (*HistoryGetReply, error) { - out := new(HistoryGetReply) - err := c.cc.Invoke(ctx, KV_HistoryGet_FullMethodName, in, out, opts...) +func (c *kVClient) HistorySeek(ctx context.Context, in *HistorySeekReq, opts ...grpc.CallOption) (*HistorySeekReply, error) { + out := new(HistorySeekReply) + err := c.cc.Invoke(ctx, KV_HistorySeek_FullMethodName, in, out, opts...) 
if err != nil { return nil, err } @@ -226,7 +226,7 @@ type KVServer interface { Range(context.Context, *RangeReq) (*Pairs, error) // Temporal methods DomainGet(context.Context, *DomainGetReq) (*DomainGetReply, error) - HistoryGet(context.Context, *HistoryGetReq) (*HistoryGetReply, error) + HistorySeek(context.Context, *HistorySeekReq) (*HistorySeekReply, error) IndexRange(context.Context, *IndexRangeReq) (*IndexRangeReply, error) HistoryRange(context.Context, *HistoryRangeReq) (*Pairs, error) DomainRange(context.Context, *DomainRangeReq) (*Pairs, error) @@ -255,8 +255,8 @@ func (UnimplementedKVServer) Range(context.Context, *RangeReq) (*Pairs, error) { func (UnimplementedKVServer) DomainGet(context.Context, *DomainGetReq) (*DomainGetReply, error) { return nil, status.Errorf(codes.Unimplemented, "method DomainGet not implemented") } -func (UnimplementedKVServer) HistoryGet(context.Context, *HistoryGetReq) (*HistoryGetReply, error) { - return nil, status.Errorf(codes.Unimplemented, "method HistoryGet not implemented") +func (UnimplementedKVServer) HistorySeek(context.Context, *HistorySeekReq) (*HistorySeekReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method HistorySeek not implemented") } func (UnimplementedKVServer) IndexRange(context.Context, *IndexRangeReq) (*IndexRangeReply, error) { return nil, status.Errorf(codes.Unimplemented, "method IndexRange not implemented") @@ -399,20 +399,20 @@ func _KV_DomainGet_Handler(srv interface{}, ctx context.Context, dec func(interf return interceptor(ctx, in, info, handler) } -func _KV_HistoryGet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(HistoryGetReq) +func _KV_HistorySeek_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(HistorySeekReq) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(KVServer).HistoryGet(ctx, in) + return srv.(KVServer).HistorySeek(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: KV_HistoryGet_FullMethodName, + FullMethod: KV_HistorySeek_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(KVServer).HistoryGet(ctx, req.(*HistoryGetReq)) + return srv.(KVServer).HistorySeek(ctx, req.(*HistorySeekReq)) } return interceptor(ctx, in, info, handler) } @@ -495,8 +495,8 @@ var KV_ServiceDesc = grpc.ServiceDesc{ Handler: _KV_DomainGet_Handler, }, { - MethodName: "HistoryGet", - Handler: _KV_HistoryGet_Handler, + MethodName: "HistorySeek", + Handler: _KV_HistorySeek_Handler, }, { MethodName: "IndexRange", diff --git a/erigon-lib/kv/kv_interface.go b/erigon-lib/kv/kv_interface.go index 8770c759c75..1399d2a97d0 100644 --- a/erigon-lib/kv/kv_interface.go +++ b/erigon-lib/kv/kv_interface.go @@ -449,7 +449,7 @@ type BucketMigrator interface { // Cursor - class for navigating through a database // CursorDupSort are inherit this class // -// If methods (like First/Next/Seek) return error, then returned key SHOULD not be nil (can be []byte{} for example). +// If methods (like First/Next/seekInFiles) return error, then returned key SHOULD not be nil (can be []byte{} for example). 
// Then looping code will look as: // c := kv.Cursor(bucketName) // @@ -545,7 +545,7 @@ type TemporalTx interface { Tx TemporalGetter DomainGetAsOf(name Domain, k, k2 []byte, ts uint64) (v []byte, ok bool, err error) - HistoryGet(name History, k []byte, ts uint64) (v []byte, ok bool, err error) + HistorySeek(name History, k []byte, ts uint64) (v []byte, ok bool, err error) // IndexRange - return iterator over range of inverted index for given key `k` // Asc semantic: [from, to) AND from > to @@ -564,6 +564,12 @@ type TemporalTx interface { type TemporalCommitment interface { ComputeCommitment(ctx context.Context, saveStateAfter, trace bool) (rootHash []byte, err error) } + +type TemporalRwTx interface { + RwTx + TemporalTx +} + type TemporalPutDel interface { // DomainPut // Optimizations: diff --git a/erigon-lib/kv/mdbx/kv_abstract_test.go b/erigon-lib/kv/mdbx/kv_abstract_test.go index ec4451201b2..55b1a8b87e5 100644 --- a/erigon-lib/kv/mdbx/kv_abstract_test.go +++ b/erigon-lib/kv/mdbx/kv_abstract_test.go @@ -537,7 +537,7 @@ func testMultiCursor(t *testing.T, db kv.RwDB, bucket1, bucket2 string) { // } // // c3 := tx.Cursor(dbutils.ChaindataTables[0]) -// k, v, err := c3.Seek([]byte{5}) +// k, v, err := c3.seekInFiles([]byte{5}) // if err != nil { // return err // } diff --git a/erigon-lib/kv/mdbx/kv_mdbx.go b/erigon-lib/kv/mdbx/kv_mdbx.go index d9ec0119400..652774439fd 100644 --- a/erigon-lib/kv/mdbx/kv_mdbx.go +++ b/erigon-lib/kv/mdbx/kv_mdbx.go @@ -1446,7 +1446,7 @@ func (c *MdbxCursor) Seek(seek []byte) (k, v []byte, err error) { if mdbx.IsNotFound(err) { return nil, nil, nil } - err = fmt.Errorf("failed MdbxKV cursor.Seek(): %w, bucket: %s, key: %x", err, c.bucketName, seek) + err = fmt.Errorf("failed MdbxKV cursor.seekInFiles(): %w, bucket: %s, key: %x", err, c.bucketName, seek) return []byte{}, nil, err } diff --git a/erigon-lib/kv/membatchwithdb/memory_mutation.go b/erigon-lib/kv/membatchwithdb/memory_mutation.go index 50ba0f6e3cf..63d32895e31 100644 --- a/erigon-lib/kv/membatchwithdb/memory_mutation.go +++ b/erigon-lib/kv/membatchwithdb/memory_mutation.go @@ -731,8 +731,8 @@ func (m *MemoryMutation) DomainGet(name kv.Domain, k, k2 []byte) (v []byte, step func (m *MemoryMutation) DomainGetAsOf(name kv.Domain, k, k2 []byte, ts uint64) (v []byte, ok bool, err error) { return m.db.(kv.TemporalTx).DomainGetAsOf(name, k, k2, ts) } -func (m *MemoryMutation) HistoryGet(name kv.History, k []byte, ts uint64) (v []byte, ok bool, err error) { - return m.db.(kv.TemporalTx).HistoryGet(name, k, ts) +func (m *MemoryMutation) HistorySeek(name kv.History, k []byte, ts uint64) (v []byte, ok bool, err error) { + return m.db.(kv.TemporalTx).HistorySeek(name, k, ts) } func (m *MemoryMutation) IndexRange(name kv.InvertedIdx, k []byte, fromTs, toTs int, asc order.By, limit int) (timestamps iter.U64, err error) { diff --git a/erigon-lib/kv/remotedb/kv_remote.go b/erigon-lib/kv/remotedb/kv_remote.go index dd4fdc48491..0119ac85e14 100644 --- a/erigon-lib/kv/remotedb/kv_remote.go +++ b/erigon-lib/kv/remotedb/kv_remote.go @@ -549,7 +549,7 @@ func (c *remoteCursor) Current() ([]byte, []byte, error) { return c.getCurrent() } -// Seek - doesn't start streaming (because much of code does only several .Seek calls without reading sequence of data) +// Seek - doesn't start streaming (because much of code does only several .seekInFiles calls without reading sequence of data) // .Next() - does request streaming (if configured by user) func (c *remoteCursor) Seek(seek []byte) ([]byte, []byte, error) { return 
c.setRange(seek) @@ -674,8 +674,8 @@ func (tx *tx) DomainRange(name kv.Domain, fromKey, toKey []byte, ts uint64, asc return reply.Keys, reply.Values, reply.NextPageToken, nil }), nil } -func (tx *tx) HistoryGet(name kv.History, k []byte, ts uint64) (v []byte, ok bool, err error) { - reply, err := tx.db.remoteKV.HistoryGet(tx.ctx, &remote.HistoryGetReq{TxId: tx.id, Table: string(name), K: k, Ts: ts}) +func (tx *tx) HistorySeek(name kv.History, k []byte, ts uint64) (v []byte, ok bool, err error) { + reply, err := tx.db.remoteKV.HistorySeek(tx.ctx, &remote.HistorySeekReq{TxId: tx.id, Table: string(name), K: k, Ts: ts}) if err != nil { return nil, false, err } diff --git a/erigon-lib/kv/remotedbserver/remotedbserver.go b/erigon-lib/kv/remotedbserver/remotedbserver.go index f4908cd734b..1cd23b95fdd 100644 --- a/erigon-lib/kv/remotedbserver/remotedbserver.go +++ b/erigon-lib/kv/remotedbserver/remotedbserver.go @@ -62,7 +62,7 @@ const MaxTxTTL = 60 * time.Second // 5.0 - BlockTransaction table now has canonical ids (txs of non-canonical blocks moving to NonCanonicalTransaction table) // 5.1.0 - Added blockGasLimit to the StateChangeBatch // 6.0.0 - Blocks now have system-txs - in the begin/end of block -// 6.1.0 - Add methods Range, IndexRange, HistoryGet, HistoryRange +// 6.1.0 - Add methods Range, IndexRange, HistorySeek, HistoryRange // 6.2.0 - Add HistoryFiles to reply of Snapshots() method var KvServiceAPIVersion = &types.VersionReply{Major: 6, Minor: 2, Patch: 0} @@ -561,14 +561,14 @@ func (s *KvServer) DomainGet(_ context.Context, req *remote.DomainGetReq) (reply } return reply, nil } -func (s *KvServer) HistoryGet(_ context.Context, req *remote.HistoryGetReq) (reply *remote.HistoryGetReply, err error) { - reply = &remote.HistoryGetReply{} +func (s *KvServer) HistorySeek(_ context.Context, req *remote.HistorySeekReq) (reply *remote.HistorySeekReply, err error) { + reply = &remote.HistorySeekReply{} if err := s.with(req.TxId, func(tx kv.Tx) error { ttx, ok := tx.(kv.TemporalTx) if !ok { return fmt.Errorf("server DB doesn't implement kv.Temporal interface") } - reply.V, reply.Ok, err = ttx.HistoryGet(kv.History(req.Table), req.K, req.Ts) + reply.V, reply.Ok, err = ttx.HistorySeek(kv.History(req.Table), req.K, req.Ts) if err != nil { return err } diff --git a/erigon-lib/kv/tables.go b/erigon-lib/kv/tables.go index 1898168c0cf..204fa32eec3 100644 --- a/erigon-lib/kv/tables.go +++ b/erigon-lib/kv/tables.go @@ -126,12 +126,12 @@ AccountsHistory and StorageHistory - indices designed to serve next 2 type of re 2. get last shard of A - to append there new block numbers Task 1. is part of "get historical state" operation (see `core/state:GetAsOf`): -If `db.Seek(A+bigEndian(X))` returns non-last shard - +If `db.seekInFiles(A+bigEndian(X))` returns non-last shard - then get block number from shard value Y := RoaringBitmap(shard_value).GetGte(X) and with Y go to ChangeSets: db.Get(ChangeSets, Y+A) -If `db.Seek(A+bigEndian(X))` returns last shard - +If `db.seekInFiles(A+bigEndian(X))` returns last shard - then we go to PlainState: db.Get(PlainState, A) @@ -143,7 +143,7 @@ Format: - if shard is last - then key has suffix 8 bytes = 0xFF It allows: - - server task 1. by 1 db operation db.Seek(A+bigEndian(X)) + - server task 1. by 1 db operation db.seekInFiles(A+bigEndian(X)) - server task 2. 
by 1 db operation db.Get(A+0xFF) see also: docs/programmers_guide/db_walkthrough.MD#table-change-sets diff --git a/erigon-lib/kv/temporal/kv_temporal.go b/erigon-lib/kv/temporal/kv_temporal.go index 03077f7dfab..a15c91c1ada 100644 --- a/erigon-lib/kv/temporal/kv_temporal.go +++ b/erigon-lib/kv/temporal/kv_temporal.go @@ -211,8 +211,8 @@ func (tx *Tx) DomainGetAsOf(name kv.Domain, key, key2 []byte, ts uint64) (v []by return tx.aggCtx.DomainGetAsOf(tx.MdbxTx, name, key, ts) } -func (tx *Tx) HistoryGet(name kv.History, key []byte, ts uint64) (v []byte, ok bool, err error) { - return tx.aggCtx.HistoryGet(name, key, ts, tx.MdbxTx) +func (tx *Tx) HistorySeek(name kv.History, key []byte, ts uint64) (v []byte, ok bool, err error) { + return tx.aggCtx.HistorySeek(name, key, ts, tx.MdbxTx) } func (tx *Tx) IndexRange(name kv.InvertedIdx, k []byte, fromTs, toTs int, asc order.By, limit int) (timestamps iter.U64, err error) { diff --git a/erigon-lib/state/aggregator.go b/erigon-lib/state/aggregator.go index 380d2caf91e..f735cf161dd 100644 --- a/erigon-lib/state/aggregator.go +++ b/erigon-lib/state/aggregator.go @@ -1603,10 +1603,10 @@ func (ac *AggregatorRoTx) IndexRange(name kv.InvertedIdx, k []byte, fromTs, toTs // -- range end -func (ac *AggregatorRoTx) HistoryGet(name kv.History, key []byte, ts uint64, tx kv.Tx) (v []byte, ok bool, err error) { +func (ac *AggregatorRoTx) HistorySeek(name kv.History, key []byte, ts uint64, tx kv.Tx) (v []byte, ok bool, err error) { switch name { case kv.AccountsHistory: - v, ok, err = ac.d[kv.AccountsDomain].ht.GetNoStateWithRecent(key, ts, tx) + v, ok, err = ac.d[kv.AccountsDomain].ht.HistorySeek(key, ts, tx) if err != nil { return nil, false, err } @@ -1615,13 +1615,13 @@ func (ac *AggregatorRoTx) HistoryGet(name kv.History, key []byte, ts uint64, tx } return v, true, nil case kv.StorageHistory: - return ac.d[kv.StorageDomain].ht.GetNoStateWithRecent(key, ts, tx) + return ac.d[kv.StorageDomain].ht.HistorySeek(key, ts, tx) case kv.CodeHistory: - return ac.d[kv.CodeDomain].ht.GetNoStateWithRecent(key, ts, tx) + return ac.d[kv.CodeDomain].ht.HistorySeek(key, ts, tx) case kv.CommitmentHistory: - return ac.d[kv.CommitmentDomain].ht.GetNoStateWithRecent(key, ts, tx) + return ac.d[kv.CommitmentDomain].ht.HistorySeek(key, ts, tx) //case kv.GasUsedHistory: - // return ac.d[kv.GasUsedDomain].ht.GetNoStateWithRecent(key, ts, tx) + // return ac.d[kv.GasUsedDomain].ht.HistorySeek(key, ts, tx) default: panic(fmt.Sprintf("unexpected: %s", name)) } diff --git a/erigon-lib/state/bps_tree.go b/erigon-lib/state/bps_tree.go index b8502e488de..53f4f8c85b2 100644 --- a/erigon-lib/state/bps_tree.go +++ b/erigon-lib/state/bps_tree.go @@ -4,6 +4,7 @@ import ( "bytes" "errors" "fmt" + "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/recsplit/eliasfano32" ) @@ -12,7 +13,7 @@ import ( type indexSeeker interface { WarmUp(g ArchiveGetter) error Get(g ArchiveGetter, key []byte) (k []byte, found bool, di uint64, err error) - //Seek(g ArchiveGetter, key []byte) (indexSeekerIterator, error) + //seekInFiles(g ArchiveGetter, key []byte) (indexSeekerIterator, error) Seek(g ArchiveGetter, seek []byte) (k []byte, di uint64, found bool, err error) } diff --git a/erigon-lib/state/btree_index.go b/erigon-lib/state/btree_index.go index 3739d246233..13724222487 100644 --- a/erigon-lib/state/btree_index.go +++ b/erigon-lib/state/btree_index.go @@ -734,7 +734,7 @@ type BtIndex struct { filePath string } -// Decompressor should be managed by caller (could be closed after index is 
built). When index is built, external getter should be passed to Seek function +// Decompressor should be managed by caller (could be closed after index is built). When index is built, external getter should be passed to seekInFiles function func CreateBtreeIndexWithDecompressor(indexPath string, M uint64, decompressor *seg.Decompressor, compressed FileCompression, seed uint32, ps *background.ProgressSet, tmpdir string, logger log.Logger, noFsync bool) (*BtIndex, error) { err := BuildBtreeIndexWithDecompressor(indexPath, decompressor, compressed, ps, tmpdir, seed, logger, noFsync) if err != nil { @@ -1022,7 +1022,7 @@ func (b *BtIndex) Seek(g ArchiveGetter, x []byte) (*Cursor, error) { } // defer func() { - // fmt.Printf("[Bindex][%s] Seek '%x' -> '%x' di=%d\n", b.FileName(), x, cursor.Value(), cursor.d) + // fmt.Printf("[Bindex][%s] seekInFiles '%x' -> '%x' di=%d\n", b.FileName(), x, cursor.Value(), cursor.d) // }() var ( k []byte diff --git a/erigon-lib/state/domain.go b/erigon-lib/state/domain.go index e82e77bbf63..2eef13d54de 100644 --- a/erigon-lib/state/domain.go +++ b/erigon-lib/state/domain.go @@ -1360,7 +1360,7 @@ func (dt *DomainRoTx) getFromFiles(filekey []byte) (v []byte, found bool, fileSt // GetAsOf does not always require usage of roTx. If it is possible to determine // historical value based only on static files, roTx will not be used. func (dt *DomainRoTx) GetAsOf(key []byte, txNum uint64, roTx kv.Tx) ([]byte, error) { - v, hOk, err := dt.ht.GetNoStateWithRecent(key, txNum, roTx) + v, hOk, err := dt.ht.HistorySeek(key, txNum, roTx) if err != nil { return nil, err } diff --git a/erigon-lib/state/domain_shared.go b/erigon-lib/state/domain_shared.go index 27461fcce18..faef82b1439 100644 --- a/erigon-lib/state/domain_shared.go +++ b/erigon-lib/state/domain_shared.go @@ -1207,7 +1207,7 @@ func (sdc *SharedDomainsCommitmentContext) LatestCommitmentState(tx kv.Tx, cd *D } txn, _ := _decodeTxBlockNums(value) - //fmt.Printf("[commitment] Seek found committed txn %d block %d\n", txn, bn) + //fmt.Printf("[commitment] seekInFiles found committed txn %d block %d\n", txn, bn) if txn >= sinceTx && txn <= untilTx { state = value } diff --git a/erigon-lib/state/domain_shared_bench_test.go b/erigon-lib/state/domain_shared_bench_test.go index ed44bf58a57..0bbabd84ab1 100644 --- a/erigon-lib/state/domain_shared_bench_test.go +++ b/erigon-lib/state/domain_shared_bench_test.go @@ -90,7 +90,7 @@ func Benchmark_SharedDomains_GetLatest(t *testing.B) { for ik := 0; ik < t.N; ik++ { for i := 0; i < len(keys); i++ { ts := uint64(rnd.Intn(int(maxTx))) - v, ok, err := ac2.HistoryGet(kv.AccountsHistory, keys[i], ts, rwTx) + v, ok, err := ac2.HistorySeek(kv.AccountsHistory, keys[i], ts, rwTx) require.True(t, ok) require.NotNil(t, v) diff --git a/erigon-lib/state/history.go b/erigon-lib/state/history.go index 71987862344..0ce48c959b0 100644 --- a/erigon-lib/state/history.go +++ b/erigon-lib/state/history.go @@ -1157,10 +1157,10 @@ func (ht *HistoryRoTx) getFile(txNum uint64) (it ctxItem, ok bool) { return it, false } -func (ht *HistoryRoTx) GetNoState(key []byte, txNum uint64) ([]byte, bool, error) { +func (ht *HistoryRoTx) historySeekInFiles(key []byte, txNum uint64) ([]byte, bool, error) { // Files list of II and History is different // it means II can't return index of file, but can return TxNum which History will use to find own file - ok, histTxNum := ht.iit.Seek(key, txNum) + ok, histTxNum := ht.iit.seekInFiles(key, txNum) if !ok { return nil, false, nil } @@ -1187,7 +1187,7 @@ func (ht 
*HistoryRoTx) GetNoState(key []byte, txNum uint64) ([]byte, bool, error } func (hs *HistoryStep) GetNoState(key []byte, txNum uint64) ([]byte, bool, uint64) { - //fmt.Printf("GetNoState [%x] %d\n", key, txNum) + //fmt.Printf("historySeekInFiles [%x] %d\n", key, txNum) if hs.indexFile.reader.Empty() { return nil, false, txNum } @@ -1252,10 +1252,10 @@ func (ht *HistoryRoTx) encodeTs(txNum uint64) []byte { return ht._bufTs } -// GetNoStateWithRecent searches history for a value of specified key before txNum +// HistorySeek searches history for a value of specified key before txNum // second return value is true if the value is found in the history (even if it is nil) -func (ht *HistoryRoTx) GetNoStateWithRecent(key []byte, txNum uint64, roTx kv.Tx) ([]byte, bool, error) { - v, ok, err := ht.GetNoState(key, txNum) +func (ht *HistoryRoTx) HistorySeek(key []byte, txNum uint64, roTx kv.Tx) ([]byte, bool, error) { + v, ok, err := ht.historySeekInFiles(key, txNum) if err != nil { return nil, ok, err } @@ -1263,7 +1263,7 @@ func (ht *HistoryRoTx) GetNoStateWithRecent(key []byte, txNum uint64, roTx kv.Tx return v, true, nil } - return ht.getNoStateFromDB(key, txNum, roTx) + return ht.historySeekInDB(key, txNum, roTx) } func (ht *HistoryRoTx) valsCursor(tx kv.Tx) (c kv.Cursor, err error) { @@ -1287,7 +1287,7 @@ func (ht *HistoryRoTx) valsCursorDup(tx kv.Tx) (c kv.CursorDupSort, err error) { return ht.valsCDup, nil } -func (ht *HistoryRoTx) getNoStateFromDB(key []byte, txNum uint64, tx kv.Tx) ([]byte, bool, error) { +func (ht *HistoryRoTx) historySeekInDB(key []byte, txNum uint64, tx kv.Tx) ([]byte, bool, error) { if ht.h.historyLargeValues { c, err := ht.valsCursor(tx) if err != nil { @@ -1401,7 +1401,7 @@ func (hi *StateAsOfIterF) advanceInFiles() error { } } - if hi.from != nil && bytes.Compare(key, hi.from) < 0 { //TODO: replace by Seek() + if hi.from != nil && bytes.Compare(key, hi.from) < 0 { //TODO: replace by seekInFiles() continue } @@ -1670,6 +1670,74 @@ func (ht *HistoryRoTx) HistoryRange(fromTxNum, toTxNum int, asc order.By, limit return iter.MergeKVS(itOnDB, itOnFiles, limit), nil } +func (ht *HistoryRoTx) idxRangeRecent(key []byte, startTxNum, endTxNum int, asc order.By, limit int, roTx kv.Tx) (iter.U64, error) { + var dbIt iter.U64 + if ht.h.historyLargeValues { + from := make([]byte, len(key)+8) + copy(from, key) + var fromTxNum uint64 + if startTxNum >= 0 { + fromTxNum = uint64(startTxNum) + } + binary.BigEndian.PutUint64(from[len(key):], fromTxNum) + to := common.Copy(from) + toTxNum := uint64(math.MaxUint64) + if endTxNum >= 0 { + toTxNum = uint64(endTxNum) + } + binary.BigEndian.PutUint64(to[len(key):], toTxNum) + var it iter.KV + var err error + if asc { + it, err = roTx.RangeAscend(ht.h.historyValsTable, from, to, limit) + } else { + it, err = roTx.RangeDescend(ht.h.historyValsTable, from, to, limit) + } + if err != nil { + return nil, err + } + dbIt = iter.TransformKV2U64(it, func(k, v []byte) (uint64, error) { + if len(k) < 8 { + return 0, fmt.Errorf("unexpected large key length %d", len(k)) + } + return binary.BigEndian.Uint64(k[len(k)-8:]), nil + }) + } else { + var from, to []byte + if startTxNum >= 0 { + from = make([]byte, 8) + binary.BigEndian.PutUint64(from, uint64(startTxNum)) + } + if endTxNum >= 0 { + to = make([]byte, 8) + binary.BigEndian.PutUint64(to, uint64(endTxNum)) + } + it, err := roTx.RangeDupSort(ht.h.historyValsTable, key, from, to, asc, limit) + if err != nil { + return nil, err + } + dbIt = iter.TransformKV2U64(it, func(k, v []byte) (uint64, error) { + if 
len(v) < 8 { + return 0, fmt.Errorf("unexpected small value length %d", len(v)) + } + return binary.BigEndian.Uint64(v), nil + }) + } + + return dbIt, nil +} +func (ht *HistoryRoTx) IdxRange(key []byte, startTxNum, endTxNum int, asc order.By, limit int, roTx kv.Tx) (iter.U64, error) { + frozenIt, err := ht.iit.iterateRangeFrozen(key, startTxNum, endTxNum, asc, limit) + if err != nil { + return nil, err + } + recentIt, err := ht.idxRangeRecent(key, startTxNum, endTxNum, asc, limit, roTx) + if err != nil { + return nil, err + } + return iter.Union[uint64](frozenIt, recentIt, asc, limit), nil +} + type HistoryChangesIterFiles struct { hc *HistoryRoTx nextVal []byte @@ -2010,71 +2078,3 @@ func (hs *HistoryStep) Clone() *HistoryStep { }, } } - -func (ht *HistoryRoTx) idxRangeRecent(key []byte, startTxNum, endTxNum int, asc order.By, limit int, roTx kv.Tx) (iter.U64, error) { - var dbIt iter.U64 - if ht.h.historyLargeValues { - from := make([]byte, len(key)+8) - copy(from, key) - var fromTxNum uint64 - if startTxNum >= 0 { - fromTxNum = uint64(startTxNum) - } - binary.BigEndian.PutUint64(from[len(key):], fromTxNum) - to := common.Copy(from) - toTxNum := uint64(math.MaxUint64) - if endTxNum >= 0 { - toTxNum = uint64(endTxNum) - } - binary.BigEndian.PutUint64(to[len(key):], toTxNum) - var it iter.KV - var err error - if asc { - it, err = roTx.RangeAscend(ht.h.historyValsTable, from, to, limit) - } else { - it, err = roTx.RangeDescend(ht.h.historyValsTable, from, to, limit) - } - if err != nil { - return nil, err - } - dbIt = iter.TransformKV2U64(it, func(k, v []byte) (uint64, error) { - if len(k) < 8 { - return 0, fmt.Errorf("unexpected large key length %d", len(k)) - } - return binary.BigEndian.Uint64(k[len(k)-8:]), nil - }) - } else { - var from, to []byte - if startTxNum >= 0 { - from = make([]byte, 8) - binary.BigEndian.PutUint64(from, uint64(startTxNum)) - } - if endTxNum >= 0 { - to = make([]byte, 8) - binary.BigEndian.PutUint64(to, uint64(endTxNum)) - } - it, err := roTx.RangeDupSort(ht.h.historyValsTable, key, from, to, asc, limit) - if err != nil { - return nil, err - } - dbIt = iter.TransformKV2U64(it, func(k, v []byte) (uint64, error) { - if len(v) < 8 { - return 0, fmt.Errorf("unexpected small value length %d", len(v)) - } - return binary.BigEndian.Uint64(v), nil - }) - } - - return dbIt, nil -} -func (ht *HistoryRoTx) IdxRange(key []byte, startTxNum, endTxNum int, asc order.By, limit int, roTx kv.Tx) (iter.U64, error) { - frozenIt, err := ht.iit.iterateRangeFrozen(key, startTxNum, endTxNum, asc, limit) - if err != nil { - return nil, err - } - recentIt, err := ht.idxRangeRecent(key, startTxNum, endTxNum, asc, limit, roTx) - if err != nil { - return nil, err - } - return iter.Union[uint64](frozenIt, recentIt, asc, limit), nil -} diff --git a/erigon-lib/state/history_test.go b/erigon-lib/state/history_test.go index a7b03759f9e..00f936b9073 100644 --- a/erigon-lib/state/history_test.go +++ b/erigon-lib/state/history_test.go @@ -622,7 +622,7 @@ func checkHistoryHistory(t *testing.T, h *History, txs uint64) { binary.BigEndian.PutUint64(k[:], keyNum) binary.BigEndian.PutUint64(v[:], valNum) k[0], v[0] = 0x01, 0xff - val, ok, err := hc.GetNoState(k[:], txNum+1) + val, ok, err := hc.historySeekInFiles(k[:], txNum+1) //require.Equal(t, ok, txNum < 976) if ok { require.NoError(t, err, label) @@ -1088,15 +1088,15 @@ func TestIterateChanged2(t *testing.T) { require.NoError(err) defer tx.Rollback() - v, ok, err := hc.GetNoStateWithRecent(hexutility.MustDecodeHex("0100000000000001"), 900, tx) + v, 
ok, err := hc.HistorySeek(hexutility.MustDecodeHex("0100000000000001"), 900, tx) require.NoError(err) require.True(ok) require.Equal(hexutility.MustDecodeHex("ff00000000000383"), v) - v, ok, err = hc.GetNoStateWithRecent(hexutility.MustDecodeHex("0100000000000001"), 0, tx) + v, ok, err = hc.HistorySeek(hexutility.MustDecodeHex("0100000000000001"), 0, tx) require.NoError(err) require.True(ok) require.Equal([]byte{}, v) - v, ok, err = hc.GetNoStateWithRecent(hexutility.MustDecodeHex("0100000000000001"), 1000, tx) + v, ok, err = hc.HistorySeek(hexutility.MustDecodeHex("0100000000000001"), 1000, tx) require.NoError(err) require.True(ok) require.Equal(hexutility.MustDecodeHex("ff000000000003e7"), v) @@ -1142,15 +1142,15 @@ func TestIterateChanged2(t *testing.T) { require.NoError(err) defer tx.Rollback() - v, ok, err := hc.GetNoStateWithRecent(hexutility.MustDecodeHex("0100000000000001"), 900, tx) + v, ok, err := hc.HistorySeek(hexutility.MustDecodeHex("0100000000000001"), 900, tx) require.NoError(err) require.True(ok) require.Equal(hexutility.MustDecodeHex("ff00000000000383"), v) - v, ok, err = hc.GetNoStateWithRecent(hexutility.MustDecodeHex("0100000000000001"), 0, tx) + v, ok, err = hc.HistorySeek(hexutility.MustDecodeHex("0100000000000001"), 0, tx) require.NoError(err) require.True(ok) require.Equal([]byte{}, v) - v, ok, err = hc.GetNoStateWithRecent(hexutility.MustDecodeHex("0100000000000001"), 1000, tx) + v, ok, err = hc.HistorySeek(hexutility.MustDecodeHex("0100000000000001"), 1000, tx) require.NoError(err) require.True(ok) require.Equal(hexutility.MustDecodeHex("ff000000000003e7"), v) diff --git a/erigon-lib/state/inverted_index.go b/erigon-lib/state/inverted_index.go index 7e10eef076a..656b678d590 100644 --- a/erigon-lib/state/inverted_index.go +++ b/erigon-lib/state/inverted_index.go @@ -620,7 +620,7 @@ func (iit *InvertedIndexRoTx) statelessIdxReader(i int) *recsplit.IndexReader { return r } -func (iit *InvertedIndexRoTx) Seek(key []byte, txNum uint64) (found bool, equalOrHigherTxNum uint64) { +func (iit *InvertedIndexRoTx) seekInFiles(key []byte, txNum uint64) (found bool, equalOrHigherTxNum uint64) { hi, lo := iit.hashKey(key) for i := 0; i < len(iit.files); i++ { diff --git a/erigon-lib/types/txn.go b/erigon-lib/types/txn.go index 3784c9fd93b..af3e1123f41 100644 --- a/erigon-lib/types/txn.go +++ b/erigon-lib/types/txn.go @@ -1005,7 +1005,7 @@ func UnwrapTxPlayloadRlp(blobTxRlp []byte) ([]byte, error) { if err != nil { return nil, err } - blobTxRlp = blobTxRlp[dataposPrev-1 : datapos+datalen] // Seek left an extra-bit + blobTxRlp = blobTxRlp[dataposPrev-1 : datapos+datalen] // seekInFiles left an extra-bit blobTxRlp[0] = 0x3 // Include the prefix part of the rlp return blobTxRlp, nil diff --git a/eth/stagedsync/stage_custom_trace.go b/eth/stagedsync/stage_custom_trace.go index db739660161..a85064ad117 100644 --- a/eth/stagedsync/stage_custom_trace.go +++ b/eth/stagedsync/stage_custom_trace.go @@ -111,7 +111,7 @@ func SpawnCustomTrace(s *StageState, txc wrap.TxContainer, cfg CustomTraceCfg, c // if err != nil { // return err // } - // lastTotal, ok, err := tx.HistoryGet(kv.GasUsedHistory, key, lastTxNum) + // lastTotal, ok, err := tx.HistorySeek(kv.GasUsedHistory, key, lastTxNum) // if err != nil { // return err // } diff --git a/ethdb/walk.go b/ethdb/walk.go index 006b1785fce..d43135643d9 100644 --- a/ethdb/walk.go +++ b/ethdb/walk.go @@ -27,7 +27,7 @@ import ( // of composite storage key, but without // reconstructing the key // Instead, the key is split into two parts and -// 
functions `Seek` and `Next` deliver both +// functions `seekInFiles` and `Next` deliver both // parts as well as the corresponding value type splitCursor struct { c kv.Cursor // Unlerlying cursor diff --git a/p2p/enode/nodedb.go b/p2p/enode/nodedb.go index 1b9e8e2d5e8..15498b15686 100644 --- a/p2p/enode/nodedb.go +++ b/p2p/enode/nodedb.go @@ -564,7 +564,7 @@ func (db *DB) QuerySeeds(n int, maxAge time.Duration) []*Node { } seek: for seeks := 0; len(nodes) < n && seeks < n*5; seeks++ { - // Seek to a random entry. The first byte is incremented by a + // seekInFiles to a random entry. The first byte is incremented by a // random amount each time in order to increase the likelihood // of hitting all existing nodes in very small databases. ctr := id[0] diff --git a/polygon/p2p/service_mock.go b/polygon/p2p/service_mock.go index f06b58635c1..4414d4f171b 100644 --- a/polygon/p2p/service_mock.go +++ b/polygon/p2p/service_mock.go @@ -13,7 +13,7 @@ import ( context "context" reflect "reflect" - sentry "github.com/ledgerwatch/erigon-lib/gointerfaces/sentryproto" + sentryproto "github.com/ledgerwatch/erigon-lib/gointerfaces/sentryproto" types "github.com/ledgerwatch/erigon/core/types" eth "github.com/ledgerwatch/erigon/eth/protocols/eth" gomock "go.uber.org/mock/gomock" @@ -570,7 +570,7 @@ func (c *MockServiceRegisterNewBlockObserverCall) DoAndReturn(f func(MessageObse } // RegisterPeerEventObserver mocks base method. -func (m *MockService) RegisterPeerEventObserver(observer MessageObserver[*sentry.PeerEvent]) UnregisterFunc { +func (m *MockService) RegisterPeerEventObserver(observer MessageObserver[*sentryproto.PeerEvent]) UnregisterFunc { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "RegisterPeerEventObserver", observer) ret0, _ := ret[0].(UnregisterFunc) @@ -596,13 +596,13 @@ func (c *MockServiceRegisterPeerEventObserverCall) Return(arg0 UnregisterFunc) * } // Do rewrite *gomock.Call.Do -func (c *MockServiceRegisterPeerEventObserverCall) Do(f func(MessageObserver[*sentry.PeerEvent]) UnregisterFunc) *MockServiceRegisterPeerEventObserverCall { +func (c *MockServiceRegisterPeerEventObserverCall) Do(f func(MessageObserver[*sentryproto.PeerEvent]) UnregisterFunc) *MockServiceRegisterPeerEventObserverCall { c.Call = c.Call.Do(f) return c } // DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockServiceRegisterPeerEventObserverCall) DoAndReturn(f func(MessageObserver[*sentry.PeerEvent]) UnregisterFunc) *MockServiceRegisterPeerEventObserverCall { +func (c *MockServiceRegisterPeerEventObserverCall) DoAndReturn(f func(MessageObserver[*sentryproto.PeerEvent]) UnregisterFunc) *MockServiceRegisterPeerEventObserverCall { c.Call = c.Call.DoAndReturn(f) return c } diff --git a/turbo/jsonrpc/otterscan_contract_creator.go b/turbo/jsonrpc/otterscan_contract_creator.go index 2029cfc1135..409f7cfaba2 100644 --- a/turbo/jsonrpc/otterscan_contract_creator.go +++ b/turbo/jsonrpc/otterscan_contract_creator.go @@ -81,7 +81,7 @@ func (api *OtterscanAPIImpl) GetContractCreator(ctx context.Context, addr common continue } - v, ok, err := ttx.HistoryGet(kv.AccountsHistory, addr[:], txnID) + v, ok, err := ttx.HistorySeek(kv.AccountsHistory, addr[:], txnID) if err != nil { log.Error("Unexpected error, couldn't find changeset", "txNum", txnID, "addr", addr) return nil, err @@ -122,7 +122,7 @@ func (api *OtterscanAPIImpl) GetContractCreator(ctx context.Context, addr common // can be replaced by full-scan over ttx.HistoryRange([prevTxnID, nextTxnID])? 
idx := sort.Search(int(nextTxnID-prevTxnID), func(i int) bool { txnID := uint64(i) + prevTxnID - v, ok, err := ttx.HistoryGet(kv.AccountsHistory, addr[:], txnID) + v, ok, err := ttx.HistorySeek(kv.AccountsHistory, addr[:], txnID) if err != nil { log.Error("[rpc] Unexpected error, couldn't find changeset", "txNum", i, "addr", addr) panic(err) diff --git a/turbo/jsonrpc/otterscan_transaction_by_sender_and_nonce.go b/turbo/jsonrpc/otterscan_transaction_by_sender_and_nonce.go index 321aec8464b..1a04ec23211 100644 --- a/turbo/jsonrpc/otterscan_transaction_by_sender_and_nonce.go +++ b/turbo/jsonrpc/otterscan_transaction_by_sender_and_nonce.go @@ -45,7 +45,7 @@ func (api *OtterscanAPIImpl) GetTransactionBySenderAndNonce(ctx context.Context, continue } - v, ok, err := ttx.HistoryGet(kv.AccountsHistory, addr[:], txnID) + v, ok, err := ttx.HistorySeek(kv.AccountsHistory, addr[:], txnID) if err != nil { log.Error("Unexpected error, couldn't find changeset", "txNum", i, "addr", addr) return nil, err @@ -85,7 +85,7 @@ func (api *OtterscanAPIImpl) GetTransactionBySenderAndNonce(ctx context.Context, // can be replaced by full-scan over ttx.HistoryRange([prevTxnID, nextTxnID])? idx := sort.Search(int(nextTxnID-prevTxnID), func(i int) bool { txnID := uint64(i) + prevTxnID - v, ok, err := ttx.HistoryGet(kv.AccountsHistory, addr[:], txnID) + v, ok, err := ttx.HistorySeek(kv.AccountsHistory, addr[:], txnID) if err != nil { log.Error("[rpc] Unexpected error, couldn't find changeset", "txNum", i, "addr", addr) panic(err) diff --git a/turbo/trie/trie_root.go b/turbo/trie/trie_root.go index 6696ecaf954..62e7a579602 100644 --- a/turbo/trie/trie_root.go +++ b/turbo/trie/trie_root.go @@ -828,7 +828,7 @@ func (c *AccTrieCursor) _seek(seek []byte, withinPrefix []byte) (bool, error) { // optimistic .Next call, can use result in 2 cases: // - k is not child of current key // - looking for first child, means: c.childID[c.lvl] <= int16(bits.TrailingZeros16(c.hasTree[c.lvl])) - // otherwise do .Seek call + // otherwise do .seekInFiles call //k, v, err = c.c.Next() //if err != nil { // return false, err From 9df2928758d121fdd8ee3a948fa7e5d49bed2ed8 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Fri, 10 May 2024 13:13:26 +0700 Subject: [PATCH 13/48] e3: mainnet v4 1408steps (#10260) Pick https://github.com/ledgerwatch/erigon-snapshot/pull/161 --------- Co-authored-by: awskii --- erigon-lib/go.mod | 2 +- erigon-lib/go.sum | 4 ++-- go.mod | 2 +- go.sum | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/erigon-lib/go.mod b/erigon-lib/go.mod index 2d2a49177b6..27d6814cfcf 100644 --- a/erigon-lib/go.mod +++ b/erigon-lib/go.mod @@ -4,7 +4,7 @@ go 1.21 require ( github.com/erigontech/mdbx-go v0.38.0 - github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240509080801-633688230242 + github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240510043237-3afa4a26e31d github.com/ledgerwatch/interfaces v0.0.0-20240510032129-13f644ca2307 github.com/ledgerwatch/log/v3 v3.9.0 github.com/ledgerwatch/secp256k1 v1.0.0 diff --git a/erigon-lib/go.sum b/erigon-lib/go.sum index c02a381b093..822c6c02951 100644 --- a/erigon-lib/go.sum +++ b/erigon-lib/go.sum @@ -270,8 +270,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-snapshot 
v1.3.1-0.20240509080801-633688230242 h1:EvaRNRzZc8OXCWutZnq93kdSSP9AiVuozwI8nTjeEYA= -github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240509080801-633688230242/go.mod h1:3AuPxZc85jkehh/HA9h8gabv5MSi3kb/ddtzBsTVJFo= +github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240510043237-3afa4a26e31d h1:pOxi2nvq6A5cQ2k7n08tXTqVxNAvbUTuzF8bZhb6ooQ= +github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240510043237-3afa4a26e31d/go.mod h1:3AuPxZc85jkehh/HA9h8gabv5MSi3kb/ddtzBsTVJFo= github.com/ledgerwatch/interfaces v0.0.0-20240510032129-13f644ca2307 h1:v2syJaHSCTSEnzwFUW4F6FL92ZAnKEoyBesnm2E/IEU= github.com/ledgerwatch/interfaces v0.0.0-20240510032129-13f644ca2307/go.mod h1:ugQv1QllJzBny3cKZKxUrSnykkjkBgm27eQM6dnGAcc= github.com/ledgerwatch/log/v3 v3.9.0 h1:iDwrXe0PVwBC68Dd94YSsHbMgQ3ufsgjzXtFNFVZFRk= diff --git a/go.mod b/go.mod index ef7eb01df5c..30eaf71de40 100644 --- a/go.mod +++ b/go.mod @@ -186,7 +186,7 @@ require ( github.com/koron/go-ssdp v0.0.4 // indirect github.com/kr/pretty v0.3.1 // indirect github.com/kr/text v0.2.0 // indirect - github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240509080801-633688230242 // indirect + github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240510043237-3afa4a26e31d // indirect github.com/ledgerwatch/erigonwatch v0.1.0 github.com/libp2p/go-buffer-pool v0.1.0 // indirect github.com/libp2p/go-cidranger v1.1.0 // indirect diff --git a/go.sum b/go.sum index 13d2c7d6f87..6539a0b7ba4 100644 --- a/go.sum +++ b/go.sum @@ -539,8 +539,8 @@ github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3P github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240509080801-633688230242 h1:EvaRNRzZc8OXCWutZnq93kdSSP9AiVuozwI8nTjeEYA= -github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240509080801-633688230242/go.mod h1:3AuPxZc85jkehh/HA9h8gabv5MSi3kb/ddtzBsTVJFo= +github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240510043237-3afa4a26e31d h1:pOxi2nvq6A5cQ2k7n08tXTqVxNAvbUTuzF8bZhb6ooQ= +github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240510043237-3afa4a26e31d/go.mod h1:3AuPxZc85jkehh/HA9h8gabv5MSi3kb/ddtzBsTVJFo= github.com/ledgerwatch/erigonwatch v0.1.0 h1:TrCjklOu9ZI9/uiMigo1Jnknnk1I/dXUxXymA3xHfzo= github.com/ledgerwatch/erigonwatch v0.1.0/go.mod h1:uYq4hs3RL1OtIYRXAxYq02tpdGkx6rtXlpzdazDDbWI= github.com/ledgerwatch/log/v3 v3.9.0 h1:iDwrXe0PVwBC68Dd94YSsHbMgQ3ufsgjzXtFNFVZFRk= From fb4d023bec9d966bc5ad9c5305e61a06e8c0dd13 Mon Sep 17 00:00:00 2001 From: Michelangelo Riccobene Date: Fri, 10 May 2024 10:43:46 +0200 Subject: [PATCH 14/48] qa-tests: avoid --internalcl flag with Erigon3 (#10272) The recent removal of the `--internalcl` flag has introduced challenges to the Erigon launch command line during testing. This PR introduces the ability to handle command lines that vary slightly depending on the software version. It is crucial that the command line remains linked to the workflow because it is specific to a repository commit. At the same time, the command line must be determined by the test script, which needs to be aware of certain parameters that influence measurements or operations. For this iteration, we have opted to pass the Erigon version to the script, allowing it to decide on the appropriate command line configuration. 
This approach ensures that the test environment is both adaptable and precise, aligning with specific version requirements. Over time, we will determine whether this approach is adequate. --- .github/workflows/qa-clean-exit-block-downloading.yml | 2 +- .github/workflows/qa-clean-exit-snapshot-downloading.yml | 2 +- .github/workflows/qa-snap-download.yml | 2 +- .github/workflows/qa-tip-tracking.yml | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/qa-clean-exit-block-downloading.yml b/.github/workflows/qa-clean-exit-block-downloading.yml index 1f3c9f406a0..345bdfa4c1b 100644 --- a/.github/workflows/qa-clean-exit-block-downloading.yml +++ b/.github/workflows/qa-clean-exit-block-downloading.yml @@ -49,7 +49,7 @@ jobs: set +e # Disable exit on error # Run Erigon, send ctrl-c and check logs - python3 $ERIGON_QA_PATH/test_system/qa-tests/clean-exit/run_and_check_clean_exit.py ${{ github.workspace }}/build/bin $ERIGON_TESTBED_DATA_DIR $WORKING_TIME_SECONDS + python3 $ERIGON_QA_PATH/test_system/qa-tests/clean-exit/run_and_check_clean_exit.py ${{ github.workspace }}/build/bin $ERIGON_TESTBED_DATA_DIR $WORKING_TIME_SECONDS Erigon3 # Capture monitoring script exit status test_exit_status=$? diff --git a/.github/workflows/qa-clean-exit-snapshot-downloading.yml b/.github/workflows/qa-clean-exit-snapshot-downloading.yml index 79819132cd1..0ba3307b397 100644 --- a/.github/workflows/qa-clean-exit-snapshot-downloading.yml +++ b/.github/workflows/qa-clean-exit-snapshot-downloading.yml @@ -45,7 +45,7 @@ jobs: set +e # Disable exit on error # Run Erigon, send ctrl-c and check logs - python3 $ERIGON_QA_PATH/test_system/qa-tests/clean-exit/run_and_check_clean_exit.py ${{ github.workspace }}/build/bin $ERIGON_DATA_DIR $WORKING_TIME_SECONDS + python3 $ERIGON_QA_PATH/test_system/qa-tests/clean-exit/run_and_check_clean_exit.py ${{ github.workspace }}/build/bin $ERIGON_DATA_DIR $WORKING_TIME_SECONDS Erigon3 # Capture monitoring script exit status test_exit_status=$? diff --git a/.github/workflows/qa-snap-download.yml b/.github/workflows/qa-snap-download.yml index 146cfa66fbd..2b5039fa015 100644 --- a/.github/workflows/qa-snap-download.yml +++ b/.github/workflows/qa-snap-download.yml @@ -39,7 +39,7 @@ jobs: set +e # Disable exit on error # Run Erigon, monitor snapshot downloading and check logs - python3 $ERIGON_QA_PATH/test_system/qa-tests/snap-download/run_and_check_snap_download.py ${{ github.workspace }}/build/bin $ERIGON_DATA_DIR $TOTAL_TIME_SECONDS + python3 $ERIGON_QA_PATH/test_system/qa-tests/snap-download/run_and_check_snap_download.py ${{ github.workspace }}/build/bin $ERIGON_DATA_DIR $TOTAL_TIME_SECONDS Erigon3 # Capture monitoring script exit status test_exit_status=$? diff --git a/.github/workflows/qa-tip-tracking.yml b/.github/workflows/qa-tip-tracking.yml index c9a45413276..0ac40b50cef 100644 --- a/.github/workflows/qa-tip-tracking.yml +++ b/.github/workflows/qa-tip-tracking.yml @@ -45,7 +45,7 @@ jobs: # 1. Launch the testbed Erigon instance # 2. Allow time for the Erigon to achieve synchronization # 3. 
Begin timing the duration that Erigon maintains synchronization
-          python3 $ERIGON_QA_PATH/test_system/qa-tests/tip-tracking/run_and_check_tip_tracking.py ${{ github.workspace }}/build/bin $ERIGON_TESTBED_DATA_DIR $TRACKING_TIME_SECONDS $TOTAL_TIME_SECONDS
+          python3 $ERIGON_QA_PATH/test_system/qa-tests/tip-tracking/run_and_check_tip_tracking.py ${{ github.workspace }}/build/bin $ERIGON_TESTBED_DATA_DIR $TRACKING_TIME_SECONDS $TOTAL_TIME_SECONDS Erigon3

           # Capture monitoring script exit status
           test_exit_status=$?

From a727a34dc77ab0665dc9be210df6a2d149be51be Mon Sep 17 00:00:00 2001
From: Giulio rebuffo
Date: Fri, 10 May 2024 14:49:42 +0200
Subject: [PATCH 15/48] e3: adding the proper pruned node (known as full node) (#10165)

This PR adds the following feature:

## Limited downloading of snapshot files

If the user requests a pruned node, we avoid downloading the snapshot files that are below the requested range. We do this by building a `blacklist` and then simply skipping all snapshots in that blacklist.

#### IMPORTANT

Due to the different units of measurement (block number vs step size), we need to first download headers and bodies, and then convert the number of blocks to keep into steps by iterating over the bodies snapshots previously downloaded.

### Pruning of block snapshots

Blocks can now be pruned through the `Delete` method added to the `Snapshots` struct. This happens as part of block retirement, at the end, after `retireBlocks`, so that we do not cause nasty race conditions. It is injected through a function reference in the `retire` flow.
---
 .../block_collector/block_collector.go        |   1 +
 cl/phase1/stages/stage_history_download.go    |   1 +
 cmd/capcli/cli.go                             |  75 ------
 cmd/integration/commands/flags.go             |  48 ++--
 cmd/integration/commands/stages.go            |   6 +-
 erigon-lib/kv/tables.go                       |   2 +
 eth/stagedsync/stage_snapshots.go             |  88 ++++++-
 ethdb/prune/storage_mode.go                   |  35 ++-
 ethdb/prune/storage_mode_test.go              |   6 +-
 go.mod                                        |   2 +-
 params/network_params.go                      |   2 +-
 turbo/app/snapshots_cmd.go                    |   2 +-
 turbo/cli/default_flags.go                    |   2 +
 turbo/cli/flags.go                            |  44 +++-
 turbo/execution/eth1/block_building.go        |   2 -
 turbo/execution/eth1/inserters.go             |   5 +
 turbo/services/interfaces.go                  |   4 +-
 .../snapshotsync/freezeblocks/block_reader.go |   1 -
 .../freezeblocks/block_snapshots.go           |  67 ++++-
 .../freezeblocks/block_snapshots_test.go      | 119 ++++++---
 turbo/snapshotsync/snapshotsync.go            | 243 +++++++++++++++++-
 turbo/snapshotsync/snapshotsync_test.go       |  43 ++++
 turbo/stages/mock/mock_sentry.go              |   2 +-
 turbo/stages/stageloop.go                     |   7 +-
 24 files changed, 628 insertions(+), 179 deletions(-)
 create mode 100644 turbo/snapshotsync/snapshotsync_test.go

diff --git a/cl/phase1/execution_client/block_collector/block_collector.go b/cl/phase1/execution_client/block_collector/block_collector.go
index 933dbba2118..5d2b0b9d46e 100644
--- a/cl/phase1/execution_client/block_collector/block_collector.go
+++ b/cl/phase1/execution_client/block_collector/block_collector.go
@@ -111,6 +111,7 @@ func (b *blockCollector) Flush(ctx context.Context) error {
 			}
 			blocksBatch = append(blocksBatch, types.NewBlockFromStorage(executionPayload.BlockHash, header, txs, nil, body.Withdrawals))
 			if len(blocksBatch) >= batchSize {
+				b.logger.Info("[Caplin] Inserting blocks", "from", blocksBatch[0].NumberU64(), "to", blocksBatch[len(blocksBatch)-1].NumberU64())
 				if err := b.engine.InsertBlocks(ctx, blocksBatch, true); err != nil {
 					b.logger.Warn("failed to insert blocks", "err", err)
 				}
diff --git a/cl/phase1/stages/stage_history_download.go
b/cl/phase1/stages/stage_history_download.go index 8f379d05597..6c5ad919035 100644 --- a/cl/phase1/stages/stage_history_download.go +++ b/cl/phase1/stages/stage_history_download.go @@ -170,6 +170,7 @@ func SpawnStageHistoryDownload(cfg StageHistoryReconstructionCfg, ctx context.Co logArgs = append(logArgs, "slot", currProgress, "blockNumber", currEth1Progress.Load(), + "frozenBlocks", cfg.engine.FrozenBlocks(ctx), "blk/sec", fmt.Sprintf("%.1f", speed), "snapshots", cfg.sn.SegmentsMax(), ) diff --git a/cmd/capcli/cli.go b/cmd/capcli/cli.go index d9b43f84a82..f088b60279f 100644 --- a/cmd/capcli/cli.go +++ b/cmd/capcli/cli.go @@ -18,17 +18,7 @@ import ( libcommon "github.com/ledgerwatch/erigon-lib/common" - lg "github.com/anacrolix/log" - - "github.com/ledgerwatch/erigon-lib/direct" - downloader3 "github.com/ledgerwatch/erigon-lib/downloader" "github.com/ledgerwatch/erigon-lib/metrics" - state2 "github.com/ledgerwatch/erigon-lib/state" - - "github.com/c2h5oh/datasize" - - "github.com/ledgerwatch/erigon-lib/chain/snapcfg" - "github.com/ledgerwatch/erigon-lib/downloader" "github.com/ledgerwatch/erigon/cl/antiquary" "github.com/ledgerwatch/erigon/cl/clparams" @@ -37,12 +27,9 @@ import ( "github.com/ledgerwatch/erigon/cmd/caplin/caplin1" "github.com/ledgerwatch/erigon/eth/ethconfig" "github.com/ledgerwatch/erigon/eth/ethconfig/estimate" - "github.com/ledgerwatch/erigon/params" - "github.com/ledgerwatch/erigon/turbo/snapshotsync" "github.com/ledgerwatch/erigon/turbo/snapshotsync/freezeblocks" "github.com/ledgerwatch/erigon-lib/common/datadir" - "github.com/ledgerwatch/erigon-lib/downloader/downloadercfg" "github.com/ledgerwatch/erigon-lib/downloader/snaptype" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/cl/persistence/beacon_indicies" @@ -69,7 +56,6 @@ var CLI struct { Chain Chain `cmd:"" help:"download the entire chain from reqresp network"` DumpSnapshots DumpSnapshots `cmd:"" help:"generate caplin snapshots"` CheckSnapshots CheckSnapshots `cmd:"" help:"check snapshot folder against content of chain data"` - DownloadSnapshots DownloadSnapshots `cmd:"" help:"download snapshots from webseed"` LoopSnapshots LoopSnapshots `cmd:"" help:"loop over snapshots"` RetrieveHistoricalState RetrieveHistoricalState `cmd:"" help:"retrieve historical state from db"` ChainEndpoint ChainEndpoint `cmd:"" help:"chain endpoint"` @@ -473,67 +459,6 @@ func (c *LoopSnapshots) Run(ctx *Context) error { return nil } -type DownloadSnapshots struct { - chainCfg - outputFolder -} - -func (d *DownloadSnapshots) Run(ctx *Context) error { - webSeeds := snapcfg.KnownWebseeds[d.Chain] - dirs := datadir.New(d.Datadir) - - _, beaconConfig, _, err := clparams.GetConfigsByNetworkName(d.Chain) - if err != nil { - return err - } - - log.Root().SetHandler(log.LvlFilterHandler(log.LvlDebug, log.StderrHandler)) - - db, _, err := caplin1.OpenCaplinDatabase(ctx, db_config.DatabaseConfiguration{PruneDepth: math.MaxUint64}, beaconConfig, nil, dirs.CaplinIndexing, dirs.CaplinBlobs, nil, false, 0) - if err != nil { - return err - } - tx, err := db.BeginRw(ctx) - if err != nil { - return err - } - defer tx.Rollback() - downloadRate, err := datasize.ParseString("16mb") - if err != nil { - return err - } - - uploadRate, err := datasize.ParseString("0mb") - if err != nil { - return err - } - version := "erigon: " + params.VersionWithCommit(params.GitCommit) - - downloaderCfg, err := downloadercfg.New(dirs, version, lg.Info, downloadRate, uploadRate, 42069, 10, 3, nil, webSeeds, d.Chain, true) - if err != nil { - return err - 
} - downlo, err := downloader.New(ctx, downloaderCfg, log.Root(), log.LvlInfo, true) - if err != nil { - return err - } - s, err := state2.NewAggregator(ctx, dirs, 200000, db, log.Root()) - if err != nil { - return err - } - downlo.MainLoopInBackground(false) - bittorrentServer, err := downloader3.NewGrpcServer(downlo) - if err != nil { - return fmt.Errorf("new server: %w", err) - } - - return snapshotsync.WaitForDownloader(ctx, "CapCliDownloader", false, false, snapshotsync.OnlyCaplin, s, tx, - freezeblocks.NewBlockReader( - freezeblocks.NewRoSnapshots(ethconfig.NewSnapCfg(false, false, false), dirs.Snap, 0, log.Root()), - freezeblocks.NewBorRoSnapshots(ethconfig.NewSnapCfg(false, false, false), dirs.Snap, 0, log.Root())), - params.ChainConfigByChainName(d.Chain), direct.NewDownloaderClient(bittorrentServer), []string{}) -} - type RetrieveHistoricalState struct { chainCfg outputFolder diff --git a/cmd/integration/commands/flags.go b/cmd/integration/commands/flags.go index db19c885865..c1ace6d7387 100644 --- a/cmd/integration/commands/flags.go +++ b/cmd/integration/commands/flags.go @@ -10,30 +10,30 @@ import ( ) var ( - chaindata string - databaseVerbosity int - referenceChaindata string - block, pruneTo, unwind uint64 - unwindEvery uint64 - batchSizeStr string - reset, warmup, noCommit bool - resetPruneAt bool - bucket string - datadirCli, toChaindata string - migration string - squeezeCommitmentFiles bool - integrityFast, integritySlow bool - file string - HeimdallURL string - txtrace bool // Whether to trace the execution (should only be used together with `block`) - pruneFlag string - pruneH, pruneR, pruneT, pruneC uint64 - pruneHBefore, pruneRBefore uint64 - pruneTBefore, pruneCBefore uint64 - experiments []string - unwindTypes []string - chain string // Which chain to use (mainnet, goerli, sepolia, etc.) - outputCsvFile string + chaindata string + databaseVerbosity int + referenceChaindata string + block, pruneTo, unwind uint64 + unwindEvery uint64 + batchSizeStr string + reset, warmup, noCommit bool + resetPruneAt bool + bucket string + datadirCli, toChaindata string + migration string + squeezeCommitmentFiles bool + integrityFast, integritySlow bool + file string + HeimdallURL string + txtrace bool // Whether to trace the execution (should only be used together with `block`) + pruneFlag string + pruneB, pruneH, pruneR, pruneT, pruneC uint64 + pruneBBefore, pruneHBefore, pruneRBefore uint64 + pruneTBefore, pruneCBefore uint64 + experiments []string + unwindTypes []string + chain string // Which chain to use (mainnet, goerli, sepolia, etc.) 
+ outputCsvFile string commitmentMode string commitmentTrie string diff --git a/cmd/integration/commands/stages.go b/cmd/integration/commands/stages.go index 891b5518e5c..3414e4a7e10 100644 --- a/cmd/integration/commands/stages.go +++ b/cmd/integration/commands/stages.go @@ -776,10 +776,12 @@ func init() { withDataDir(cmdSetPrune) withChain(cmdSetPrune) cmdSetPrune.Flags().StringVar(&pruneFlag, "prune", "hrtc", "") + cmdSetPrune.Flags().Uint64Var(&pruneB, "prune.b.older", 0, "") cmdSetPrune.Flags().Uint64Var(&pruneH, "prune.h.older", 0, "") cmdSetPrune.Flags().Uint64Var(&pruneR, "prune.r.older", 0, "") cmdSetPrune.Flags().Uint64Var(&pruneT, "prune.t.older", 0, "") cmdSetPrune.Flags().Uint64Var(&pruneC, "prune.c.older", 0, "") + cmdSetPrune.Flags().Uint64Var(&pruneBBefore, "prune.b.before", 0, "") cmdSetPrune.Flags().Uint64Var(&pruneHBefore, "prune.h.before", 0, "") cmdSetPrune.Flags().Uint64Var(&pruneRBefore, "prune.r.before", 0, "") cmdSetPrune.Flags().Uint64Var(&pruneTBefore, "prune.t.before", 0, "") @@ -2005,8 +2007,8 @@ func stage(st *stagedsync.Sync, tx kv.Tx, db kv.RoDB, stage stages.SyncStage) *s func overrideStorageMode(db kv.RwDB, logger log.Logger) error { chainConfig := fromdb.ChainConfig(db) - pm, err := prune.FromCli(chainConfig.ChainID.Uint64(), pruneFlag, pruneH, pruneR, pruneT, pruneC, - pruneHBefore, pruneRBefore, pruneTBefore, pruneCBefore, experiments) + pm, err := prune.FromCli(chainConfig.ChainID.Uint64(), pruneFlag, pruneB, pruneH, pruneR, pruneT, pruneC, + pruneHBefore, pruneRBefore, pruneTBefore, pruneCBefore, pruneBBefore, experiments) if err != nil { return err } diff --git a/erigon-lib/kv/tables.go b/erigon-lib/kv/tables.go index 204fa32eec3..a68731692e1 100644 --- a/erigon-lib/kv/tables.go +++ b/erigon-lib/kv/tables.go @@ -528,6 +528,8 @@ var ( PruneTxIndexType = []byte("pruneTxIndexType") PruneCallTraces = []byte("pruneCallTraces") PruneCallTracesType = []byte("pruneCallTracesType") + PruneBlocks = []byte("pruneBlocks") + PruneBlocksType = []byte("pruneBlocksType") DBSchemaVersionKey = []byte("dbVersion") diff --git a/eth/stagedsync/stage_snapshots.go b/eth/stagedsync/stage_snapshots.go index 1304d93e738..5743f983d9b 100644 --- a/eth/stagedsync/stage_snapshots.go +++ b/eth/stagedsync/stage_snapshots.go @@ -44,6 +44,7 @@ import ( "github.com/ledgerwatch/erigon/eth/ethconfig" "github.com/ledgerwatch/erigon/eth/ethconfig/estimate" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" + "github.com/ledgerwatch/erigon/ethdb/prune" borsnaptype "github.com/ledgerwatch/erigon/polygon/bor/snaptype" "github.com/ledgerwatch/erigon/rpc" "github.com/ledgerwatch/erigon/turbo/services" @@ -70,6 +71,7 @@ type SnapshotsCfg struct { silkworm *silkworm.Silkworm snapshotUploader *snapshotUploader syncConfig ethconfig.Sync + prune prune.Mode } func StageSnapshotsCfg(db kv.RwDB, @@ -85,6 +87,7 @@ func StageSnapshotsCfg(db kv.RwDB, caplin bool, blobs bool, silkworm *silkworm.Silkworm, + prune prune.Mode, ) SnapshotsCfg { cfg := SnapshotsCfg{ db: db, @@ -100,6 +103,7 @@ func StageSnapshotsCfg(db kv.RwDB, silkworm: silkworm, syncConfig: syncConfig, blobs: blobs, + prune: prune, } if uploadFs := cfg.syncConfig.UploadLocation; len(uploadFs) > 0 { @@ -154,7 +158,6 @@ func SpawnStageSnapshots( } defer tx.Rollback() } - if err := DownloadAndIndexSnapshotsIfNeed(s, ctx, tx, cfg, initialCycle, logger); err != nil { return err } @@ -234,7 +237,16 @@ func DownloadAndIndexSnapshotsIfNeed(s *StageState, ctx context.Context, tx kv.R } } } else { - if err := snapshotsync.WaitForDownloader(ctx, 
s.LogPrefix(), cfg.historyV3, cfg.blobs, cstate, cfg.agg, tx, cfg.blockReader, &cfg.chainConfig, cfg.snapshotDownloader, s.state.StagesIdsList()); err != nil {
+
+		// Download only the snapshots that are for the header chain.
+		if err := snapshotsync.WaitForDownloader(ctx, s.LogPrefix() /*headerChain=*/, true, cfg.historyV3, cfg.blobs, cfg.prune, cstate, cfg.agg, tx, cfg.blockReader, &cfg.chainConfig, cfg.snapshotDownloader, s.state.StagesIdsList()); err != nil {
+			return err
+		}
+		if err := cfg.blockReader.Snapshots().ReopenSegments([]snaptype.Type{coresnaptype.Headers, coresnaptype.Bodies}, true); err != nil {
+			return err
+		}
+
+		if err := snapshotsync.WaitForDownloader(ctx, s.LogPrefix() /*headerChain=*/, false, cfg.historyV3, cfg.blobs, cfg.prune, cstate, cfg.agg, tx, cfg.blockReader, &cfg.chainConfig, cfg.snapshotDownloader, s.state.StagesIdsList()); err != nil {
 			return err
 		}
 	}
@@ -418,6 +430,11 @@ func FillDBFromSnapshots(logPrefix string, ctx context.Context, tx kv.RwTx, dirs
 	return nil
 }

+func computeBlocksToPrune(cfg SnapshotsCfg) (blocksToPrune uint64, historyToPrune uint64) {
+	frozenBlocks := cfg.blockReader.Snapshots().SegmentsMax()
+	return frozenBlocks - cfg.prune.Blocks.PruneTo(frozenBlocks), frozenBlocks - cfg.prune.History.PruneTo(frozenBlocks)
+}
+
 /* ====== PRUNING ====== */
 // snapshots pruning sections works more as a retiring of blocks
 // retiring blocks means moving block data from db into snapshots
@@ -477,6 +495,12 @@ func SnapshotsPrune(s *PruneState, initialCycle bool, cfg SnapshotsCfg, ctx cont
 		}

 		return nil
+	}, func() error {
+		filesDeleted, err := pruneBlockSnapshots(ctx, cfg, logger)
+		if filesDeleted && cfg.notifier != nil {
+			cfg.notifier.Events.OnNewSnapshot()
+		}
+		return err
 	})

 	//cfg.agg.BuildFilesInBackground()
@@ -522,6 +546,66 @@ func SnapshotsPrune(s *PruneState, initialCycle bool, cfg SnapshotsCfg, ctx cont
 	return nil
 }

+func pruneBlockSnapshots(ctx context.Context, cfg SnapshotsCfg, logger log.Logger) (bool, error) {
+	tx, err := cfg.db.BeginRo(ctx)
+	if err != nil {
+		return false, err
+	}
+	defer tx.Rollback()
+	// Prune snapshots if necessary (remove .segs or idx files appropriately)
+	headNumber := cfg.blockReader.FrozenBlocks()
+	executionProgress, err := stages.GetStageProgress(tx, stages.Execution)
+	if err != nil {
+		return false, err
+	}
+	// If we are behind the execution stage, we should not prune snapshots
+	if headNumber > executionProgress {
+		return false, nil
+	}
+
+	// Keep at least 2 block snapshots as we do not want FrozenBlocks to be 0
+	pruneAmount, _ := computeBlocksToPrune(cfg)
+	if pruneAmount == 0 {
+		return false, nil
+	}
+
+	minBlockNumberToKeep := uint64(0)
+	if headNumber > pruneAmount {
+		minBlockNumberToKeep = headNumber - pruneAmount
+	}
+
+	snapshotFileNames := cfg.blockReader.FrozenFiles()
+	filesDeleted := false
+	// Prune block snapshots if necessary
+	for _, file := range snapshotFileNames {
+		if !cfg.prune.Blocks.Enabled() || headNumber == 0 || !strings.Contains(file, "transactions") {
+			continue
+		}
+
+		// take the snapshot file name and parse it to get the "from"
+		info, _, ok := snaptype.ParseFileName(cfg.dirs.Snap, file)
+		if !ok {
+			continue
+		}
+		if info.To >= minBlockNumberToKeep {
+			continue
+		}
+		if info.To-info.From != snaptype.Erigon2MergeLimit {
+			continue
+		}
+		if cfg.snapshotDownloader != nil {
+			if _, err := cfg.snapshotDownloader.Delete(ctx, &protodownloader.DeleteRequest{Paths:
[]string{file}}); err != nil { + return filesDeleted, err + } + } + if err := cfg.blockReader.Snapshots().Delete(file); err != nil { + return filesDeleted, err + } + filesDeleted = true + } + return filesDeleted, nil +} + type uploadState struct { sync.Mutex file string diff --git a/ethdb/prune/storage_mode.go b/ethdb/prune/storage_mode.go index c1fc3be5242..616f7d98240 100644 --- a/ethdb/prune/storage_mode.go +++ b/ethdb/prune/storage_mode.go @@ -20,14 +20,15 @@ var DefaultMode = Mode{ Receipts: Distance(math.MaxUint64), TxIndex: Distance(math.MaxUint64), CallTraces: Distance(math.MaxUint64), + Blocks: Distance(math.MaxUint64), Experiments: Experiments{}, // all off } type Experiments struct { } -func FromCli(chainId uint64, flags string, exactHistory, exactReceipts, exactTxIndex, exactCallTraces, - beforeH, beforeR, beforeT, beforeC uint64, experiments []string) (Mode, error) { +func FromCli(chainId uint64, flags string, exactBlocks, exactHistory, exactReceipts, exactTxIndex, exactCallTraces, + beforeB, beforeH, beforeR, beforeT, beforeC uint64, experiments []string) (Mode, error) { mode := DefaultMode if flags != "default" && flags != "disabled" { @@ -41,12 +42,17 @@ func FromCli(chainId uint64, flags string, exactHistory, exactReceipts, exactTxI mode.TxIndex = Distance(params.FullImmutabilityThreshold) case 'c': mode.CallTraces = Distance(params.FullImmutabilityThreshold) + case 'b': + mode.Blocks = Distance(params.FullImmutabilityThreshold) default: return DefaultMode, fmt.Errorf("unexpected flag found: %c", flag) } } } + if exactBlocks > 0 { + mode.Blocks = Distance(exactBlocks) + } if exactHistory > 0 { mode.History = Distance(exactHistory) } @@ -72,6 +78,9 @@ func FromCli(chainId uint64, flags string, exactHistory, exactReceipts, exactTxI if beforeC > 0 { mode.CallTraces = Before(beforeC) } + if beforeB > 0 { + mode.Blocks = Before(beforeB) + } for _, ex := range experiments { switch ex { @@ -120,6 +129,14 @@ func Get(db kv.Getter) (Mode, error) { prune.CallTraces = blockAmount } + blockAmount, err = get(db, kv.PruneBlocks) + if err != nil { + return prune, err + } + if blockAmount != nil { + prune.Blocks = blockAmount + } + return prune, nil } @@ -129,6 +146,7 @@ type Mode struct { Receipts BlockAmount TxIndex BlockAmount CallTraces BlockAmount + Blocks BlockAmount Experiments Experiments } @@ -194,6 +212,13 @@ func (m Mode) String() string { long += fmt.Sprintf(" --prune.h.%s=%d", m.History.dbType(), m.History.toValue()) } } + if m.Blocks.Enabled() { + if m.Blocks.useDefaultValue() { + short += fmt.Sprintf(" --prune.b.older=%d", defaultVal) + } else { + long += fmt.Sprintf(" --prune.b.%s=%d", m.Blocks.dbType(), m.Blocks.toValue()) + } + } if m.Receipts.Enabled() { if m.Receipts.useDefaultValue() { short += fmt.Sprintf(" --prune.r.older=%d", defaultVal) @@ -244,6 +269,11 @@ func Override(db kv.RwTx, sm Mode) error { return err } + err = set(db, kv.PruneBlocks, sm.Blocks) + if err != nil { + return err + } + return nil } @@ -290,6 +320,7 @@ func setIfNotExist(db kv.GetPut, pm Mode) error { string(kv.PruneReceipts): pm.Receipts, string(kv.PruneTxIndex): pm.TxIndex, string(kv.PruneCallTraces): pm.CallTraces, + string(kv.PruneBlocks): pm.Blocks, } for key, value := range pruneDBData { diff --git a/ethdb/prune/storage_mode_test.go b/ethdb/prune/storage_mode_test.go index a5aeca248ac..bdddb99e93d 100644 --- a/ethdb/prune/storage_mode_test.go +++ b/ethdb/prune/storage_mode_test.go @@ -15,16 +15,16 @@ func TestSetStorageModeIfNotExist(t *testing.T) { prune, err := Get(tx) 
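// NOTE (editorial, hedged): the positional values in the Mode literals below
// appear to map, in order, to an initialised flag (the leading bool, whose
// declaration is not visible in this patch's hunks), then History, Receipts,
// TxIndex, CallTraces, the Blocks amount added by this patch, and Experiments.
// A hypothetical equivalent with named fields, assuming the leading field is
// called Initialised:
//
//	mode := Mode{
//		Initialised: true,        // assumed field name, not shown in this diff
//		History:     Distance(1), // keep only the last 1 block of history
//		Receipts:    Distance(2),
//		TxIndex:     Before(3),   // prune the tx index before block 3
//		CallTraces:  Before(4),
//		Blocks:      Before(100), // new in this patch
//		Experiments: Experiments{},
//	}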
assert.NoError(t, err) assert.Equal(t, Mode{true, Distance(math.MaxUint64), Distance(math.MaxUint64), - Distance(math.MaxUint64), Distance(math.MaxUint64), Experiments{}}, prune) + Distance(math.MaxUint64), Distance(math.MaxUint64), Distance(math.MaxUint64), Experiments{}}, prune) err = setIfNotExist(tx, Mode{true, Distance(1), Distance(2), - Before(3), Before(4), Experiments{}}) + Before(3), Before(4), Before(100), Experiments{}}) assert.NoError(t, err) prune, err = Get(tx) assert.NoError(t, err) assert.Equal(t, Mode{true, Distance(1), Distance(2), - Before(3), Before(4), Experiments{}}, prune) + Before(3), Before(4), Before(100), Experiments{}}, prune) } var distanceTests = []struct { diff --git a/go.mod b/go.mod index 30eaf71de40..9237d08048f 100644 --- a/go.mod +++ b/go.mod @@ -20,7 +20,6 @@ require ( github.com/VictoriaMetrics/fastcache v1.12.2 github.com/alecthomas/atomic v0.1.0-alpha2 github.com/alecthomas/kong v0.8.1 - github.com/anacrolix/log v0.15.2 github.com/anacrolix/sync v0.5.1 github.com/anacrolix/torrent v1.52.6-0.20231201115409-7ea994b6bbd8 github.com/benesch/cgosymbolizer v0.0.0-20190515212042-bec6fe6e597b @@ -129,6 +128,7 @@ require ( github.com/anacrolix/envpprof v1.3.0 // indirect github.com/anacrolix/generics v0.0.0-20230816105729-c755655aee45 // indirect github.com/anacrolix/go-libutp v1.3.1 // indirect + github.com/anacrolix/log v0.15.2 // indirect github.com/anacrolix/missinggo v1.3.0 // indirect github.com/anacrolix/missinggo/perf v1.0.0 // indirect github.com/anacrolix/missinggo/v2 v2.7.2-0.20230527121029-a582b4f397b9 // indirect diff --git a/params/network_params.go b/params/network_params.go index e914ff51a67..d79192516e8 100644 --- a/params/network_params.go +++ b/params/network_params.go @@ -57,5 +57,5 @@ const ( // considered immutable (i.e. soft finality). It is used by the downloader as a // hard limit against deep ancestors, by the blockchain against deep reorgs, by // the freezer as the cutoff threshold and by clique as the snapshot trust limit. 
-	FullImmutabilityThreshold = 90000
+	FullImmutabilityThreshold = 100_000
 )
diff --git a/turbo/app/snapshots_cmd.go b/turbo/app/snapshots_cmd.go
index 4a5f3192ad5..7092a079e78 100644
--- a/turbo/app/snapshots_cmd.go
+++ b/turbo/app/snapshots_cmd.go
@@ -783,7 +783,7 @@ func doRetireCommand(cliCtx *cli.Context) error {
 	}
 	logger.Info("Params", "from", from, "to", to, "every", every)
-	if err := br.RetireBlocks(ctx, 0, forwardProgress, log.LvlInfo, nil, nil); err != nil {
+	if err := br.RetireBlocks(ctx, 0, forwardProgress, log.LvlInfo, nil, nil, nil); err != nil {
 		return err
 	}
diff --git a/turbo/cli/default_flags.go b/turbo/cli/default_flags.go
index b06965b0d9e..162bae95cfd 100644
--- a/turbo/cli/default_flags.go
+++ b/turbo/cli/default_flags.go
@@ -28,10 +28,12 @@ var DefaultFlags = []cli.Flag{
 	&utils.TxPoolTraceSendersFlag,
 	&utils.TxPoolCommitEveryFlag,
 	&PruneFlag,
+	&PruneBlocksFlag,
 	&PruneHistoryFlag,
 	&PruneReceiptFlag,
 	&PruneTxIndexFlag,
 	&PruneCallTracesFlag,
+	&PruneBlocksBeforeFlag,
 	&PruneHistoryBeforeFlag,
 	&PruneReceiptBeforeFlag,
 	&PruneTxIndexBeforeFlag,
diff --git a/turbo/cli/flags.go b/turbo/cli/flags.go
index 1f4f5b6e4fd..7458a6d16ea 100644
--- a/turbo/cli/flags.go
+++ b/turbo/cli/flags.go
@@ -2,6 +2,7 @@ package cli

 import (
 	"fmt"
+	"math"
 	"time"

 	"github.com/ledgerwatch/erigon-lib/common/hexutil"
@@ -76,6 +77,10 @@ var (
 Example: --prune=htc`,
 		Value: "disabled",
 	}
+	PruneBlocksFlag = cli.Uint64Flag{
+		Name:  "prune.b.older",
+		Usage: `Prune data older than this number of blocks from the tip of the chain (if --prune flag has 'b', then default is 90K)`,
+	}
 	PruneHistoryFlag = cli.Uint64Flag{
 		Name:  "prune.h.older",
 		Usage: `Prune data older than this number of blocks from the tip of the chain (if --prune flag has 'h', then default is 90K)`,
@@ -109,6 +114,10 @@ var (
 		Name:  "prune.c.before",
 		Usage: `Prune data before this block`,
 	}
+	PruneBlocksBeforeFlag = cli.Uint64Flag{
+		Name:  "prune.b.before",
+		Usage: `Prune data before this block`,
+	}

 	ExperimentsFlag = cli.StringFlag{
 		Name: "experiments",
@@ -258,10 +267,15 @@ func ApplyFlagsForEthConfig(ctx *cli.Context, cfg *ethconfig.Config, logger log.
 	if cfg.Genesis != nil {
 		chainId = cfg.Genesis.Config.ChainID.Uint64()
 	}
-
+	minimal := ctx.String(PruneFlag.Name) == "minimal"
+	pruneFlagString := ctx.String(PruneFlag.Name)
+	if minimal {
+		pruneFlagString = "htrcb"
+	}
 	mode, err := prune.FromCli(
 		chainId,
-		ctx.String(PruneFlag.Name),
+		pruneFlagString,
+		ctx.Uint64(PruneBlocksFlag.Name),
 		ctx.Uint64(PruneHistoryFlag.Name),
 		ctx.Uint64(PruneReceiptFlag.Name),
 		ctx.Uint64(PruneTxIndexFlag.Name),
@@ -270,8 +284,9 @@ func ApplyFlagsForEthConfig(ctx *cli.Context, cfg *ethconfig.Config, logger log.
 		ctx.Uint64(PruneReceiptBeforeFlag.Name),
 		ctx.Uint64(PruneTxIndexBeforeFlag.Name),
 		ctx.Uint64(PruneCallTracesBeforeFlag.Name),
+		ctx.Uint64(PruneBlocksBeforeFlag.Name),
 		libcommon.CliString2Array(ctx.String(ExperimentsFlag.Name)),
 	)
 	if err != nil {
 		utils.Fatalf(fmt.Sprintf("error while parsing mode: %v", err))
 	}
@@ -293,6 +312,15 @@ func ApplyFlagsForEthConfig(ctx *cli.Context, cfg *ethconfig.Config, logger log.
 		etl.BufferOptimalSize = *size
 	}

+	if minimal {
+		// Prune them all.
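// NOTE (editorial, semantics inferred from ethdb/prune/storage_mode.go and the
// flag descriptions in this patch): a Distance(d) amount prunes data older
// than d blocks behind the chain tip, while Before(n) prunes data before block
// n; setting every category to Before(math.MaxUint64) therefore requests the
// most aggressive pruning possible, which is the intent of --prune=minimal.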
+ cfg.Prune.Blocks = prune.Before(math.MaxUint64) + cfg.Prune.History = prune.Before(math.MaxUint64) + cfg.Prune.Receipts = prune.Before(math.MaxUint64) + cfg.Prune.TxIndex = prune.Before(math.MaxUint64) + cfg.Prune.CallTraces = prune.Before(math.MaxUint64) + } + cfg.StateStream = !ctx.Bool(StateStreamDisableFlag.Name) if ctx.String(BodyCacheLimitFlag.Name) != "" { err := cfg.Sync.BodyCacheLimit.UnmarshalText([]byte(ctx.String(BodyCacheLimitFlag.Name))) @@ -365,7 +393,10 @@ func ApplyFlagsForEthConfigCobra(f *pflag.FlagSet, cfg *ethconfig.Config) { if exp := f.StringSlice(ExperimentsFlag.Name, nil, ExperimentsFlag.Usage); exp != nil { experiments = *exp } - var exactH, exactR, exactT, exactC uint64 + var exactB, exactH, exactR, exactT, exactC uint64 + if v := f.Uint64(PruneBlocksFlag.Name, PruneBlocksFlag.Value, PruneBlocksFlag.Usage); v != nil { + exactB = *v + } if v := f.Uint64(PruneHistoryFlag.Name, PruneHistoryFlag.Value, PruneHistoryFlag.Usage); v != nil { exactH = *v } @@ -379,7 +410,10 @@ func ApplyFlagsForEthConfigCobra(f *pflag.FlagSet, cfg *ethconfig.Config) { exactC = *v } - var beforeH, beforeR, beforeT, beforeC uint64 + var beforeB, beforeH, beforeR, beforeT, beforeC uint64 + if v := f.Uint64(PruneBlocksBeforeFlag.Name, PruneBlocksBeforeFlag.Value, PruneBlocksBeforeFlag.Usage); v != nil { + beforeB = *v + } if v := f.Uint64(PruneHistoryBeforeFlag.Name, PruneHistoryBeforeFlag.Value, PruneHistoryBeforeFlag.Usage); v != nil { beforeH = *v } @@ -398,7 +432,7 @@ func ApplyFlagsForEthConfigCobra(f *pflag.FlagSet, cfg *ethconfig.Config) { chainId = cfg.Genesis.Config.ChainID.Uint64() } - mode, err := prune.FromCli(chainId, *v, exactH, exactR, exactT, exactC, beforeH, beforeR, beforeT, beforeC, experiments) + mode, err := prune.FromCli(chainId, *v, exactB, exactH, exactR, exactT, exactC, beforeH, beforeR, beforeT, beforeC, beforeB, experiments) if err != nil { utils.Fatalf(fmt.Sprintf("error while parsing mode: %v", err)) } diff --git a/turbo/execution/eth1/block_building.go b/turbo/execution/eth1/block_building.go index 0d0a7f95e16..ec9cc6e9e5e 100644 --- a/turbo/execution/eth1/block_building.go +++ b/turbo/execution/eth1/block_building.go @@ -42,7 +42,6 @@ func (e *EthereumExecutionModule) evictOldBuilders() { // Missing: NewPayload, AssembleBlock func (e *EthereumExecutionModule) AssembleBlock(ctx context.Context, req *execution.AssembleBlockRequest) (*execution.AssembleBlockResponse, error) { if !e.semaphore.TryAcquire(1) { - e.logger.Warn("ethereumExecutionModule.AssembleBlock: ExecutionStatus_Busy") return &execution.AssembleBlockResponse{ Id: 0, Busy: true, @@ -109,7 +108,6 @@ func blockValue(br *types.BlockWithReceipts, baseFee *uint256.Int) *uint256.Int func (e *EthereumExecutionModule) GetAssembledBlock(ctx context.Context, req *execution.GetAssembledBlockRequest) (*execution.GetAssembledBlockResponse, error) { if !e.semaphore.TryAcquire(1) { - e.logger.Warn("ethereumExecutionModule.GetAssembledBlock: ExecutionStatus_Busy") return &execution.GetAssembledBlockResponse{ Busy: true, }, nil diff --git a/turbo/execution/eth1/inserters.go b/turbo/execution/eth1/inserters.go index cfd6ea04024..378c629ef8d 100644 --- a/turbo/execution/eth1/inserters.go +++ b/turbo/execution/eth1/inserters.go @@ -48,8 +48,13 @@ func (e *EthereumExecutionModule) InsertBlocks(ctx context.Context, req *executi } defer tx.Rollback() e.forkValidator.ClearWithUnwind(e.accumulator, e.stateChangeConsumer) + frozenBlocks := e.blockReader.FrozenBlocks() for _, block := range req.Blocks { + // Skip frozen 
blocks. + if block.Header.BlockNumber < frozenBlocks { + continue + } header, err := eth1_utils.HeaderRpcToHeader(block.Header) if err != nil { return nil, fmt.Errorf("ethereumExecutionModule.InsertBlocks: cannot convert headers: %s", err) diff --git a/turbo/services/interfaces.go b/turbo/services/interfaces.go index 81fa90110ab..dc4aa1474bf 100644 --- a/turbo/services/interfaces.go +++ b/turbo/services/interfaces.go @@ -116,8 +116,10 @@ type FullBlockReader interface { type BlockSnapshots interface { LogStat(label string) ReopenFolder() error + ReopenSegments(types []snaptype.Type, allowGaps bool) error SegmentsMax() uint64 SegmentsMin() uint64 + Delete(fileName string) error Types() []snaptype.Type Close() } @@ -125,7 +127,7 @@ type BlockSnapshots interface { // BlockRetire - freezing blocks: moving old data from DB to snapshot files type BlockRetire interface { PruneAncientBlocks(tx kv.RwTx, limit int) error - RetireBlocksInBackground(ctx context.Context, miBlockNum uint64, maxBlockNum uint64, lvl log.Lvl, seedNewSnapshots func(downloadRequest []DownloadRequest) error, onDelete func(l []string) error) + RetireBlocksInBackground(ctx context.Context, miBlockNum uint64, maxBlockNum uint64, lvl log.Lvl, seedNewSnapshots func(downloadRequest []DownloadRequest) error, onDelete func(l []string) error, onFinishRetire func() error) HasNewFrozenFiles() bool BuildMissedIndicesIfNeed(ctx context.Context, logPrefix string, notifier DBEventNotifier, cc *chain.Config) error SetWorkers(workers int) diff --git a/turbo/snapshotsync/freezeblocks/block_reader.go b/turbo/snapshotsync/freezeblocks/block_reader.go index 390c16a098c..d9f62adab31 100644 --- a/turbo/snapshotsync/freezeblocks/block_reader.go +++ b/turbo/snapshotsync/freezeblocks/block_reader.go @@ -1047,7 +1047,6 @@ func (r *BlockReader) FirstTxnNumNotInSnapshots() uint64 { func (r *BlockReader) IterateFrozenBodies(f func(blockNum, baseTxNum, txAmount uint64) error) error { view := r.sn.View() defer view.Close() - for _, sn := range view.Bodies() { sn := sn defer sn.EnableReadAhead().DisableReadAhead() diff --git a/turbo/snapshotsync/freezeblocks/block_snapshots.go b/turbo/snapshotsync/freezeblocks/block_snapshots.go index 7aaacc12b7a..7412b380649 100644 --- a/turbo/snapshotsync/freezeblocks/block_snapshots.go +++ b/turbo/snapshotsync/freezeblocks/block_snapshots.go @@ -547,7 +547,6 @@ func (s *RoSnapshots) rebuildSegments(fileNames []string, open bool, optimistic } } if optimistic { - s.logger.Warn("[snapshots] open segment", "err", err) continue } else { return err @@ -725,6 +724,47 @@ func (s *RoSnapshots) buildMissedIndicesIfNeed(ctx context.Context, logPrefix st return nil } +func (s *RoSnapshots) delete(fileName string) error { + v := s.View() + defer v.Close() + + _, fName := filepath.Split(fileName) + var err error + s.segments.Scan(func(segtype snaptype.Enum, value *segments) bool { + idxsToRemove := []int{} + for i, sn := range value.segments { + if sn.Decompressor == nil { + continue + } + if sn.segType.FileName(sn.version, sn.from, sn.to) != fName { + continue + } + files := sn.openFiles() + sn.close() + idxsToRemove = append(idxsToRemove, i) + for _, f := range files { + _ = os.Remove(f) + } + } + for i := len(idxsToRemove) - 1; i >= 0; i-- { + value.segments = append(value.segments[:idxsToRemove[i]], value.segments[idxsToRemove[i]+1:]...) 
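+			// idxsToRemove was collected in ascending order, so deleting from
+			// the back keeps the earlier recorded indexes valid as the slice shrinks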
+ } + return true + }) + return err +} + +func (s *RoSnapshots) Delete(fileName string) error { + if s == nil { + return nil + } + if err := s.delete(fileName); err != nil { + return fmt.Errorf("can't delete file: %w", err) + } + return s.ReopenFolder() + +} + func (s *RoSnapshots) buildMissedIndices(logPrefix string, ctx context.Context, dirs datadir.Dirs, chainConfig *chain.Config, workers int, logger log.Logger) error { if s == nil { return nil @@ -908,8 +948,11 @@ func sendDiagnostics(startIndexingTime time.Time, indexPercent map[string]int, a }) } -func noGaps(in []snaptype.FileInfo, from uint64) (out []snaptype.FileInfo, missingSnapshots []Range) { - prevTo := from +func noGaps(in []snaptype.FileInfo) (out []snaptype.FileInfo, missingSnapshots []Range) { + if len(in) == 0 { + return nil, nil + } + prevTo := in[0].From for _, f := range in { if f.To <= prevTo { continue @@ -1029,7 +1072,7 @@ func SegmentsCaplin(dir string, minBlock uint64) (res []snaptype.FileInfo, missi } l = append(l, f) } - l, m = noGaps(noOverlaps(l), minBlock) + l, m = noGaps(noOverlaps(l)) if len(m) > 0 { lst := m[len(m)-1] log.Debug("[snapshots] see gap", "type", snaptype.CaplinEnums.BeaconBlocks, "from", lst.from) @@ -1042,7 +1085,7 @@ func SegmentsCaplin(dir string, minBlock uint64) (res []snaptype.FileInfo, missi } func Segments(dir string, minBlock uint64) (res []snaptype.FileInfo, missingSnapshots []Range, err error) { - return typedSegments(dir, minBlock, coresnaptype.BlockSnapshotTypes, false) + return typedSegments(dir, minBlock, coresnaptype.BlockSnapshotTypes, true) } func typedSegments(dir string, minBlock uint64, types []snaptype.Type, allowGaps bool) (res []snaptype.FileInfo, missingSnapshots []Range, err error) { @@ -1070,7 +1113,7 @@ func typedSegments(dir string, minBlock uint64, types []snaptype.Type, allowGaps if allowGaps { l = noOverlaps(segmentsTypeCheck(dir, l)) } else { - l, m = noGaps(noOverlaps(segmentsTypeCheck(dir, l)), minBlock) + l, m = noGaps(noOverlaps(segmentsTypeCheck(dir, l))) } if len(m) > 0 { lst := m[len(m)-1] @@ -1349,7 +1392,7 @@ func (br *BlockRetire) PruneAncientBlocks(tx kv.RwTx, limit int) error { return nil } -func (br *BlockRetire) RetireBlocksInBackground(ctx context.Context, minBlockNum, maxBlockNum uint64, lvl log.Lvl, seedNewSnapshots func(downloadRequest []services.DownloadRequest) error, onDeleteSnapshots func(l []string) error) { +func (br *BlockRetire) RetireBlocksInBackground(ctx context.Context, minBlockNum, maxBlockNum uint64, lvl log.Lvl, seedNewSnapshots func(downloadRequest []services.DownloadRequest) error, onDeleteSnapshots func(l []string) error, onFinishRetire func() error) { if maxBlockNum > br.maxScheduledBlock.Load() { br.maxScheduledBlock.Store(maxBlockNum) } @@ -1370,7 +1413,7 @@ func (br *BlockRetire) RetireBlocksInBackground(ctx context.Context, minBlockNum defer br.snBuildAllowed.Release(1) } - err := br.RetireBlocks(ctx, minBlockNum, maxBlockNum, lvl, seedNewSnapshots, onDeleteSnapshots) + err := br.RetireBlocks(ctx, minBlockNum, maxBlockNum, lvl, seedNewSnapshots, onDeleteSnapshots, onFinishRetire) if err != nil { br.logger.Warn("[snapshots] retire blocks", "err", err) return @@ -1378,7 +1421,7 @@ func (br *BlockRetire) RetireBlocksInBackground(ctx context.Context, minBlockNum }() } -func (br *BlockRetire) RetireBlocks(ctx context.Context, minBlockNum uint64, maxBlockNum uint64, lvl log.Lvl, seedNewSnapshots func(downloadRequest []services.DownloadRequest) error, onDeleteSnapshots func(l []string) error) error { +func (br *BlockRetire) 
RetireBlocks(ctx context.Context, minBlockNum uint64, maxBlockNum uint64, lvl log.Lvl, seedNewSnapshots func(downloadRequest []services.DownloadRequest) error, onDeleteSnapshots func(l []string) error, onFinish func() error) error { if maxBlockNum > br.maxScheduledBlock.Load() { br.maxScheduledBlock.Store(maxBlockNum) } @@ -1415,6 +1458,11 @@ func (br *BlockRetire) RetireBlocks(ctx context.Context, minBlockNum uint64, max return err } } + if onFinish != nil { + if err := onFinish(); err != nil { + return err + } + } if !(ok || okBor) { break @@ -1438,7 +1486,6 @@ func (br *BlockRetire) BuildMissedIndicesIfNeed(ctx context.Context, logPrefix s } func DumpBlocks(ctx context.Context, blockFrom, blockTo uint64, chainConfig *chain.Config, tmpDir, snapDir string, chainDB kv.RoDB, workers int, lvl log.Lvl, logger log.Logger, blockReader services.FullBlockReader) error { - firstTxNum := blockReader.FirstTxnNumNotInSnapshots() for i := blockFrom; i < blockTo; i = chooseSegmentEnd(i, blockTo, coresnaptype.Enums.Headers, chainConfig) { lastTxNum, err := dumpBlocksRange(ctx, i, chooseSegmentEnd(i, blockTo, coresnaptype.Enums.Headers, chainConfig), tmpDir, snapDir, firstTxNum, chainDB, chainConfig, workers, lvl, logger) diff --git a/turbo/snapshotsync/freezeblocks/block_snapshots_test.go b/turbo/snapshotsync/freezeblocks/block_snapshots_test.go index 9eae5a710c3..f795f6f2597 100644 --- a/turbo/snapshotsync/freezeblocks/block_snapshots_test.go +++ b/turbo/snapshotsync/freezeblocks/block_snapshots_test.go @@ -8,6 +8,7 @@ import ( "github.com/ledgerwatch/log/v3" "github.com/stretchr/testify/require" + "golang.org/x/exp/slices" "github.com/ledgerwatch/erigon-lib/chain/networkname" "github.com/ledgerwatch/erigon-lib/chain/snapcfg" @@ -137,6 +138,7 @@ func TestMergeSnapshots(t *testing.T) { { merger := NewMerger(dir, 1, log.LvlInfo, nil, params.MainnetChainConfig, logger) merger.DisableFsync() + s.ReopenSegments(coresnaptype.BlockSnapshotTypes, false) ranges := merger.FindMergeRanges(s.Ranges(), s.SegmentsMax()) require.True(len(ranges) > 0) err := merger.Merge(context.Background(), s, coresnaptype.BlockSnapshotTypes, ranges, s.Dir(), false, nil, nil) @@ -153,57 +155,91 @@ func TestMergeSnapshots(t *testing.T) { { merger := NewMerger(dir, 1, log.LvlInfo, nil, params.MainnetChainConfig, logger) merger.DisableFsync() + s.ReopenSegments(coresnaptype.BlockSnapshotTypes, false) ranges := merger.FindMergeRanges(s.Ranges(), s.SegmentsMax()) require.True(len(ranges) == 0) err := merger.Merge(context.Background(), s, coresnaptype.BlockSnapshotTypes, ranges, s.Dir(), false, nil, nil) require.NoError(err) } - expectedFileName = snaptype.SegmentFileName(coresnaptype.Transactions.Versions().Current, 600_000, 700_000, coresnaptype.Transactions.Enum()) - d, err = seg.NewDecompressor(filepath.Join(dir, expectedFileName)) - require.NoError(err) - defer d.Close() - a = d.Count() - require.Equal(10, a) + // [0; N] merges are not supported anymore + + // expectedFileName = snaptype.SegmentFileName(coresnaptype.Transactions.Versions().Current, 600_000, 700_000, coresnaptype.Transactions.Enum()) + // d, err = seg.NewDecompressor(filepath.Join(dir, expectedFileName)) + // require.NoError(err) + // defer d.Close() + // a = d.Count() + // require.Equal(10, a) + + // start := uint64(19_000_000) + // for i := uint64(0); i < N; i++ { + // createFile(start+i*10_000, start+(i+1)*10_000) + // } + // s = NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: true}, dir, start, logger) + // defer s.Close() + // require.NoError(s.ReopenFolder()) + 
// { + // merger := NewMerger(dir, 1, log.LvlInfo, nil, params.MainnetChainConfig, logger) + // merger.DisableFsync() + // fmt.Println(s.Ranges(), s.SegmentsMax()) + // fmt.Println(s.Ranges(), s.SegmentsMax()) + // ranges := merger.FindMergeRanges(s.Ranges(), s.SegmentsMax()) + // require.True(len(ranges) > 0) + // err := merger.Merge(context.Background(), s, coresnaptype.BlockSnapshotTypes, ranges, s.Dir(), false, nil, nil) + // require.NoError(err) + // } + + // expectedFileName = snaptype.SegmentFileName(coresnaptype.Transactions.Versions().Current, start+100_000, start+200_000, coresnaptype.Transactions.Enum()) + // d, err = seg.NewDecompressor(filepath.Join(dir, expectedFileName)) + // require.NoError(err) + // defer d.Close() + // a = d.Count() + // require.Equal(10, a) + + // { + // merger := NewMerger(dir, 1, log.LvlInfo, nil, params.MainnetChainConfig, logger) + // merger.DisableFsync() + // s.ReopenSegments(coresnaptype.BlockSnapshotTypes, false) + // ranges := merger.FindMergeRanges(s.Ranges(), s.SegmentsMax()) + // require.True(len(ranges) == 0) + // err := merger.Merge(context.Background(), s, coresnaptype.BlockSnapshotTypes, ranges, s.Dir(), false, nil, nil) + // require.NoError(err) + // } + + // expectedFileName = snaptype.SegmentFileName(coresnaptype.Transactions.Versions().Current, start+600_000, start+700_000, coresnaptype.Transactions.Enum()) + // d, err = seg.NewDecompressor(filepath.Join(dir, expectedFileName)) + // require.NoError(err) + // defer d.Close() + // a = d.Count() + // require.Equal(10, a) +} + +func TestDeleteSnapshots(t *testing.T) { + logger := log.New() + dir, require := t.TempDir(), require.New(t) + createFile := func(from, to uint64) { + for _, snT := range coresnaptype.BlockSnapshotTypes { + createTestSegmentFile(t, from, to, snT.Enum(), dir, 1, logger) + } + } + + N := uint64(70) - start := uint64(19_000_000) for i := uint64(0); i < N; i++ { - createFile(start+i*10_000, start+(i+1)*10_000) + createFile(i*10_000, (i+1)*10_000) } - s = NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: true}, dir, start, logger) + s := NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: true}, dir, 0, logger) defer s.Close() - require.NoError(s.ReopenFolder()) - { - merger := NewMerger(dir, 1, log.LvlInfo, nil, params.MainnetChainConfig, logger) - merger.DisableFsync() - ranges := merger.FindMergeRanges(s.Ranges(), s.SegmentsMax()) - require.True(len(ranges) > 0) - err := merger.Merge(context.Background(), s, coresnaptype.BlockSnapshotTypes, ranges, s.Dir(), false, nil, nil) - require.NoError(err) + retireFiles := []string{ + "v1-000000-000010-bodies.seg", + "v1-000000-000010-headers.seg", + "v1-000000-000010-transactions.seg", } - - expectedFileName = snaptype.SegmentFileName(coresnaptype.Transactions.Versions().Current, start+100_000, start+200_000, coresnaptype.Transactions.Enum()) - d, err = seg.NewDecompressor(filepath.Join(dir, expectedFileName)) - require.NoError(err) - defer d.Close() - a = d.Count() - require.Equal(10, a) - - { - merger := NewMerger(dir, 1, log.LvlInfo, nil, params.MainnetChainConfig, logger) - merger.DisableFsync() - ranges := merger.FindMergeRanges(s.Ranges(), s.SegmentsMax()) - require.True(len(ranges) == 0) - err := merger.Merge(context.Background(), s, coresnaptype.BlockSnapshotTypes, ranges, s.Dir(), false, nil, nil) - require.NoError(err) + require.NoError(s.ReopenFolder()) + for _, f := range retireFiles { + require.NoError(s.Delete(f)) + require.False(slices.Contains(s.Files(), f)) } - - expectedFileName = 
snaptype.SegmentFileName(coresnaptype.Transactions.Versions().Current, start+600_000, start+700_000, coresnaptype.Transactions.Enum()) - d, err = seg.NewDecompressor(filepath.Join(dir, expectedFileName)) - require.NoError(err) - defer d.Close() - a = d.Count() - require.Equal(10, a) } func TestRemoveOverlaps(t *testing.T) { @@ -234,7 +270,7 @@ func TestRemoveOverlaps(t *testing.T) { s := NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: true}, dir, 0, logger) defer s.Close() - require.NoError(s.ReopenFolder()) + require.NoError(s.ReopenSegments(coresnaptype.BlockSnapshotTypes, false)) list, err := snaptype.Segments(s.dir) require.NoError(err) @@ -312,7 +348,8 @@ func TestOpenAllSnapshot(t *testing.T) { err = s.ReopenFolder() require.NoError(err) require.NotNil(getSegs(coresnaptype.Enums.Headers)) - require.Equal(0, len(getSegs(coresnaptype.Enums.Headers).segments)) + s.ReopenSegments(coresnaptype.BlockSnapshotTypes, false) + require.Equal(1, len(getSegs(coresnaptype.Enums.Headers).segments)) s.Close() createFile(0, 500_000, coresnaptype.Bodies) diff --git a/turbo/snapshotsync/snapshotsync.go b/turbo/snapshotsync/snapshotsync.go index 1578eac4569..6cfdba055d8 100644 --- a/turbo/snapshotsync/snapshotsync.go +++ b/turbo/snapshotsync/snapshotsync.go @@ -4,7 +4,10 @@ import ( "context" "encoding/binary" "fmt" + "math" "runtime" + "sort" + "strconv" "strings" "time" @@ -12,6 +15,7 @@ import ( "github.com/ledgerwatch/erigon-lib/chain/snapcfg" "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/dbg" + "github.com/ledgerwatch/erigon-lib/config3" "github.com/ledgerwatch/erigon-lib/diagnostics" "github.com/ledgerwatch/erigon-lib/downloader/downloadergrpc" "github.com/ledgerwatch/erigon-lib/downloader/snaptype" @@ -23,6 +27,7 @@ import ( "github.com/ledgerwatch/erigon/core/rawdb" coresnaptype "github.com/ledgerwatch/erigon/core/snaptype" + "github.com/ledgerwatch/erigon/ethdb/prune" "github.com/ledgerwatch/erigon/turbo/services" ) @@ -66,11 +71,197 @@ func RequestSnapshotsDownload(ctx context.Context, downloadRequest []services.Do return nil } +func adjustStepPrune(steps uint64) uint64 { + if steps == 0 { + return 0 + } + if steps < snaptype.Erigon3SeedableSteps { + return snaptype.Erigon3SeedableSteps + } + if steps%snaptype.Erigon3SeedableSteps == 0 { + return steps + } + // round to nearest multiple of 64. if less than 64, round to 64 + return steps + steps%snaptype.Erigon3SeedableSteps +} + +func adjustBlockPrune(blocks, minBlocksToDownload uint64) uint64 { + if minBlocksToDownload < snaptype.Erigon2MergeLimit { + minBlocksToDownload = snaptype.Erigon2MergeLimit + } + if blocks < minBlocksToDownload { + blocks = minBlocksToDownload + } + if blocks%snaptype.Erigon2MergeLimit == 0 { + return blocks + } + ret := blocks + snaptype.Erigon2MergeLimit + // round to nearest multiple of 64. 
if less than 64, round to 64 + return ret - ret%snaptype.Erigon2MergeLimit +} + +func shouldUseStepsForPruning(name string) bool { + return strings.HasPrefix(name, "idx") || strings.HasPrefix(name, "history") +} + +func canSnapshotBePruned(name string) bool { + return strings.HasPrefix(name, "idx") || strings.HasPrefix(name, "history") || strings.Contains(name, "transactions") +} + +func buildBlackListForPruning(pruneMode bool, stepPrune, minBlockToDownload, blockPrune uint64, preverified snapcfg.Preverified) (map[string]struct{}, error) { + type snapshotFileData struct { + from, to uint64 + stepBased bool + name string + } + blackList := make(map[string]struct{}) + if !pruneMode { + return blackList, nil + } + stepPrune = adjustStepPrune(stepPrune) + blockPrune = adjustBlockPrune(blockPrune, minBlockToDownload) + snapshotKindToNames := make(map[string][]snapshotFileData) + for _, p := range preverified { + name := p.Name + // Dont prune unprunable files + if !canSnapshotBePruned(name) { + continue + } + var from, to uint64 + var err error + var kind string + if shouldUseStepsForPruning(name) { + // parse "from" (0) and "to" (64) from the name + // parse the snapshot "kind". e.g kind of 'idx/v1-accounts.0-64.ef' is "idx/v1-accounts" + rangeString := strings.Split(name, ".")[1] + rangeNums := strings.Split(rangeString, "-") + // convert the range to uint64 + from, err = strconv.ParseUint(rangeNums[0], 10, 64) + if err != nil { + return nil, err + } + to, err = strconv.ParseUint(rangeNums[1], 10, 64) + if err != nil { + return nil, err + } + kind = strings.Split(name, ".")[0] + } else { + // e.g 'v1-000000-000100-beaconblocks.seg' + // parse "from" (000000) and "to" (000100) from the name. 100 is 100'000 blocks + minusSplit := strings.Split(name, "-") + s, _, ok := snaptype.ParseFileName("", name) + if !ok { + continue + } + from = s.From + to = s.To + kind = minusSplit[3] + } + blackList[p.Name] = struct{}{} // Add all of them to the blacklist and remove the ones that are not blacklisted later. 
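+		// Worked examples of the parsing above (the two formats this loop handles;
+		// the adjust* helpers earlier are meant to round the requested prune
+		// amounts up to whole seedable units first):
+		//
+		//	"idx/v1-accounts.0-64.ef"           -> kind "idx/v1-accounts", from 0, to 64 (steps)
+		//	"v1-000000-000100-beaconblocks.seg" -> kind "beaconblocks.seg", from 0, to 100_000 (blocks)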
+		snapshotKindToNames[kind] = append(snapshotKindToNames[kind], snapshotFileData{
+			from:      from,
+			to:        to,
+			stepBased: shouldUseStepsForPruning(name),
+			name:      name,
+		})
+	}
+	// for each kind, walk the snapshots from the tip backwards and un-blacklist
+	// them until the requested prune distance is covered
+	for _, snapshots := range snapshotKindToNames {
+		prunedDistance := uint64(0) // keep track of pruned distance for snapshots
+		// sort the snapshots by "from" and "to" in descending order
+		sort.Slice(snapshots, func(i, j int) bool {
+			if snapshots[i].from == snapshots[j].from {
+				return snapshots[i].to > snapshots[j].to
+			}
+			return snapshots[i].from > snapshots[j].from
+		})
+		for _, snapshot := range snapshots {
+			if snapshot.stepBased {
+				if prunedDistance >= stepPrune {
+					break
+				}
+			} else if prunedDistance >= blockPrune {
+				break
+			}
+			delete(blackList, snapshot.name)
+			prunedDistance += snapshot.to - snapshot.from
+		}
+	}
+	return blackList, nil
+}
+
+// getMinimumBlocksToDownload - get the minimum number of blocks to download
+func getMinimumBlocksToDownload(tx kv.Tx, blockReader services.FullBlockReader, minStep uint64, expectedPruneBlockAmount, expectedPruneHistoryAmount uint64) (uint64, uint64, error) {
+	frozenBlocks := blockReader.Snapshots().SegmentsMax()
+	minToDownload := uint64(math.MaxUint64)
+	minStepToDownload := minStep
+	stateTxNum := minStep * config3.HistoryV3AggregationStep
+	if err := blockReader.IterateFrozenBodies(func(blockNum, baseTxNum, txAmount uint64) error {
+		if blockNum == frozenBlocks-expectedPruneHistoryAmount {
+			minStepToDownload = (baseTxNum / config3.HistoryV3AggregationStep) - 1
+		}
+		if stateTxNum <= baseTxNum { // only consider blocks whose txns are below the wanted state boundary
+			return nil
+		}
+		newMinToDownload := uint64(0)
+		if frozenBlocks > blockNum {
+			newMinToDownload = frozenBlocks - blockNum
+		}
+		if newMinToDownload < minToDownload {
+			minToDownload = newMinToDownload
+		}
+		return nil
+	}); err != nil {
+		return 0, 0, err
+	}
+	if expectedPruneBlockAmount == 0 {
+		return minToDownload, 0, nil
+	}
+	// return the minimum number of blocks to download and the minimum step.
+ return minToDownload, minStep - minStepToDownload, nil +} + +func getMaxStepRangeInSnapshots(preverified snapcfg.Preverified) (uint64, error) { + maxTo := uint64(0) + for _, p := range preverified { + // take the "to" from "domain" snapshot + if !strings.HasPrefix(p.Name, "domain") { + continue + } + rangeString := strings.Split(p.Name, ".")[1] + rangeNums := strings.Split(rangeString, "-") + // convert the range to uint64 + to, err := strconv.ParseUint(rangeNums[1], 10, 64) + if err != nil { + return 0, err + } + if to > maxTo { + maxTo = to + } + } + return maxTo, nil +} + +func computeBlocksToPrune(blockReader services.FullBlockReader, p prune.Mode) (blocksToPrune uint64, historyToPrune uint64) { + frozenBlocks := blockReader.Snapshots().SegmentsMax() + blocksPruneTo := p.Blocks.PruneTo(frozenBlocks) + historyPruneTo := p.History.PruneTo(frozenBlocks) + if blocksPruneTo <= frozenBlocks { + blocksToPrune = frozenBlocks - blocksPruneTo + } + if historyPruneTo <= frozenBlocks { + historyToPrune = frozenBlocks - historyPruneTo + } + return blocksToPrune, historyToPrune +} + // WaitForDownloader - wait for Downloader service to download all expected snapshots // for MVP we sync with Downloader only once, in future will send new snapshots also -func WaitForDownloader(ctx context.Context, logPrefix string, histV3, blobs bool, caplin CaplinMode, agg *state.Aggregator, tx kv.RwTx, blockReader services.FullBlockReader, cc *chain.Config, snapshotDownloader proto_downloader.DownloaderClient, stagesIdsList []string) error { +func WaitForDownloader(ctx context.Context, logPrefix string, headerchain, histV3, blobs bool, prune prune.Mode, caplin CaplinMode, agg *state.Aggregator, tx kv.RwTx, blockReader services.FullBlockReader, cc *chain.Config, snapshotDownloader proto_downloader.DownloaderClient, stagesIdsList []string) error { snapshots := blockReader.Snapshots() borSnapshots := blockReader.BorSnapshots() + + // Find minimum block to download. 
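+	// Worked example of the computeBlocksToPrune arithmetic used below
+	// (illustrative numbers, assuming Distance keeps the most recent N blocks
+	// and Before prunes below a fixed block): with 19_000_000 frozen blocks,
+	// prune.Blocks = Distance(90_000) yields blocksToPrune = 90_000, and
+	// prune.History = Before(18_999_000) yields historyToPrune = 1_000.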
if blockReader.FreezingCfg().NoDownloader || snapshotDownloader == nil { if err := snapshots.ReopenFolder(); err != nil { return err @@ -83,9 +274,11 @@ func WaitForDownloader(ctx context.Context, logPrefix string, histV3, blobs bool return nil } - snapshots.Close() - if cc.Bor != nil { - borSnapshots.Close() + if headerchain { + snapshots.Close() + if cc.Bor != nil { + borSnapshots.Close() + } } //Corner cases: @@ -98,6 +291,24 @@ func WaitForDownloader(ctx context.Context, logPrefix string, histV3, blobs bool preverifiedBlockSnapshots := snapCfg.Preverified downloadRequest := make([]services.DownloadRequest, 0, len(preverifiedBlockSnapshots)) + blockPrune, historyPrune := computeBlocksToPrune(blockReader, prune) + blackListForPruning := make(map[string]struct{}) + wantToPrune := prune.Blocks.Enabled() || prune.History.Enabled() + if !headerchain && wantToPrune { + minStep, err := getMaxStepRangeInSnapshots(preverifiedBlockSnapshots) + if err != nil { + return err + } + minBlockAmountToDownload, minStepToDownload, err := getMinimumBlocksToDownload(tx, blockReader, minStep, blockPrune, historyPrune) + if err != nil { + return err + } + blackListForPruning, err = buildBlackListForPruning(wantToPrune, minStepToDownload, minBlockAmountToDownload, blockPrune, preverifiedBlockSnapshots) + if err != nil { + return err + } + } + // build all download requests for _, p := range preverifiedBlockSnapshots { if !histV3 { @@ -114,6 +325,13 @@ func WaitForDownloader(ctx context.Context, logPrefix string, histV3, blobs bool if !blobs && strings.Contains(p.Name, "blobsidecars") { continue } + if headerchain && !strings.Contains(p.Name, "headers") && !strings.Contains(p.Name, "bodies") { + continue + } + if _, ok := blackListForPruning[p.Name]; ok { + continue + } + downloadRequest = append(downloadRequest, services.NewDownloadRequest(p.Name, p.Hash)) } @@ -209,6 +427,23 @@ func WaitForDownloader(ctx context.Context, logPrefix string, histV3, blobs bool // after the initial call the downloader or snapshot-lock.file will prevent this download from running // + // prohibit new downloads for the files that were downloaded + + // If we only download headers and bodies, we should prohibit only those. 
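+	// The expected call pattern (call sites are outside this hunk, so treat
+	// this as an assumption): invoke once with headerchain=true to download
+	// only the header/body snapshots, sync the header chain, then invoke again
+	// with headerchain=false to fetch the remaining files, skipping anything
+	// on the pruning blacklist.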
+ if headerchain { + if _, err := snapshotDownloader.ProhibitNewDownloads(ctx, &proto_downloader.ProhibitNewDownloadsRequest{ + Type: coresnaptype.Bodies.Name(), + }); err != nil { + return err + } + if _, err := snapshotDownloader.ProhibitNewDownloads(ctx, &proto_downloader.ProhibitNewDownloadsRequest{ + Type: coresnaptype.Headers.Name(), + }); err != nil { + return err + } + return nil + } + // prohibits further downloads, except some exceptions for _, p := range blockReader.AllTypes() { if _, err := snapshotDownloader.ProhibitNewDownloads(ctx, &proto_downloader.ProhibitNewDownloadsRequest{ diff --git a/turbo/snapshotsync/snapshotsync_test.go b/turbo/snapshotsync/snapshotsync_test.go new file mode 100644 index 00000000000..284a7b2646e --- /dev/null +++ b/turbo/snapshotsync/snapshotsync_test.go @@ -0,0 +1,43 @@ +package snapshotsync + +import ( + "strings" + "testing" + + "github.com/ledgerwatch/erigon-lib/chain/snapcfg" + "github.com/ledgerwatch/erigon-lib/downloader/snaptype" +) + +func TestBlackListForPruning(t *testing.T) { + preverified := snapcfg.Mainnet + + maxStep, err := getMaxStepRangeInSnapshots(preverified) + if err != nil { + t.Fatal(err) + } + // Prune 64 steps and contain at least all the blocks + blackList, err := buildBlackListForPruning(true, 64, 100_000, 25_000_000, preverified) + if err != nil { + t.Fatal(err) + } + for p := range blackList { + // take the snapshot file name and parse it to get the "from" + info, _, ok := snaptype.ParseFileName("tmp", p) + if !ok { + continue + } + if strings.Contains(p, "transactions") { + if info.From < 19_000_000 { + t.Errorf("Should have pruned %s", p) + } + continue + } + if strings.Contains(p, "domain") { + t.Errorf("Should not have pruned %s", p) + } + if info.To == maxStep { + t.Errorf("Should not have pruned %s", p) + } + } + +} diff --git a/turbo/stages/mock/mock_sentry.go b/turbo/stages/mock/mock_sentry.go index 00cf2059d3b..b92310835eb 100644 --- a/turbo/stages/mock/mock_sentry.go +++ b/turbo/stages/mock/mock_sentry.go @@ -495,7 +495,7 @@ func MockWithEverything(tb testing.TB, gspec *types.Genesis, key *ecdsa.PrivateK mock.Sync = stagedsync.New( cfg.Sync, stagedsync.DefaultStages(mock.Ctx, - stagedsync.StageSnapshotsCfg(mock.DB, *mock.ChainConfig, cfg.Sync, dirs, blockRetire, snapDownloader, mock.BlockReader, mock.Notifications, mock.HistoryV3, mock.agg, false, false, nil), + stagedsync.StageSnapshotsCfg(mock.DB, *mock.ChainConfig, cfg.Sync, dirs, blockRetire, snapDownloader, mock.BlockReader, mock.Notifications, mock.HistoryV3, mock.agg, false, false, nil, prune), stagedsync.StageHeadersCfg(mock.DB, mock.sentriesClient.Hd, mock.sentriesClient.Bd, *mock.ChainConfig, cfg.Sync, sendHeaderRequest, propagateNewBlockHashes, penalize, cfg.BatchSize, false, mock.BlockReader, blockWriter, dirs.Tmp, mock.HistoryV3, mock.Notifications, nil), stagedsync.StageBorHeimdallCfg(mock.DB, snapDb, stagedsync.MiningState{}, *mock.ChainConfig, nil /* heimdallClient */, mock.BlockReader, nil, nil, nil, recents, signatures, false, nil), stagedsync.StageBlockHashesCfg(mock.DB, mock.Dirs.Tmp, mock.ChainConfig, blockWriter), diff --git a/turbo/stages/stageloop.go b/turbo/stages/stageloop.go index 6a097b17da7..f50d7182a34 100644 --- a/turbo/stages/stageloop.go +++ b/turbo/stages/stageloop.go @@ -564,7 +564,7 @@ func NewDefaultStages(ctx context.Context, } return stagedsync.DefaultStages(ctx, - stagedsync.StageSnapshotsCfg(db, *controlServer.ChainConfig, cfg.Sync, dirs, blockRetire, snapDownloader, blockReader, notifications, cfg.HistoryV3, agg, 
cfg.InternalCL && cfg.CaplinConfig.Backfilling, cfg.CaplinConfig.BlobBackfilling, silkworm), + stagedsync.StageSnapshotsCfg(db, *controlServer.ChainConfig, cfg.Sync, dirs, blockRetire, snapDownloader, blockReader, notifications, cfg.HistoryV3, agg, cfg.InternalCL && cfg.CaplinConfig.Backfilling, cfg.CaplinConfig.BlobBackfilling, silkworm, cfg.Prune), stagedsync.StageHeadersCfg(db, controlServer.Hd, controlServer.Bd, *controlServer.ChainConfig, cfg.Sync, controlServer.SendHeaderRequest, controlServer.PropagateNewBlockHashes, controlServer.Penalize, cfg.BatchSize, p2pCfg.NoDiscovery, blockReader, blockWriter, dirs.Tmp, cfg.HistoryV3, notifications, loopBreakCheck), stagedsync.StageBorHeimdallCfg(db, snapDb, stagedsync.MiningState{}, *controlServer.ChainConfig, heimdallClient, blockReader, controlServer.Hd, controlServer.Penalize, loopBreakCheck, recents, signatures, cfg.WithHeimdallWaypointRecording, nil), stagedsync.StageBlockHashesCfg(db, dirs.Tmp, controlServer.ChainConfig, blockWriter), @@ -630,7 +630,7 @@ func NewPipelineStages(ctx context.Context, if len(cfg.Sync.UploadLocation) == 0 { return stagedsync.PipelineStages(ctx, - stagedsync.StageSnapshotsCfg(db, *controlServer.ChainConfig, cfg.Sync, dirs, blockRetire, snapDownloader, blockReader, notifications, cfg.HistoryV3, agg, cfg.InternalCL && cfg.CaplinConfig.Backfilling, cfg.CaplinConfig.BlobBackfilling, silkworm), + stagedsync.StageSnapshotsCfg(db, *controlServer.ChainConfig, cfg.Sync, dirs, blockRetire, snapDownloader, blockReader, notifications, cfg.HistoryV3, agg, cfg.InternalCL && cfg.CaplinConfig.Backfilling, cfg.CaplinConfig.BlobBackfilling, silkworm, cfg.Prune), stagedsync.StageBlockHashesCfg(db, dirs.Tmp, controlServer.ChainConfig, blockWriter), stagedsync.StageSendersCfg(db, controlServer.ChainConfig, cfg.Sync, false, dirs.Tmp, cfg.Prune, blockReader, controlServer.Hd, loopBreakCheck), stagedsync.StageExecuteBlocksCfg( @@ -664,7 +664,7 @@ func NewPipelineStages(ctx context.Context, } return stagedsync.UploaderPipelineStages(ctx, - stagedsync.StageSnapshotsCfg(db, *controlServer.ChainConfig, cfg.Sync, dirs, blockRetire, snapDownloader, blockReader, notifications, cfg.HistoryV3, agg, cfg.InternalCL && cfg.CaplinConfig.Backfilling, cfg.CaplinConfig.BlobBackfilling, silkworm), + stagedsync.StageSnapshotsCfg(db, *controlServer.ChainConfig, cfg.Sync, dirs, blockRetire, snapDownloader, blockReader, notifications, cfg.HistoryV3, agg, cfg.InternalCL && cfg.CaplinConfig.Backfilling, cfg.CaplinConfig.BlobBackfilling, silkworm, cfg.Prune), stagedsync.StageHeadersCfg(db, controlServer.Hd, controlServer.Bd, *controlServer.ChainConfig, cfg.Sync, controlServer.SendHeaderRequest, controlServer.PropagateNewBlockHashes, controlServer.Penalize, cfg.BatchSize, p2pCfg.NoDiscovery, blockReader, blockWriter, dirs.Tmp, cfg.HistoryV3, notifications, loopBreakCheck), stagedsync.StageBlockHashesCfg(db, dirs.Tmp, controlServer.ChainConfig, blockWriter), stagedsync.StageSendersCfg(db, controlServer.ChainConfig, cfg.Sync, false, dirs.Tmp, cfg.Prune, blockReader, controlServer.Hd, loopBreakCheck), @@ -770,6 +770,7 @@ func NewPolygonSyncStages( config.InternalCL && config.CaplinConfig.Backfilling, config.CaplinConfig.BlobBackfilling, silkworm, + config.Prune, ), stagedsync.StageBlockHashesCfg( db, From 4145048fe2e156d29c7718eb927432a89443be2e Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Fri, 10 May 2024 20:40:13 +0700 Subject: [PATCH 16/48] e3: mainnet 1536 steps (19.8M) (#10275) --- erigon-lib/go.mod | 2 +- erigon-lib/go.sum | 4 ++-- go.mod | 2 +- 
go.sum | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/erigon-lib/go.mod b/erigon-lib/go.mod index 27d6814cfcf..05a19039c31 100644 --- a/erigon-lib/go.mod +++ b/erigon-lib/go.mod @@ -4,7 +4,7 @@ go 1.21 require ( github.com/erigontech/mdbx-go v0.38.0 - github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240510043237-3afa4a26e31d + github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240510125329-7b93b3d7e99f github.com/ledgerwatch/interfaces v0.0.0-20240510032129-13f644ca2307 github.com/ledgerwatch/log/v3 v3.9.0 github.com/ledgerwatch/secp256k1 v1.0.0 diff --git a/erigon-lib/go.sum b/erigon-lib/go.sum index 822c6c02951..ef7893a67aa 100644 --- a/erigon-lib/go.sum +++ b/erigon-lib/go.sum @@ -270,8 +270,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240510043237-3afa4a26e31d h1:pOxi2nvq6A5cQ2k7n08tXTqVxNAvbUTuzF8bZhb6ooQ= -github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240510043237-3afa4a26e31d/go.mod h1:3AuPxZc85jkehh/HA9h8gabv5MSi3kb/ddtzBsTVJFo= +github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240510125329-7b93b3d7e99f h1:vOUz9rYvrFWc84nuPUxReQj7OhU7QYWJCNXbH0NMPvI= +github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240510125329-7b93b3d7e99f/go.mod h1:3AuPxZc85jkehh/HA9h8gabv5MSi3kb/ddtzBsTVJFo= github.com/ledgerwatch/interfaces v0.0.0-20240510032129-13f644ca2307 h1:v2syJaHSCTSEnzwFUW4F6FL92ZAnKEoyBesnm2E/IEU= github.com/ledgerwatch/interfaces v0.0.0-20240510032129-13f644ca2307/go.mod h1:ugQv1QllJzBny3cKZKxUrSnykkjkBgm27eQM6dnGAcc= github.com/ledgerwatch/log/v3 v3.9.0 h1:iDwrXe0PVwBC68Dd94YSsHbMgQ3ufsgjzXtFNFVZFRk= diff --git a/go.mod b/go.mod index 9237d08048f..eb99e2dde5a 100644 --- a/go.mod +++ b/go.mod @@ -186,7 +186,7 @@ require ( github.com/koron/go-ssdp v0.0.4 // indirect github.com/kr/pretty v0.3.1 // indirect github.com/kr/text v0.2.0 // indirect - github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240510043237-3afa4a26e31d // indirect + github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240510125329-7b93b3d7e99f // indirect github.com/ledgerwatch/erigonwatch v0.1.0 github.com/libp2p/go-buffer-pool v0.1.0 // indirect github.com/libp2p/go-cidranger v1.1.0 // indirect diff --git a/go.sum b/go.sum index 6539a0b7ba4..f96523c38cb 100644 --- a/go.sum +++ b/go.sum @@ -539,8 +539,8 @@ github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3P github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240510043237-3afa4a26e31d h1:pOxi2nvq6A5cQ2k7n08tXTqVxNAvbUTuzF8bZhb6ooQ= -github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240510043237-3afa4a26e31d/go.mod h1:3AuPxZc85jkehh/HA9h8gabv5MSi3kb/ddtzBsTVJFo= +github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240510125329-7b93b3d7e99f h1:vOUz9rYvrFWc84nuPUxReQj7OhU7QYWJCNXbH0NMPvI= +github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240510125329-7b93b3d7e99f/go.mod h1:3AuPxZc85jkehh/HA9h8gabv5MSi3kb/ddtzBsTVJFo= github.com/ledgerwatch/erigonwatch v0.1.0 
h1:TrCjklOu9ZI9/uiMigo1Jnknnk1I/dXUxXymA3xHfzo= github.com/ledgerwatch/erigonwatch v0.1.0/go.mod h1:uYq4hs3RL1OtIYRXAxYq02tpdGkx6rtXlpzdazDDbWI= github.com/ledgerwatch/log/v3 v3.9.0 h1:iDwrXe0PVwBC68Dd94YSsHbMgQ3ufsgjzXtFNFVZFRk= From 2f802f352940022a6671e94782587afd5bb6dacc Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Fri, 10 May 2024 21:43:35 +0700 Subject: [PATCH 17/48] e3: set dirty-space for chaindb to 512mb (#10268) Copy of PR #10269 --- node/node.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/node/node.go b/node/node.go index ce5f804f7e1..b2d79467ac3 100644 --- a/node/node.go +++ b/node/node.go @@ -345,7 +345,7 @@ func OpenDatabase(ctx context.Context, config *nodecfg.Config, label kv.Label, n if config.MdbxGrowthStep > 0 { opts = opts.GrowthStep(config.MdbxGrowthStep) } - opts = opts.DirtySpace(uint64(128 * datasize.MB)) + opts = opts.DirtySpace(uint64(512 * datasize.MB)) case kv.ConsensusDB: if config.MdbxPageSize.Bytes() > 0 { opts = opts.PageSize(config.MdbxPageSize.Bytes()) From 4e26eb01ffcf91d643be4e6aeebd5014d1f97737 Mon Sep 17 00:00:00 2001 From: awskii Date: Fri, 10 May 2024 17:38:49 +0100 Subject: [PATCH 18/48] E3 commitment tweaks (#10049) Co-authored-by: alex.sharov --- erigon-lib/commitment/bin_patricia_hashed.go | 14 +- erigon-lib/commitment/commitment.go | 354 +++++++++++++--- .../commitment/commitment_bench_test.go | 102 +++++ erigon-lib/commitment/commitment_test.go | 163 +++++++- erigon-lib/commitment/hex_patricia_hashed.go | 139 +++++-- .../hex_patricia_hashed_bench_test.go | 2 +- erigon-lib/state/aggregator_files.go | 1 - erigon-lib/state/btree_index.go | 23 +- erigon-lib/state/domain.go | 2 + erigon-lib/state/domain_committed.go | 377 +++++------------- erigon-lib/state/domain_shared.go | 157 ++++---- erigon-lib/state/domain_shared_bench_test.go | 46 ++- erigon-lib/state/domain_test.go | 13 +- eth/stagedsync/stage_execute.go | 7 +- eth/stagedsync/stage_trie3.go | 4 +- 15 files changed, 912 insertions(+), 492 deletions(-) create mode 100644 erigon-lib/commitment/commitment_bench_test.go diff --git a/erigon-lib/commitment/bin_patricia_hashed.go b/erigon-lib/commitment/bin_patricia_hashed.go index 6db6f0eff20..33deb3a9826 100644 --- a/erigon-lib/commitment/bin_patricia_hashed.go +++ b/erigon-lib/commitment/bin_patricia_hashed.go @@ -1059,7 +1059,7 @@ func (bph *BinPatriciaHashed) fold() (err error) { upBinaryCell.extLen = 0 upBinaryCell.downHashedLen = 0 if bph.branchBefore[row] { - _, err = bph.branchEncoder.CollectUpdate(updateKey, 0, bph.touchMap[row], 0, RetrieveCellNoop) + _, err = bph.branchEncoder.CollectUpdate(bph.ctx, updateKey, 0, bph.touchMap[row], 0, RetrieveCellNoop) if err != nil { return fmt.Errorf("failed to encode leaf node update: %w", err) } @@ -1087,7 +1087,7 @@ func (bph *BinPatriciaHashed) fold() (err error) { upBinaryCell.fillFromLowerBinaryCell(cell, depth, bph.currentKey[upDepth:bph.currentKeyLen], nibble) // Delete if it existed if bph.branchBefore[row] { - _, err = bph.branchEncoder.CollectUpdate(updateKey, 0, bph.touchMap[row], 0, RetrieveCellNoop) + _, err = bph.branchEncoder.CollectUpdate(bph.ctx, updateKey, 0, bph.touchMap[row], 0, RetrieveCellNoop) if err != nil { return fmt.Errorf("failed to encode leaf node update: %w", err) } @@ -1162,7 +1162,7 @@ func (bph *BinPatriciaHashed) fold() (err error) { var err error _ = cellGetter - lastNibble, err = bph.branchEncoder.CollectUpdate(updateKey, bitmap, bph.touchMap[row], bph.afterMap[row], cellGetter) + lastNibble, err = bph.branchEncoder.CollectUpdate(bph.ctx, 
updateKey, bitmap, bph.touchMap[row], bph.afterMap[row], cellGetter) if err != nil { return fmt.Errorf("failed to encode branch update: %w", err) } @@ -1366,7 +1366,7 @@ func (bph *BinPatriciaHashed) ProcessKeys(ctx context.Context, plainKeys [][]byt if err != nil { return nil, fmt.Errorf("root hash evaluation failed: %w", err) } - err = bph.branchEncoder.Load(loadToPatriciaContextFunc(bph.ctx), etl.TransformArgs{Quit: ctx.Done()}) + err = bph.branchEncoder.Load(bph.ctx, etl.TransformArgs{Quit: ctx.Done()}) if err != nil { return nil, fmt.Errorf("branch update failed: %w", err) } @@ -1530,6 +1530,10 @@ func (bph *BinPatriciaHashed) SetState(buf []byte) error { return nil } +func (bph *BinPatriciaHashed) ProcessTree(ctx context.Context, t *UpdateTree, lp string) (rootHash []byte, err error) { + panic("not implemented") +} + func (bph *BinPatriciaHashed) ProcessUpdates(ctx context.Context, plainKeys [][]byte, updates []Update) (rootHash []byte, err error) { for i, pk := range plainKeys { updates[i].hashedKey = hexToBin(pk) @@ -1615,7 +1619,7 @@ func (bph *BinPatriciaHashed) ProcessUpdates(ctx context.Context, plainKeys [][] return nil, fmt.Errorf("root hash evaluation failed: %w", err) } - err = bph.branchEncoder.Load(loadToPatriciaContextFunc(bph.ctx), etl.TransformArgs{Quit: ctx.Done()}) + err = bph.branchEncoder.Load(bph.ctx, etl.TransformArgs{Quit: ctx.Done()}) if err != nil { return nil, fmt.Errorf("branch update failed: %w", err) } diff --git a/erigon-lib/commitment/commitment.go b/erigon-lib/commitment/commitment.go index 33d8e05825b..d15562c7fcf 100644 --- a/erigon-lib/commitment/commitment.go +++ b/erigon-lib/commitment/commitment.go @@ -5,16 +5,18 @@ import ( "context" "encoding/binary" "fmt" - "hash" "math/bits" "strings" - "github.com/ledgerwatch/log/v3" + "github.com/google/btree" + "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon-lib/common/cryptozerocopy" + "github.com/ledgerwatch/erigon-lib/types" "golang.org/x/crypto/sha3" "github.com/ledgerwatch/erigon-lib/metrics" + "github.com/ledgerwatch/log/v3" - "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/length" "github.com/ledgerwatch/erigon-lib/etl" ) @@ -41,6 +43,8 @@ type Trie interface { // Set context for state IO ResetContext(ctx PatriciaContext) + ProcessTree(ctx context.Context, tree *UpdateTree, logPrefix string) (rootHash []byte, err error) + // Reads updates from storage ProcessKeys(ctx context.Context, pk [][]byte, logPrefix string) (rootHash []byte, err error) @@ -72,14 +76,19 @@ const ( VariantBinPatriciaTrie TrieVariant = "bin-patricia-hashed" ) -func InitializeTrie(tv TrieVariant) Trie { +func InitializeTrieAndUpdateTree(tv TrieVariant, mode Mode, tmpdir string) (Trie, *UpdateTree) { switch tv { case VariantBinPatriciaTrie: - return NewBinPatriciaHashed(length.Addr, nil) + trie := NewBinPatriciaHashed(length.Addr, nil) + fn := func(key []byte) []byte { return hexToBin(key) } + tree := NewUpdateTree(mode, tmpdir, fn) + return trie, tree case VariantHexPatriciaTrie: fallthrough default: - return NewHexPatriciaHashed(length.Addr, nil) + trie := NewHexPatriciaHashed(length.Addr, nil) + tree := NewUpdateTree(mode, tmpdir, trie.hashAndNibblizeKey) + return trie, tree } } @@ -145,6 +154,7 @@ func (branchData BranchData) String() string { type BranchEncoder struct { buf *bytes.Buffer bitmapBuf [binary.MaxVarintLen64]byte + merger *BranchMerger updates *etl.Collector tmpdir string } @@ -153,6 +163,7 @@ func NewBranchEncoder(sz uint64, tmpdir string) 
*BranchEncoder { be := &BranchEncoder{ buf: bytes.NewBuffer(make([]byte, sz)), tmpdir: tmpdir, + merger: NewHexBranchMerger(sz / 2), } be.initCollector() return be @@ -163,26 +174,19 @@ func (be *BranchEncoder) initCollector() { be.updates.LogLvl(log.LvlDebug) } -// reads previous comitted value and merges current with it if needed. -func loadToPatriciaContextFunc(pc PatriciaContext) etl.LoadFunc { - return func(prefix, update []byte, table etl.CurrentTableReader, next etl.LoadNextFunc) error { +func (be *BranchEncoder) Load(pc PatriciaContext, args etl.TransformArgs) error { + if err := be.updates.Load(nil, "", func(prefix, update []byte, table etl.CurrentTableReader, next etl.LoadNextFunc) error { stateValue, stateStep, err := pc.GetBranch(prefix) if err != nil { return err } - // this updates ensures that if commitment is present, each branch are also present in commitment state at that moment with costs of storage - //fmt.Printf("commitment branch encoder merge prefix [%x] [%x]->[%x]\n%v\n", prefix, stateValue, update, BranchData(update).String()) cp, cu := common.Copy(prefix), common.Copy(update) // has to copy :( if err = pc.PutBranch(cp, cu, stateValue, stateStep); err != nil { return err } return nil - } -} - -func (be *BranchEncoder) Load(load etl.LoadFunc, args etl.TransformArgs) error { - if err := be.updates.Load(nil, "", load, args); err != nil { + }, args); err != nil { return err } be.initCollector() @@ -190,20 +194,35 @@ func (be *BranchEncoder) Load(load etl.LoadFunc, args etl.TransformArgs) error { } func (be *BranchEncoder) CollectUpdate( + ctx PatriciaContext, prefix []byte, bitmap, touchMap, afterMap uint16, readCell func(nibble int, skip bool) (*Cell, error), ) (lastNibble int, err error) { - v, ln, err := be.EncodeBranch(bitmap, touchMap, afterMap, readCell) + var update []byte + update, lastNibble, err = be.EncodeBranch(bitmap, touchMap, afterMap, readCell) if err != nil { return 0, err } - //fmt.Printf("collectBranchUpdate [%x] -> [%x]\n", prefix, []byte(v)) - if err := be.updates.Collect(prefix, v); err != nil { + + prev, prevStep, err := ctx.GetBranch(prefix) + _ = prevStep + if err != nil { return 0, err } - return ln, nil + if len(prev) > 0 { + update, err = be.merger.Merge(prev, update) + if err != nil { + return 0, err + } + } + //fmt.Printf("collectBranchUpdate [%x] -> [%x]\n", prefix, update) + if err = be.updates.Collect(prefix, update); err != nil { + return 0, err + } + mxCommitmentBranchUpdates.Inc() + return lastNibble, nil } // Encoded result should be copied before next call to EncodeBranch, underlying slice is reused @@ -453,7 +472,7 @@ func (branchData BranchData) MergeHexBranches(branchData2 BranchData, newData [] var bitmapBuf [4]byte binary.BigEndian.PutUint16(bitmapBuf[0:], touchMap1|touchMap2) binary.BigEndian.PutUint16(bitmapBuf[2:], afterMap2) - newData = append(newData, bitmapBuf[:]...) + newData = append(newData[:0], bitmapBuf[:]...) 
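+	// the merged header is now written from newData[:0]: the caller's buffer
+	// is reset and reused, not appended to, so its previous contents are discarded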
for bitset, j := bitmap1|bitmap2, 0; bitset != 0; j++ { bit := bitset & -bitset if bitmap2&bit != 0 { @@ -535,13 +554,12 @@ func (branchData BranchData) DecodeCells() (touchMap, afterMap uint16, row [16]* } type BranchMerger struct { - buf *bytes.Buffer - num [4]byte - keccak hash.Hash + buf []byte + num [4]byte } func NewHexBranchMerger(capacity uint64) *BranchMerger { - return &BranchMerger{buf: bytes.NewBuffer(make([]byte, capacity)), keccak: sha3.NewLegacyKeccak256()} + return &BranchMerger{buf: make([]byte, capacity)} } // MergeHexBranches combines two branchData, number 2 coming after (and potentially shadowing) number 1 @@ -567,19 +585,14 @@ func (m *BranchMerger) Merge(branch1 BranchData, branch2 BranchData) (BranchData binary.BigEndian.PutUint16(m.num[2:], afterMap2) dataPos := 4 - m.buf.Reset() - if _, err := m.buf.Write(m.num[:]); err != nil { - return nil, err - } + m.buf = append(m.buf[:0], m.num[:]...) for bitset, j := bitmap1|bitmap2, 0; bitset != 0; j++ { bit := bitset & -bitset if bitmap2&bit != 0 { // Add fields from branch2 fieldBits := PartFlags(branch2[pos2]) - if err := m.buf.WriteByte(byte(fieldBits)); err != nil { - return nil, err - } + m.buf = append(m.buf, byte(fieldBits)) pos2++ for i := 0; i < bits.OnesCount8(byte(fieldBits)); i++ { @@ -590,19 +603,14 @@ func (m *BranchMerger) Merge(branch1 BranchData, branch2 BranchData) (BranchData return nil, fmt.Errorf("MergeHexBranches branch2: size overflow for length") } - _, err := m.buf.Write(branch2[pos2 : pos2+n]) - if err != nil { - return nil, err - } + m.buf = append(m.buf, branch2[pos2:pos2+n]...) pos2 += n dataPos += n if len(branch2) < pos2+int(l) { return nil, fmt.Errorf("MergeHexBranches branch2 is too small: expected at least %d got %d bytes", pos2+int(l), len(branch2)) } if l > 0 { - if _, err := m.buf.Write(branch2[pos2 : pos2+int(l)]); err != nil { - return nil, err - } + m.buf = append(m.buf, branch2[pos2:pos2+int(l)]...) pos2 += int(l) dataPos += int(l) } @@ -612,9 +620,7 @@ func (m *BranchMerger) Merge(branch1 BranchData, branch2 BranchData) (BranchData add := (touchMap2&bit == 0) && (afterMap2&bit != 0) // Add fields from branchData1 fieldBits := PartFlags(branch1[pos1]) if add { - if err := m.buf.WriteByte(byte(fieldBits)); err != nil { - return nil, err - } + m.buf = append(m.buf, byte(fieldBits)) } pos1++ for i := 0; i < bits.OnesCount8(byte(fieldBits)); i++ { @@ -624,10 +630,9 @@ func (m *BranchMerger) Merge(branch1 BranchData, branch2 BranchData) (BranchData } else if n < 0 { return nil, fmt.Errorf("MergeHexBranches branch1: size overflow for length") } + if add { - if _, err := m.buf.Write(branch1[pos1 : pos1+n]); err != nil { - return nil, err - } + m.buf = append(m.buf, branch1[pos1:pos1+n]...) } pos1 += n if len(branch1) < pos1+int(l) { @@ -637,9 +642,7 @@ func (m *BranchMerger) Merge(branch1 BranchData, branch2 BranchData) (BranchData } if l > 0 { if add { - if _, err := m.buf.Write(branch1[pos1 : pos1+int(l)]); err != nil { - return nil, err - } + m.buf = append(m.buf, branch1[pos1:pos1+int(l)]...) 
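+				// (note: Merge now returns m.buf directly instead of a copy, so
+				// the merged result is only valid until the next Merge call on
+				// this BranchMerger; copy it if it needs to outlive that)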
} pos1 += int(l) } @@ -647,9 +650,7 @@ func (m *BranchMerger) Merge(branch1 BranchData, branch2 BranchData) (BranchData } bitset ^= bit } - target := make([]byte, m.buf.Len()) - copy(target, m.buf.Bytes()) - return target, nil + return m.buf, nil } func ParseTrieVariant(s string) TrieVariant { @@ -758,3 +759,254 @@ func DecodeBranchAndCollectStat(key, branch []byte, tv TrieVariant) *BranchStat } return stat } + +// Defines how to evaluate commitments +type Mode uint + +const ( + ModeDisabled Mode = 0 + ModeDirect Mode = 1 + ModeUpdate Mode = 2 +) + +func (m Mode) String() string { + switch m { + case ModeDisabled: + return "disabled" + case ModeDirect: + return "direct" + case ModeUpdate: + return "update" + default: + return "unknown" + } +} + +func ParseCommitmentMode(s string) Mode { + var mode Mode + switch s { + case "off": + mode = ModeDisabled + case "update": + mode = ModeUpdate + default: + mode = ModeDirect + } + return mode +} + +type UpdateTree struct { + keccak cryptozerocopy.KeccakState + hasher keyHasher + keys map[string]struct{} + tree *btree.BTreeG[*KeyUpdate] + mode Mode + tmpdir string +} + +type keyHasher func(key []byte) []byte + +func keyHasherNoop(key []byte) []byte { return key } + +func NewUpdateTree(m Mode, tmpdir string, hasher keyHasher) *UpdateTree { + t := &UpdateTree{ + keccak: sha3.NewLegacyKeccak256().(cryptozerocopy.KeccakState), + hasher: hasher, + tmpdir: tmpdir, + mode: m, + } + if t.mode == ModeDirect { + t.keys = make(map[string]struct{}) + } else if t.mode == ModeUpdate { + t.tree = btree.NewG[*KeyUpdate](64, keyUpdateLessFn) + } + return t +} + +// TouchPlainKey marks plainKey as updated and applies different fn for different key types +// (different behaviour for Code, Account and Storage key modifications). 
+func (t *UpdateTree) TouchPlainKey(key, val []byte, fn func(c *KeyUpdate, val []byte)) { + switch t.mode { + case ModeUpdate: + pivot, updated := &KeyUpdate{plainKey: key}, false + + t.tree.DescendLessOrEqual(pivot, func(item *KeyUpdate) bool { + if bytes.Equal(item.plainKey, pivot.plainKey) { + fn(item, val) + updated = true + } + return false + }) + if !updated { + pivot.update.plainKey = pivot.plainKey + pivot.update.hashedKey = t.hasher(pivot.plainKey) + fn(pivot, val) + t.tree.ReplaceOrInsert(pivot) + } + case ModeDirect: + t.keys[string(key)] = struct{}{} + default: + } +} + +func (t *UpdateTree) Size() (updates uint64) { + switch t.mode { + case ModeDirect: + return uint64(len(t.keys)) + case ModeUpdate: + return uint64(t.tree.Len()) + default: + return 0 + } +} + +func (t *UpdateTree) TouchAccount(c *KeyUpdate, val []byte) { + if len(val) == 0 { + c.update.Flags = DeleteUpdate + return + } + if c.update.Flags&DeleteUpdate != 0 { + c.update.Flags ^= DeleteUpdate + } + nonce, balance, chash := types.DecodeAccountBytesV3(val) + if c.update.Nonce != nonce { + c.update.Nonce = nonce + c.update.Flags |= NonceUpdate + } + if !c.update.Balance.Eq(balance) { + c.update.Balance.Set(balance) + c.update.Flags |= BalanceUpdate + } + if !bytes.Equal(chash, c.update.CodeHashOrStorage[:]) { + if len(chash) == 0 { + c.update.ValLength = length.Hash + copy(c.update.CodeHashOrStorage[:], EmptyCodeHash) + } else { + copy(c.update.CodeHashOrStorage[:], chash) + c.update.ValLength = length.Hash + c.update.Flags |= CodeUpdate + } + } +} + +func (t *UpdateTree) TouchStorage(c *KeyUpdate, val []byte) { + c.update.ValLength = len(val) + if len(val) == 0 { + c.update.Flags = DeleteUpdate + } else { + c.update.Flags |= StorageUpdate + copy(c.update.CodeHashOrStorage[:], val) + } +} + +func (t *UpdateTree) TouchCode(c *KeyUpdate, val []byte) { + t.keccak.Reset() + t.keccak.Write(val) + t.keccak.Read(c.update.CodeHashOrStorage[:]) + if c.update.Flags == DeleteUpdate && len(val) == 0 { + c.update.Flags = DeleteUpdate + c.update.ValLength = 0 + return + } + c.update.ValLength = length.Hash + if len(val) != 0 { + c.update.Flags |= CodeUpdate + } +} + +func (t *UpdateTree) Close() { + if t.keys != nil { + clear(t.keys) + } + if t.tree != nil { + t.tree.Clear(true) + t.tree = nil + } +} + +func (t *UpdateTree) HashSort(ctx context.Context, fn func(hk, pk []byte) error) error { + switch t.mode { + case ModeDirect: + collector := etl.NewCollector("commitment", t.tmpdir, etl.NewSortableBuffer(etl.BufferOptimalSize/4), log.Root().New("update-tree")) + defer collector.Close() + collector.LogLvl(log.LvlDebug) + collector.SortAndFlushInBackground(true) + + for k := range t.keys { + select { + case <-ctx.Done(): + return nil + default: + } + if err := collector.Collect(t.hasher([]byte(k)), []byte(k)); err != nil { + return err + } + } + clear(t.keys) + + err := collector.Load(nil, "", func(k, v []byte, table etl.CurrentTableReader, next etl.LoadNextFunc) error { + return fn(k, v) + }, etl.TransformArgs{Quit: ctx.Done()}) + if err != nil { + return err + } + case ModeUpdate: + t.tree.Ascend(func(item *KeyUpdate) bool { + select { + case <-ctx.Done(): + return false + default: + } + + if err := fn(item.update.hashedKey, item.plainKey); err != nil { + return false + } + return true + }) + t.tree.Clear(true) + default: + return nil + } + return nil +} + +// Returns list of both plain and hashed keys. If .mode is ModeUpdate, updates also returned. +// No ordering guarantees is provided. 
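+//
+// Hedged usage sketch (ModeDirect; tmpDir, accountKey and accountVal are
+// placeholders, the other names are defined above):
+//
+//	tree := NewUpdateTree(ModeDirect, tmpDir, keyHasherNoop)
+//	tree.TouchPlainKey(accountKey, accountVal, tree.TouchAccount)
+//	plainKeys, _ := tree.List(true) // the updates slice is nil in ModeDirect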
+func (t *UpdateTree) List(clear bool) ([][]byte, []Update) {
+	switch t.mode {
+	case ModeDirect:
+		plainKeys := make([][]byte, 0, len(t.keys))
+		err := t.HashSort(context.Background(), func(hk, pk []byte) error {
+			plainKeys = append(plainKeys, common.Copy(pk))
+			return nil
+		})
+		if err != nil {
+			return nil, nil
+		}
+		return plainKeys, nil
+	case ModeUpdate:
+		plainKeys := make([][]byte, t.tree.Len())
+		updates := make([]Update, t.tree.Len())
+		i := 0
+		t.tree.Ascend(func(item *KeyUpdate) bool {
+			plainKeys[i], updates[i] = item.plainKey, item.update
+			i++
+			return true
+		})
+		if clear {
+			t.tree.Clear(true)
+		}
+		return plainKeys, updates
+	default:
+		return nil, nil
+	}
+}
+
+type KeyUpdate struct {
+	plainKey []byte
+	update   Update
+}
+
+func keyUpdateLessFn(i, j *KeyUpdate) bool {
+	return bytes.Compare(i.plainKey, j.plainKey) < 0
+}
diff --git a/erigon-lib/commitment/commitment_bench_test.go b/erigon-lib/commitment/commitment_bench_test.go
new file mode 100644
index 00000000000..424eab422ed
--- /dev/null
+++ b/erigon-lib/commitment/commitment_bench_test.go
@@ -0,0 +1,102 @@
+package commitment
+
+import (
+	"encoding/binary"
+	"github.com/ledgerwatch/erigon-lib/common"
+	"github.com/stretchr/testify/require"
+	"testing"
+)
+
+func BenchmarkBranchMerger_Merge(b *testing.B) {
+	b.StopTimer()
+	row, bm := generateCellRow(b, 16)
+
+	be := NewBranchEncoder(1024, b.TempDir())
+	enc, _, err := be.EncodeBranch(bm, bm, bm, func(i int, skip bool) (*Cell, error) {
+		return row[i], nil
+	})
+	require.NoError(b, err)
+
+	var copies [16][]byte
+	var tm uint16
+	am := bm
+
+	for i := 15; i >= 0; i-- {
+		row[i] = nil
+		tm, bm, am = uint16(1<<i), bm>>1, am>>1
+		enc1, _, err := be.EncodeBranch(bm, tm, am, func(i int, skip bool) (*Cell, error) {
+			return row[i], nil
+		})
+		require.NoError(b, err)
+
+		copies[i] = common.Copy(enc1)
+	}
+
+	b.StartTimer()
+	bmg := NewHexBranchMerger(4096)
+	var ci int
+	for i := 0; i < b.N; i++ {
+		_, err := bmg.Merge(enc, copies[ci])
+		if err != nil {
+			b.Fatal(err)
+		}
+		ci++
+		if ci == len(copies) {
+			ci = 0
+		}
+	}
+}
+
+func BenchmarkBranchData_ReplacePlainKeys(b *testing.B) {
+	row, bm := generateCellRow(b, 16)
+
+	cells, am := unfoldBranchDataFromString(b, "86e586e5082035e72a782b51d9c98548467e3f868294d923cdbbdf4ce326c867bd972c4a2395090109203b51781a76dc87640aea038e3fdd8adca94049aaa436735b162881ec159f6fb408201aa2fa41b5fb019e8abf8fc32800805a2743cfa15373cf64ba16f4f70e683d8e0404a192d9050404f993d9050404e594d90508208642542ff3ce7d63b9703e85eb924ab3071aa39c25b1651c6dda4216387478f10404bd96d905")
+	for i, c := range cells {
+		if c == nil {
+			continue
+		}
+		if c.apl > 0 {
+			offt, _ := binary.Uvarint(c.apk[:c.apl])
+			b.Logf("%d apk %x, offt %d\n", i, c.apk[:c.apl], offt)
+		}
+		if c.spl > 0 {
+			offt, _ := binary.Uvarint(c.spk[:c.spl])
+			b.Logf("%d spk %x offt %d\n", i, c.spk[:c.spl], offt)
+		}
+
+	}
+	_ = cells
+	_ = am
+
+	cg := func(nibble int, skip bool) (*Cell, error) {
+		return row[nibble], nil
+	}
+
+	be := NewBranchEncoder(1024, b.TempDir())
+	enc, _, err := be.EncodeBranch(bm, bm, bm, cg)
+	require.NoError(b, err)
+
+	original := common.Copy(enc)
+	for i := 0; i < b.N; i++ {
+		target := make([]byte, 0, len(enc))
+		oldKeys := make([][]byte, 0)
+		replaced, err := enc.ReplacePlainKeys(target, func(key []byte, isStorage bool) ([]byte, error) {
+			oldKeys = append(oldKeys, key)
+			if isStorage {
+				return key[:8], nil
+			}
+			return key[:4], nil
+		})
+		require.NoError(b, err)
+		require.Truef(b, len(replaced) < len(enc), "replaced expected to be shorter than original enc")
+
+		keyI := 0
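+		// Reverse pass: expand each shortened key back from oldKeys and verify
+		// the round-trip reproduces the original encoding byte-for-byte.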
+		replacedBack, err := replaced.ReplacePlainKeys(nil, func(key []byte, isStorage bool) ([]byte, error) {
+			require.EqualValues(b, oldKeys[keyI][:4], key[:4])
+			defer func() { keyI++ }()
+			return oldKeys[keyI], nil
+		})
+		require.NoError(b, err)
+		require.EqualValues(b, original, replacedBack)
+	}
+}
diff --git a/erigon-lib/commitment/commitment_test.go b/erigon-lib/commitment/commitment_test.go
index d794054ba6a..d60108f8b42 100644
--- a/erigon-lib/commitment/commitment_test.go
+++ b/erigon-lib/commitment/commitment_test.go
@@ -1,9 +1,12 @@
 package commitment
 
 import (
+	"bytes"
+	"context"
 	"encoding/binary"
 	"encoding/hex"
 	"math/rand"
+	"sort"
 	"testing"
 
 	"github.com/ledgerwatch/erigon-lib/common"
@@ -11,8 +14,8 @@ import (
 	"github.com/stretchr/testify/require"
 )
 
-func generateCellRow(t *testing.T, size int) (row []*Cell, bitmap uint16) {
-	t.Helper()
+func generateCellRow(tb testing.TB, size int) (row []*Cell, bitmap uint16) {
+	tb.Helper()
 
 	row = make([]*Cell, size)
 	var bm uint16
@@ -20,24 +23,24 @@ func generateCellRow(t *testing.T, size int) (row []*Cell, bitmap uint16) {
 		row[i] = new(Cell)
 		row[i].hl = 32
 		n, err := rand.Read(row[i].h[:])
-		require.NoError(t, err)
-		require.EqualValues(t, row[i].hl, n)
+		require.NoError(tb, err)
+		require.EqualValues(tb, row[i].hl, n)
 
 		th := rand.Intn(120)
 		switch {
 		case th > 70:
 			n, err = rand.Read(row[i].apk[:])
-			require.NoError(t, err)
+			require.NoError(tb, err)
 			row[i].apl = n
 		case th > 20 && th <= 70:
 			n, err = rand.Read(row[i].spk[:])
-			require.NoError(t, err)
+			require.NoError(tb, err)
 			row[i].spl = n
 		case th <= 20:
 			n, err = rand.Read(row[i].extension[:th])
 			row[i].extLen = n
-			require.NoError(t, err)
-			require.EqualValues(t, th, n)
+			require.NoError(tb, err)
+			require.EqualValues(tb, th, n)
 		}
 		bm |= uint16(1 << i)
 	}
@@ -81,6 +84,42 @@ func TestBranchData_MergeHexBranches2(t *testing.T) {
 	}
 }
 
+func TestBranchData_MergeHexBranches_ValueAliveAfterNewMerges(t *testing.T) {
+	t.Skip()
+	row, bm := generateCellRow(t, 16)
+
+	be := NewBranchEncoder(1024, t.TempDir())
+	enc, _, err := be.EncodeBranch(bm, bm, bm, func(i int, skip bool) (*Cell, error) {
+		return row[i], nil
+	})
+	require.NoError(t, err)
+
+	copies := make([][]byte, 16)
+	values := make([][]byte, len(copies))
+
+	merger := NewHexBranchMerger(8192)
+
+	var tm uint16
+	am := bm
+
+	for i := 15; i >= 0; i-- {
+		row[i] = nil
+		tm, bm, am = uint16(1<<i), bm>>1, am>>1
+		enc1, _, err := be.EncodeBranch(bm, tm, am, func(i int, skip bool) (*Cell, error) {
+			return row[i], nil
+		})
+		require.NoError(t, err)
+		merged, err := merger.Merge(enc, enc1)
+		require.NoError(t, err)
+
+		copies[i] = common.Copy(merged)
+		values[i] = merged
+	}
+	for i := 0; i < len(copies); i++ {
+		require.EqualValues(t, copies[i], values[i])
+	}
+}
+
 func TestBranchData_MergeHexBranchesEmptyBranches(t *testing.T) {
 	// Create a BranchMerger instance with sufficient capacity for testing.
merger := NewHexBranchMerger(1024) @@ -115,20 +154,20 @@ func TestBranchData_MergeHexBranches3(t *testing.T) { } // helper to decode row of cells from string -func unfoldBranchDataFromString(t *testing.T, encs string) (row []*Cell, am uint16) { - t.Helper() +func unfoldBranchDataFromString(tb testing.TB, encs string) (row []*Cell, am uint16) { + tb.Helper() //encs := "0405040b04080f0b080d030204050b0502090805050d01060e060d070f0903090c04070a0d0a000e090b060b0c040c0700020e0b0c060b0106020c0607050a0b0209070d06040808" //encs := "37ad10eb75ea0fc1c363db0dda0cd2250426ee2c72787155101ca0e50804349a94b649deadcc5cddc0d2fd9fb358c2edc4e7912d165f88877b1e48c69efacf418e923124506fbb2fd64823fd41cbc10427c423" enc, err := hex.DecodeString(encs) - require.NoError(t, err) + require.NoError(tb, err) tm, am, origins, err := BranchData(enc).DecodeCells() - require.NoError(t, err) + require.NoError(tb, err) _, _ = tm, am - t.Logf("%s", BranchData(enc).String()) - //require.EqualValues(t, tm, am) + tb.Logf("%s", BranchData(enc).String()) + //require.EqualValues(tb, tm, am) //for i, c := range origins { // if c == nil { // continue @@ -249,3 +288,99 @@ func TestBranchData_ReplacePlainKeys_WithEmpty(t *testing.T) { require.EqualValues(t, orig, merged) }) } + +func TestNewUpdateTree(t *testing.T) { + t.Run("ModeUpdate", func(t *testing.T) { + ut := NewUpdateTree(ModeUpdate, t.TempDir(), keyHasherNoop) + + require.NotNil(t, ut.tree) + require.NotNil(t, ut.keccak) + require.Nil(t, ut.keys) + require.Equal(t, ModeUpdate, ut.mode) + }) + + t.Run("ModeDirect", func(t *testing.T) { + ut := NewUpdateTree(ModeDirect, t.TempDir(), keyHasherNoop) + + require.NotNil(t, ut.keccak) + require.NotNil(t, ut.keys) + require.Equal(t, ModeDirect, ut.mode) + }) + +} + +func TestUpdateTree_TouchPlainKey(t *testing.T) { + utUpdate := NewUpdateTree(ModeUpdate, t.TempDir(), keyHasherNoop) + utDirect := NewUpdateTree(ModeDirect, t.TempDir(), keyHasherNoop) + utUpdate1 := NewUpdateTree(ModeUpdate, t.TempDir(), keyHasherNoop) + utDirect1 := NewUpdateTree(ModeDirect, t.TempDir(), keyHasherNoop) + + type tc struct { + key []byte + val []byte + } + + upds := []tc{ + {common.FromHex("c17fa85f22306d37cec90b0ec74c5623dbbac68f"), []byte("value1")}, + {common.FromHex("553bba1d92398a69fbc9f01593bbc51b58862366"), []byte("value0")}, + {common.FromHex("553bba1d92398a69fbc9f01593bbc51b58862366"), []byte("value1")}, + {common.FromHex("97c780315e7820752006b7a918ce7ec023df263a87a715b64d5ab445e1782a760a974f8810551f81dfb7f1425f7d8358332af195"), []byte("value1")}, + } + for i := 0; i < len(upds); i++ { + utUpdate.TouchPlainKey(upds[i].key, upds[i].val, utUpdate.TouchStorage) + utDirect.TouchPlainKey(upds[i].key, upds[i].val, utDirect.TouchStorage) + utUpdate1.TouchPlainKey(upds[i].key, upds[i].val, utUpdate.TouchStorage) + utDirect1.TouchPlainKey(upds[i].key, upds[i].val, utDirect.TouchStorage) + } + + uniqUpds := make(map[string]tc) + for i := 0; i < len(upds); i++ { + uniqUpds[string(upds[i].key)] = upds[i] + } + sortedUniqUpds := make([]tc, 0, len(uniqUpds)) + for _, v := range uniqUpds { + sortedUniqUpds = append(sortedUniqUpds, v) + } + sort.Slice(sortedUniqUpds, func(i, j int) bool { + return bytes.Compare(sortedUniqUpds[i].key, sortedUniqUpds[j].key) < 0 + }) + + sz := utUpdate.Size() + require.EqualValues(t, 3, sz) + + sz = utDirect.Size() + require.EqualValues(t, 3, sz) + + pk, upd := utUpdate.List(true) + require.Len(t, pk, 3) + require.NotNil(t, upd) + + for i := 0; i < len(sortedUniqUpds); i++ { + require.EqualValues(t, sortedUniqUpds[i].key, pk[i]) + 
require.EqualValues(t, sortedUniqUpds[i].val, upd[i].CodeHashOrStorage[:upd[i].ValLength]) + } + + pk, upd = utDirect.List(true) + require.Len(t, pk, 3) + require.Nil(t, upd) + + for i := 0; i < len(sortedUniqUpds); i++ { + require.EqualValues(t, sortedUniqUpds[i].key, pk[i]) + } + + i := 0 + err := utUpdate1.HashSort(context.Background(), func(hk, pk []byte) error { + require.EqualValues(t, sortedUniqUpds[i].key, pk) + i++ + return nil + }) + require.NoError(t, err) + + i = 0 + err = utDirect1.HashSort(context.Background(), func(hk, pk []byte) error { + require.EqualValues(t, sortedUniqUpds[i].key, pk) + i++ + return nil + }) + require.NoError(t, err) +} diff --git a/erigon-lib/commitment/hex_patricia_hashed.go b/erigon-lib/commitment/hex_patricia_hashed.go index 2f831af70d8..130ec4eb4a1 100644 --- a/erigon-lib/commitment/hex_patricia_hashed.go +++ b/erigon-lib/commitment/hex_patricia_hashed.go @@ -22,6 +22,7 @@ import ( "encoding/binary" "encoding/hex" "fmt" + "github.com/ledgerwatch/erigon-lib/etl" "hash" "io" "math/bits" @@ -81,7 +82,6 @@ type HexPatriciaHashed struct { ctx PatriciaContext hashAuxBuffer [128]byte // buffer to compute cell hash or write hash-related things auxBuffer *bytes.Buffer // auxiliary buffer used during branch updates encoding - branchMerger *BranchMerger branchEncoder *BranchEncoder } @@ -92,7 +92,6 @@ func NewHexPatriciaHashed(accountKeyLen int, ctx PatriciaContext) *HexPatriciaHa keccak2: sha3.NewLegacyKeccak256().(keccakState), accountKeyLen: accountKeyLen, auxBuffer: bytes.NewBuffer(make([]byte, 8192)), - branchMerger: NewHexBranchMerger(1024), } tdir := os.TempDir() if ctx != nil { @@ -1054,7 +1053,7 @@ func (hph *HexPatriciaHashed) fold() (err error) { upCell.extLen = 0 upCell.downHashedLen = 0 if hph.branchBefore[row] { - _, err := hph.collectBranchUpdate(updateKey, 0, hph.touchMap[row], 0, RetrieveCellNoop) + _, err := hph.branchEncoder.CollectUpdate(hph.ctx, updateKey, 0, hph.touchMap[row], 0, RetrieveCellNoop) if err != nil { return fmt.Errorf("failed to encode leaf node update: %w", err) } @@ -1082,7 +1081,7 @@ func (hph *HexPatriciaHashed) fold() (err error) { upCell.fillFromLowerCell(cell, depth, hph.currentKey[upDepth:hph.currentKeyLen], nibble) // Delete if it existed if hph.branchBefore[row] { - _, err := hph.collectBranchUpdate(updateKey, 0, hph.touchMap[row], 0, RetrieveCellNoop) + _, err := hph.branchEncoder.CollectUpdate(hph.ctx, updateKey, 0, hph.touchMap[row], 0, RetrieveCellNoop) if err != nil { return fmt.Errorf("failed to encode leaf node update: %w", err) } @@ -1155,7 +1154,7 @@ func (hph *HexPatriciaHashed) fold() (err error) { var lastNibble int var err error - lastNibble, err = hph.collectBranchUpdate(updateKey, bitmap, hph.touchMap[row], hph.afterMap[row], cellGetter) + lastNibble, err = hph.branchEncoder.CollectUpdate(hph.ctx, updateKey, bitmap, hph.touchMap[row], hph.afterMap[row], cellGetter) if err != nil { return fmt.Errorf("failed to encode branch update: %w", err) } @@ -1270,45 +1269,109 @@ func (hph *HexPatriciaHashed) updateCell(plainKey, hashedKey []byte) *Cell { return cell } -func (hph *HexPatriciaHashed) collectBranchUpdate( - prefix []byte, - bitmap, touchMap, afterMap uint16, - readCell func(nibble int, skip bool) (*Cell, error), -) (lastNibble int, err error) { - - update, ln, err := hph.branchEncoder.EncodeBranch(bitmap, touchMap, afterMap, readCell) +func (hph *HexPatriciaHashed) RootHash() ([]byte, error) { + rh, err := hph.computeCellHash(&hph.root, 0, nil) if err != nil { - return 0, err + return nil, err } - 
prev, prevStep, err := hph.ctx.GetBranch(prefix) // prefix already compacted by fold + return rh[1:], nil // first byte is 128+hash_len +} + +func (hph *HexPatriciaHashed) ProcessTree(ctx context.Context, tree *UpdateTree, logPrefix string) (rootHash []byte, err error) { + var ( + stagedCell = new(Cell) + logEvery = time.NewTicker(20 * time.Second) + + m runtime.MemStats + ki uint64 + ) + defer logEvery.Stop() + updatesCount := tree.Size() + + err = tree.HashSort(ctx, func(hashedKey, plainKey []byte) error { + select { + case <-logEvery.C: + dbg.ReadMemStats(&m) + log.Info(fmt.Sprintf("[%s][agg] computing trie", logPrefix), + "progress", fmt.Sprintf("%dk/%dk", ki/1000, updatesCount/1000), "alloc", common.ByteCount(m.Alloc), "sys", common.ByteCount(m.Sys)) + default: + } + + if hph.trace { + fmt.Printf("\n%d/%d) plainKey=[%x], hashedKey=[%x], currentKey=[%x]\n", ki+1, updatesCount, plainKey, hashedKey, hph.currentKey[:hph.currentKeyLen]) + } + // Keep folding until the currentKey is the prefix of the key we modify + for hph.needFolding(hashedKey) { + if err := hph.fold(); err != nil { + return fmt.Errorf("fold: %w", err) + } + } + // Now unfold until we step on an empty cell + for unfolding := hph.needUnfolding(hashedKey); unfolding > 0; unfolding = hph.needUnfolding(hashedKey) { + if err := hph.unfold(hashedKey, unfolding); err != nil { + return fmt.Errorf("unfold: %w", err) + } + } + + // Update the cell + stagedCell.reset() + if len(plainKey) == hph.accountKeyLen { + if err := hph.ctx.GetAccount(plainKey, stagedCell); err != nil { + return fmt.Errorf("GetAccount for key %x failed: %w", plainKey, err) + } + if !stagedCell.Delete { + cell := hph.updateCell(plainKey, hashedKey) + cell.setAccountFields(stagedCell.CodeHash[:], &stagedCell.Balance, stagedCell.Nonce) + + if hph.trace { + fmt.Printf("GetAccount update key %x => balance=%d nonce=%v codeHash=%x\n", cell.apk, &cell.Balance, cell.Nonce, cell.CodeHash) + } + } + } else { + if err = hph.ctx.GetStorage(plainKey, stagedCell); err != nil { + return fmt.Errorf("GetStorage for key %x failed: %w", plainKey, err) + } + if !stagedCell.Delete { + hph.updateCell(plainKey, hashedKey).setStorage(stagedCell.Storage[:stagedCell.StorageLen]) + if hph.trace { + fmt.Printf("GetStorage reading key %x => %x\n", plainKey, stagedCell.Storage[:stagedCell.StorageLen]) + } + } + } + + if stagedCell.Delete { + if hph.trace { + fmt.Printf("delete cell %x hash %x\n", plainKey, hashedKey) + } + hph.deleteCell(hashedKey) + } + mxCommitmentKeys.Inc() + ki++ + return nil + }) if err != nil { - return 0, err + return nil, fmt.Errorf("hash sort failed: %w", err) } - if len(prev) > 0 { - previous := BranchData(prev) - merged, err := hph.branchMerger.Merge(previous, update) - if err != nil { - return 0, err + + // Folding everything up to the root + for hph.activeRows > 0 { + if err := hph.fold(); err != nil { + return nil, fmt.Errorf("final fold: %w", err) } - update = merged } - // this updates ensures that if commitment is present, each branch are also present in commitment state at that moment with costs of storage - //fmt.Printf("commitment branch encoder merge prefix [%x] [%x]->[%x]\n%update\n", prefix, stateValue, update, BranchData(update).String()) - cp, cu := common.Copy(prefix), common.Copy(update) // has to copy :( - if err = hph.ctx.PutBranch(cp, cu, prev, prevStep); err != nil { - return 0, err + rootHash, err = hph.RootHash() + if err != nil { + return nil, fmt.Errorf("root hash evaluation failed: %w", err) } - mxCommitmentBranchUpdates.Inc() - return 
ln, nil -} - -func (hph *HexPatriciaHashed) RootHash() ([]byte, error) { - rh, err := hph.computeCellHash(&hph.root, 0, nil) + if hph.trace { + fmt.Printf("root hash %x updates %d\n", rootHash, updatesCount) + } + err = hph.branchEncoder.Load(hph.ctx, etl.TransformArgs{Quit: ctx.Done()}) if err != nil { - return nil, err + return nil, fmt.Errorf("branch update failed: %w", err) } - return rh[1:], nil // first byte is 128+hash_len + return rootHash, nil } // Process keys and updates in a single pass. Branch updates are written to PatriciaContext if no error occurs. @@ -1403,6 +1466,10 @@ func (hph *HexPatriciaHashed) ProcessKeys(ctx context.Context, plainKeys [][]byt if hph.trace { fmt.Printf("root hash %x updates %d\n", rootHash, len(plainKeys)) } + err = hph.branchEncoder.Load(hph.ctx, etl.TransformArgs{Quit: ctx.Done()}) + if err != nil { + return nil, fmt.Errorf("branch update failed: %w", err) + } return rootHash, nil } @@ -1492,6 +1559,10 @@ func (hph *HexPatriciaHashed) ProcessUpdates(ctx context.Context, plainKeys [][] if err != nil { return nil, fmt.Errorf("root hash evaluation failed: %w", err) } + err = hph.branchEncoder.Load(hph.ctx, etl.TransformArgs{Quit: ctx.Done()}) + if err != nil { + return nil, fmt.Errorf("branch update failed: %w", err) + } return rootHash, nil } diff --git a/erigon-lib/commitment/hex_patricia_hashed_bench_test.go b/erigon-lib/commitment/hex_patricia_hashed_bench_test.go index 930a98468a3..643a6c1accd 100644 --- a/erigon-lib/commitment/hex_patricia_hashed_bench_test.go +++ b/erigon-lib/commitment/hex_patricia_hashed_bench_test.go @@ -9,7 +9,7 @@ import ( "github.com/ledgerwatch/erigon-lib/common/length" ) -func Benchmark_HexPatriciaHahsed_ReviewKeys(b *testing.B) { +func Benchmark_HexPatriciaHashed_ReviewKeys(b *testing.B) { ms := NewMockState(&testing.T{}) ctx := context.Background() hph := NewHexPatriciaHashed(length.Addr, ms) diff --git a/erigon-lib/state/aggregator_files.go b/erigon-lib/state/aggregator_files.go index c8ad7dc545e..53e3e01c32e 100644 --- a/erigon-lib/state/aggregator_files.go +++ b/erigon-lib/state/aggregator_files.go @@ -107,7 +107,6 @@ func (ac *AggregatorRoTx) staticFilesInRange(r RangesV3) (sf SelectedStaticFiles for id := range ac.d { if r.d[id].any() { sf.d[id], sf.dIdx[id], sf.dHist[id], sf.dI[id] = ac.d[id].staticFilesInRange(r.d[id]) - } } if r.logAddrs { diff --git a/erigon-lib/state/btree_index.go b/erigon-lib/state/btree_index.go index 13724222487..da1029b7d8b 100644 --- a/erigon-lib/state/btree_index.go +++ b/erigon-lib/state/btree_index.go @@ -7,6 +7,7 @@ import ( "encoding/binary" "errors" "fmt" + "github.com/ledgerwatch/erigon-lib/common" "math" "os" "path" @@ -20,7 +21,6 @@ import ( "github.com/ledgerwatch/log/v3" "github.com/spaolacci/murmur3" - "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/background" "github.com/ledgerwatch/erigon-lib/common/dbg" "github.com/ledgerwatch/erigon-lib/etl" @@ -626,26 +626,7 @@ func (btw *BtIndexWriter) AddKey(key []byte, offset uint64) error { // loadFuncBucket is required to satisfy the type etl.LoadFunc type, to use with collector.Load func (btw *BtIndexWriter) loadFuncBucket(k, v []byte, _ etl.CurrentTableReader, _ etl.LoadNextFunc) error { - // k is the BigEndian encoding of the bucket number, and the v is the key that is assigned into that bucket - //if uint64(len(btw.vals)) >= btw.batchSizeLimit { - // if err := btw.drainBatch(); err != nil { - // return err - // } - //} - - // if _, err := btw.indexW.Write(k); err != nil { - // return err - 
// } - //if _, err := btw.indexW.Write(v); err != nil { - // return err - //} - //copy(btw.numBuf[8-btw.bytesPerRec:], v) - //btw.ef.AddOffset(binary.BigEndian.Uint64(btw.numBuf[:])) - btw.ef.AddOffset(binary.BigEndian.Uint64(v)) - - //btw.keys = append(btw.keys, binary.BigEndian.Uint64(k), binary.BigEndian.Uint64(k[8:])) - //btw.vals = append(btw.vals, binary.BigEndian.Uint64(v)) return nil } @@ -915,12 +896,12 @@ func (b *BtIndex) keyCmp(k []byte, di uint64, g ArchiveGetter) (int, []byte, err // Key and value is valid until cursor.Next is called func (b *BtIndex) newCursor(ctx context.Context, k, v []byte, d uint64, g ArchiveGetter) *Cursor { return &Cursor{ - btt: b, ctx: ctx, getter: g, key: common.Copy(k), value: common.Copy(v), d: d, + btt: b, } } diff --git a/erigon-lib/state/domain.go b/erigon-lib/state/domain.go index 2eef13d54de..b18c10a8fe0 100644 --- a/erigon-lib/state/domain.go +++ b/erigon-lib/state/domain.go @@ -631,6 +631,7 @@ type DomainRoTx struct { keyBuf [60]byte // 52b key and 8b for inverted step valBuf [128]byte + comBuf []byte keysC kv.CursorDupSort valsC kv.Cursor @@ -2043,6 +2044,7 @@ func (hi *DomainLatestIterFile) init(dc *DomainRoTx) error { } for i, item := range dc.files { + // todo release btcursor when iter over/make it truly stateless btCursor, err := dc.statelessBtree(i).Seek(dc.statelessGetter(i), hi.from) if err != nil { return err diff --git a/erigon-lib/state/domain_committed.go b/erigon-lib/state/domain_committed.go index eac2e22b636..2374b99ef28 100644 --- a/erigon-lib/state/domain_committed.go +++ b/erigon-lib/state/domain_committed.go @@ -20,200 +20,15 @@ import ( "bytes" "encoding/binary" "fmt" - "slices" "strings" - "github.com/google/btree" - "github.com/ledgerwatch/erigon-lib/recsplit" - "github.com/ledgerwatch/erigon-lib/types" - "golang.org/x/crypto/sha3" - "github.com/ledgerwatch/erigon-lib/commitment" - "github.com/ledgerwatch/erigon-lib/common" - "github.com/ledgerwatch/erigon-lib/common/cryptozerocopy" "github.com/ledgerwatch/erigon-lib/common/length" + "github.com/ledgerwatch/erigon-lib/recsplit" ) -// Defines how to evaluate commitments -type CommitmentMode uint - -const ( - CommitmentModeDisabled CommitmentMode = 0 - CommitmentModeDirect CommitmentMode = 1 - CommitmentModeUpdate CommitmentMode = 2 -) - -func (m CommitmentMode) String() string { - switch m { - case CommitmentModeDisabled: - return "disabled" - case CommitmentModeDirect: - return "direct" - case CommitmentModeUpdate: - return "update" - default: - return "unknown" - } -} - -func ParseCommitmentMode(s string) CommitmentMode { - var mode CommitmentMode - switch s { - case "off": - mode = CommitmentModeDisabled - case "update": - mode = CommitmentModeUpdate - default: - mode = CommitmentModeDirect - } - return mode -} - type ValueMerger func(prev, current []byte) (merged []byte, err error) -type UpdateTree struct { - tree *btree.BTreeG[*commitmentItem] - keccak cryptozerocopy.KeccakState - keys map[string]struct{} - mode CommitmentMode -} - -func NewUpdateTree(m CommitmentMode) *UpdateTree { - return &UpdateTree{ - tree: btree.NewG[*commitmentItem](64, commitmentItemLessPlain), - keccak: sha3.NewLegacyKeccak256().(cryptozerocopy.KeccakState), - keys: map[string]struct{}{}, - mode: m, - } -} - -func (t *UpdateTree) get(key []byte) (*commitmentItem, bool) { - c := &commitmentItem{plainKey: key, update: commitment.Update{CodeHashOrStorage: commitment.EmptyCodeHashArray}} - el, ok := t.tree.Get(c) - if ok { - return el, true - } - c.plainKey = common.Copy(c.plainKey) - return 
c, false -} - -// TouchPlainKey marks plainKey as updated and applies different fn for different key types -// (different behaviour for Code, Account and Storage key modifications). -func (t *UpdateTree) TouchPlainKey(key string, val []byte, fn func(c *commitmentItem, val []byte)) { - switch t.mode { - case CommitmentModeUpdate: - item, _ := t.get([]byte(key)) - fn(item, val) - t.tree.ReplaceOrInsert(item) - case CommitmentModeDirect: - t.keys[key] = struct{}{} - default: - } -} - -func (t *UpdateTree) Size() uint64 { - return uint64(len(t.keys)) -} - -func (t *UpdateTree) TouchAccount(c *commitmentItem, val []byte) { - if len(val) == 0 { - c.update.Flags = commitment.DeleteUpdate - return - } - if c.update.Flags&commitment.DeleteUpdate != 0 { - c.update.Flags ^= commitment.DeleteUpdate - } - nonce, balance, chash := types.DecodeAccountBytesV3(val) - if c.update.Nonce != nonce { - c.update.Nonce = nonce - c.update.Flags |= commitment.NonceUpdate - } - if !c.update.Balance.Eq(balance) { - c.update.Balance.Set(balance) - c.update.Flags |= commitment.BalanceUpdate - } - if !bytes.Equal(chash, c.update.CodeHashOrStorage[:]) { - if len(chash) == 0 { - c.update.ValLength = length.Hash - copy(c.update.CodeHashOrStorage[:], commitment.EmptyCodeHash) - } else { - copy(c.update.CodeHashOrStorage[:], chash) - c.update.ValLength = length.Hash - c.update.Flags |= commitment.CodeUpdate - } - } -} - -func (t *UpdateTree) UpdatePrefix(prefix, val []byte, fn func(c *commitmentItem, val []byte)) { - t.tree.AscendGreaterOrEqual(&commitmentItem{}, func(item *commitmentItem) bool { - if !bytes.HasPrefix(item.plainKey, prefix) { - return false - } - fn(item, val) - return true - }) -} - -func (t *UpdateTree) TouchStorage(c *commitmentItem, val []byte) { - c.update.ValLength = len(val) - if len(val) == 0 { - c.update.Flags = commitment.DeleteUpdate - } else { - c.update.Flags |= commitment.StorageUpdate - copy(c.update.CodeHashOrStorage[:], val) - } -} - -func (t *UpdateTree) TouchCode(c *commitmentItem, val []byte) { - t.keccak.Reset() - t.keccak.Write(val) - t.keccak.Read(c.update.CodeHashOrStorage[:]) - if c.update.Flags == commitment.DeleteUpdate && len(val) == 0 { - c.update.Flags = commitment.DeleteUpdate - c.update.ValLength = 0 - return - } - c.update.ValLength = length.Hash - if len(val) != 0 { - c.update.Flags |= commitment.CodeUpdate - } -} - -// Returns list of both plain and hashed keys. If .mode is CommitmentModeUpdate, updates also returned. -// No ordering guarantees is provided. 
-func (t *UpdateTree) List(clear bool) ([][]byte, []commitment.Update) { - switch t.mode { - case CommitmentModeDirect: - plainKeys := make([][]byte, len(t.keys)) - i := 0 - for key := range t.keys { - plainKeys[i] = []byte(key) - i++ - } - slices.SortFunc(plainKeys, bytes.Compare) - if clear { - t.keys = make(map[string]struct{}, len(t.keys)/8) - } - - return plainKeys, nil - case CommitmentModeUpdate: - plainKeys := make([][]byte, t.tree.Len()) - updates := make([]commitment.Update, t.tree.Len()) - i := 0 - t.tree.Ascend(func(item *commitmentItem) bool { - plainKeys[i], updates[i] = item.plainKey, item.update - i++ - return true - }) - if clear { - t.tree.Clear(true) - } - return plainKeys, updates - default: - return nil, nil - } -} - type commitmentState struct { txNum uint64 blockNum uint64 @@ -268,27 +83,19 @@ func encodeShorterKey(buf []byte, offset uint64) []byte { return binary.AppendUvarint(buf, offset) } -type commitmentItem struct { - plainKey []byte - update commitment.Update -} - -func commitmentItemLessPlain(i, j *commitmentItem) bool { - return bytes.Compare(i.plainKey, j.plainKey) < 0 -} - // Finds shorter replacement for full key in given file item. filesItem -- result of merging of multiple files. // If item is nil, or shorter key was not found, or anything else goes wrong, nil key and false returned. -func (dt *DomainRoTx) findShortenedKey(fullKey []byte, item *filesItem) (shortened []byte, found bool) { +func (dt *DomainRoTx) findShortenedKey(fullKey []byte, itemGetter ArchiveGetter, item *filesItem) (shortened []byte, found bool) { if item == nil { return nil, false } - if !strings.Contains(item.decompressor.FileName(), dt.d.filenameBase) { panic(fmt.Sprintf("findShortenedKeyEasier of %s called with merged file %s", dt.d.filenameBase, item.decompressor.FileName())) } - - g := NewArchiveGetter(item.decompressor.MakeGetter(), dt.d.compression) + if /*assert.Enable && */ itemGetter.FileName() != item.decompressor.FileName() { + panic(fmt.Sprintf("findShortenedKey of %s itemGetter (%s) is different to item.decompressor (%s)", + dt.d.filenameBase, itemGetter.FileName(), item.decompressor.FileName())) + } //if idxList&withExistence != 0 { // hi, _ := dt.ht.iit.hashKey(fullKey) @@ -306,14 +113,14 @@ func (dt *DomainRoTx) findShortenedKey(fullKey []byte, item *filesItem) (shorten return nil, false } - g.Reset(offset) - if !g.HasNext() { + itemGetter.Reset(offset) + if !itemGetter.HasNext() { dt.d.logger.Warn("commitment branch key replacement seek failed", "key", fmt.Sprintf("%x", fullKey), "idx", "hash", "file", item.decompressor.FileName()) return nil, false } - k, _ := g.Next(nil) + k, _ := itemGetter.Next(nil) if !bytes.Equal(fullKey, k) { dt.d.logger.Warn("commitment branch key replacement seek invalid key", "key", fmt.Sprintf("%x", fullKey), "idx", "hash", "file", item.decompressor.FileName()) @@ -323,7 +130,7 @@ func (dt *DomainRoTx) findShortenedKey(fullKey []byte, item *filesItem) (shorten return encodeShorterKey(nil, offset), true } if dt.d.indexList&withBTree != 0 { - cur, err := item.bindex.Seek(g, fullKey) + cur, err := item.bindex.Seek(itemGetter, fullKey) if err != nil { dt.d.logger.Warn("commitment branch key replacement seek failed", "key", fmt.Sprintf("%x", fullKey), "idx", "bt", "err", err, "file", item.decompressor.FileName()) @@ -334,9 +141,9 @@ func (dt *DomainRoTx) findShortenedKey(fullKey []byte, item *filesItem) (shorten } offset := cur.offsetInFile() - if uint64(g.Size()) <= offset { + if uint64(itemGetter.Size()) <= offset { 
dt.d.logger.Warn("commitment branch key replacement seek gone too far", - "key", fmt.Sprintf("%x", fullKey), "offset", offset, "size", g.Size(), "file", item.decompressor.FileName()) + "key", fmt.Sprintf("%x", fullKey), "offset", offset, "size", itemGetter.Size(), "file", item.decompressor.FileName()) return nil, false } return encodeShorterKey(nil, offset), true @@ -344,12 +151,7 @@ func (dt *DomainRoTx) findShortenedKey(fullKey []byte, item *filesItem) (shorten return nil, false } -// searches in given list of files for a key or searches in domain files if list is empty -func (dt *DomainRoTx) lookupByShortenedKey(shortKey []byte, txFrom uint64, txTo uint64) (fullKey []byte, found bool) { - if len(shortKey) < 1 { - return nil, false - } - +func (dt *DomainRoTx) lookupFileByItsRange(txFrom uint64, txTo uint64) *filesItem { var item *filesItem for _, f := range dt.files { if f.startTxNum == txFrom && f.endTxNum == txTo { @@ -378,47 +180,43 @@ func (dt *DomainRoTx) lookupByShortenedKey(shortKey []byte, txFrom uint64, txTo for _, f := range dt.files { visibleFiles += fmt.Sprintf("%d-%d;", f.startTxNum/dt.d.aggregationStep, f.endTxNum/dt.d.aggregationStep) } - dt.d.logger.Warn("lookupByShortenedKey file not found", + dt.d.logger.Warn("lookupFileByItsRange: file not found", "stepFrom", txFrom/dt.d.aggregationStep, "stepTo", txTo/dt.d.aggregationStep, - "shortened", fmt.Sprintf("%x", shortKey), "domain", dt.d.keysTable, "files", fileStepsss, "_visibleFiles", visibleFiles, "visibleFilesCount", len(dt.files), "filesCount", dt.d.dirtyFiles.Len()) - return nil, false + return nil } + return item +} +// searches in given list of files for a key or searches in domain files if list is empty +func (dt *DomainRoTx) lookupByShortenedKey(shortKey []byte, getter ArchiveGetter) (fullKey []byte, found bool) { + if len(shortKey) < 1 { + return nil, false + } offset := decodeShorterKey(shortKey) defer func() { if r := recover(); r != nil { dt.d.logger.Crit("lookupByShortenedKey panics", "err", r, "domain", dt.d.keysTable, - "short", fmt.Sprintf("%x", shortKey), - "stepFrom", txFrom/dt.d.aggregationStep, "stepTo", txTo/dt.d.aggregationStep, "offset", offset, - "visibleFilesCount", len(dt.files), "filesCount", dt.d.dirtyFiles.Len(), - "fileFound", item != nil) + "offset", offset, "short", fmt.Sprintf("%x", shortKey), + "cleanFilesCount", len(dt.files), "dirtyFilesCount", dt.d.dirtyFiles.Len(), + "file", getter.FileName()) } }() - g := NewArchiveGetter(item.decompressor.MakeGetter(), dt.d.compression) - g.Reset(offset) - if !g.HasNext() || uint64(g.Size()) <= offset { - dt.d.logger.Warn("lookupByShortenedKey failed", - "stepFrom", txFrom/dt.d.aggregationStep, "stepTo", txTo/dt.d.aggregationStep, "offset", offset, - "size", g.Size(), "short", shortKey, "file", item.decompressor.FileName()) + //getter := NewArchiveGetter(item.decompressor.MakeGetter(), dt.d.compression) + getter.Reset(offset) + if !getter.HasNext() || uint64(getter.Size()) <= offset { + dt.d.logger.Warn("lookupByShortenedKey failed", "short", shortKey, "offset", offset, "file", getter.FileName()) return nil, false } - fullKey, _ = g.Next(nil) - // dt.d.logger.Debug(fmt.Sprintf("lookupByShortenedKey [%x]=>{%x}", shortKey, fullKey), - // "stepFrom", stepFrom, "stepTo", stepTo, "offset", offset, "file", item.decompressor.FileName()) + fullKey, _ = getter.Next(nil) return fullKey, true } -//func (dc *DomainRoTx) SqueezeExistingCommitmentFile() { -// dc.commitmentValTransformDomain() -// -//} - // commitmentValTransform parses the value of the commitment 
record to extract references
// to accounts and storage items, then looks them up in the new, merged files, and replaces them with
// the updated references
@@ -436,69 +234,90 @@ func (dt *DomainRoTx) commitmentValTransformDomain(accounts, storage *DomainRoTx
 		if !dt.d.replaceKeysInValues || len(valBuf) == 0 {
 			return valBuf, nil
 		}
+		si := storage.lookupFileByItsRange(keyFromTxNum, keyEndTxNum)
+		if si == nil {
+			return nil, fmt.Errorf("storage file not found for %d-%d", keyFromTxNum, keyEndTxNum)
+		}
+		ai := accounts.lookupFileByItsRange(keyFromTxNum, keyEndTxNum)
+		if ai == nil {
+			return nil, fmt.Errorf("account file not found for %d-%d", keyFromTxNum, keyEndTxNum)
+		}
 
-		return commitment.BranchData(valBuf).
-			ReplacePlainKeys(nil, func(key []byte, isStorage bool) ([]byte, error) {
-				var found bool
-				var buf []byte
-				if isStorage {
-					if len(key) == length.Addr+length.Hash {
-						// Non-optimised key originating from a database record
-						buf = append(buf[:0], key...)
-					} else {
-						// Optimised key referencing a state file record (file number and offset within the file)
-						buf, found = storage.lookupByShortenedKey(key, keyFromTxNum, keyEndTxNum)
-						if !found {
-							dt.d.logger.Crit("valTransform: lost storage full key",
-								"shortened", fmt.Sprintf("%x", key),
-								"merging", stoMerged,
-								"valBuf", fmt.Sprintf("l=%d %x", len(valBuf), valBuf),
-							)
-							return nil, fmt.Errorf("lookup lost storage full key %x", key)
-						}
-					}
+		if si.decompressor == nil || ai.decompressor == nil {
+			return nil, fmt.Errorf("decompressor is nil for existing storage or account")
+		}
+		if mergedStorage == nil || mergedAccount == nil {
+			return nil, fmt.Errorf("mergedStorage or mergedAccount is nil")
+		}
 
-					shortened, found := storage.findShortenedKey(buf, mergedStorage)
-					if !found {
-						if len(buf) == length.Addr+length.Hash {
-							return buf, nil // if plain key is lost, we can save original fullkey
-						}
-						// if shortened key lost, we can't continue
-						dt.d.logger.Crit("valTransform: replacement for full storage key was not found",
-							"step", fmt.Sprintf("%d-%d", keyFromTxNum/dt.d.aggregationStep, keyEndTxNum/dt.d.aggregationStep),
-							"shortened", fmt.Sprintf("%x", shortened), "toReplace", fmt.Sprintf("%x", buf))
-
-						return nil, fmt.Errorf("replacement not found for storage %x", buf)
-					}
-					return shortened, nil
-				}
+		sig := NewArchiveGetter(si.decompressor.MakeGetter(), storage.d.compression)
+		aig := NewArchiveGetter(ai.decompressor.MakeGetter(), accounts.d.compression)
+		ms := NewArchiveGetter(mergedStorage.decompressor.MakeGetter(), storage.d.compression)
+		ma := NewArchiveGetter(mergedAccount.decompressor.MakeGetter(), accounts.d.compression)
 
-				if len(key) == length.Addr {
+		replacer := func(key []byte, isStorage bool) ([]byte, error) {
+			var found bool
+			auxBuf := dt.keyBuf[:0]
+			if isStorage {
+				if len(key) == length.Addr+length.Hash {
 					// Non-optimised key originating from a database record
-					buf = append(buf[:0], key...)
+					auxBuf = append(auxBuf[:0], key...)
} else { - buf, found = accounts.lookupByShortenedKey(key, keyFromTxNum, keyEndTxNum) + // Optimised key referencing a state file record (file number and offset within the file) + auxBuf, found = storage.lookupByShortenedKey(key, sig) if !found { - dt.d.logger.Crit("valTransform: lost account full key", + dt.d.logger.Crit("valTransform: lost storage full key", "shortened", fmt.Sprintf("%x", key), - "merging", accMerged, + "merging", stoMerged, "valBuf", fmt.Sprintf("l=%d %x", len(valBuf), valBuf), ) - return nil, fmt.Errorf("lookup account full key: %x", key) + return nil, fmt.Errorf("lookup lost storage full key %x", key) } } - shortened, found := accounts.findShortenedKey(buf, mergedAccount) + shortened, found := storage.findShortenedKey(auxBuf, ms, mergedStorage) if !found { - if len(buf) == length.Addr { - return buf, nil // if plain key is lost, we can save original fullkey + if len(auxBuf) == length.Addr+length.Hash { + return auxBuf, nil // if plain key is lost, we can save original fullkey } - dt.d.logger.Crit("valTransform: replacement for full account key was not found", + // if shortened key lost, we can't continue + dt.d.logger.Crit("valTransform: replacement for full storage key was not found", "step", fmt.Sprintf("%d-%d", keyFromTxNum/dt.d.aggregationStep, keyEndTxNum/dt.d.aggregationStep), - "shortened", fmt.Sprintf("%x", shortened), "toReplace", fmt.Sprintf("%x", buf)) - return nil, fmt.Errorf("replacement not found for account %x", buf) + "shortened", fmt.Sprintf("%x", shortened), "toReplace", fmt.Sprintf("%x", auxBuf)) + + return nil, fmt.Errorf("replacement not found for storage %x", auxBuf) } return shortened, nil - }) + } + + if len(key) == length.Addr { + // Non-optimised key originating from a database record + auxBuf = append(auxBuf[:0], key...) 
+ } else { + auxBuf, found = accounts.lookupByShortenedKey(key, aig) + if !found { + dt.d.logger.Crit("valTransform: lost account full key", + "shortened", fmt.Sprintf("%x", key), + "merging", accMerged, + "valBuf", fmt.Sprintf("l=%d %x", len(valBuf), valBuf), + ) + return nil, fmt.Errorf("lookup account full key: %x", key) + } + } + + shortened, found := accounts.findShortenedKey(auxBuf, ma, mergedAccount) + if !found { + if len(auxBuf) == length.Addr { + return auxBuf, nil // if plain key is lost, we can save original fullkey + } + dt.d.logger.Crit("valTransform: replacement for full account key was not found", + "step", fmt.Sprintf("%d-%d", keyFromTxNum/dt.d.aggregationStep, keyEndTxNum/dt.d.aggregationStep), + "shortened", fmt.Sprintf("%x", shortened), "toReplace", fmt.Sprintf("%x", auxBuf)) + return nil, fmt.Errorf("replacement not found for account %x", auxBuf) + } + return shortened, nil + } + + return commitment.BranchData(valBuf).ReplacePlainKeys(dt.comBuf[:0], replacer) } } diff --git a/erigon-lib/state/domain_shared.go b/erigon-lib/state/domain_shared.go index faef82b1439..30fe130a958 100644 --- a/erigon-lib/state/domain_shared.go +++ b/erigon-lib/state/domain_shared.go @@ -6,6 +6,8 @@ import ( "context" "encoding/binary" "fmt" + "github.com/ledgerwatch/erigon-lib/common/cryptozerocopy" + "golang.org/x/crypto/sha3" "math" "path/filepath" "runtime" @@ -109,7 +111,7 @@ func NewSharedDomains(tx kv.Tx, logger log.Logger) (*SharedDomains, error) { } sd.SetTxNum(0) - sd.sdCtx = NewSharedDomainsCommitmentContext(sd, CommitmentModeDirect, commitment.VariantHexPatriciaTrie) + sd.sdCtx = NewSharedDomainsCommitmentContext(sd, commitment.ModeDirect, commitment.VariantHexPatriciaTrie) if _, err := sd.SeekCommitment(context.Background(), tx); err != nil { return nil, fmt.Errorf("SeekCommitment: %w", err) @@ -167,7 +169,7 @@ func (sd *SharedDomains) rebuildCommitment(ctx context.Context, roTx kv.Tx, bloc if err != nil { return nil, err } - sd.sdCtx.TouchPlainKey(string(k), nil, sd.sdCtx.TouchAccount) + sd.sdCtx.TouchKey(kv.AccountsDomain, string(k), nil) } it, err = sd.aggCtx.StorageHistoryRange(int(sd.TxNum()), math.MaxInt64, order.Asc, -1, roTx) @@ -180,7 +182,7 @@ func (sd *SharedDomains) rebuildCommitment(ctx context.Context, roTx kv.Tx, bloc if err != nil { return nil, err } - sd.sdCtx.TouchPlainKey(string(k), nil, sd.sdCtx.TouchStorage) + sd.sdCtx.TouchKey(kv.StorageDomain, string(k), nil) } sd.sdCtx.Reset() @@ -246,7 +248,7 @@ func (sd *SharedDomains) SeekCommitment(ctx context.Context, tx kv.Tx) (txsFromB func (sd *SharedDomains) ClearRam(resetCommitment bool) { //sd.muMaps.Lock() //defer sd.muMaps.Unlock() - for i, _ := range sd.domains { + for i := range sd.domains { sd.domains[i] = map[string][]byte{} } if resetCommitment { @@ -347,18 +349,25 @@ func (sd *SharedDomains) replaceShortenedKeysInBranch(prefix []byte, branch comm return branch, nil // do not transform, return as is } - return branch.ReplacePlainKeys(nil, func(key []byte, isStorage bool) ([]byte, error) { + sto := sd.aggCtx.d[kv.StorageDomain] + acc := sd.aggCtx.d[kv.AccountsDomain] + storageItem := sto.lookupFileByItsRange(fStartTxNum, fEndTxNum) + accountItem := acc.lookupFileByItsRange(fStartTxNum, fEndTxNum) + storageGetter := NewArchiveGetter(storageItem.decompressor.MakeGetter(), sto.d.compression) + accountGetter := NewArchiveGetter(accountItem.decompressor.MakeGetter(), acc.d.compression) + + aux := make([]byte, 0, 256) + return branch.ReplacePlainKeys(aux, func(key []byte, isStorage bool) ([]byte, error) { if 
isStorage { if len(key) == length.Addr+length.Hash { return nil, nil // save storage key as is } // Optimised key referencing a state file record (file number and offset within the file) - storagePlainKey, found := sd.aggCtx.d[kv.StorageDomain].lookupByShortenedKey(key, fStartTxNum, fEndTxNum) + storagePlainKey, found := sto.lookupByShortenedKey(key, storageGetter) if !found { s0, s1 := fStartTxNum/sd.aggCtx.a.StepSize(), fEndTxNum/sd.aggCtx.a.StepSize() - oft := decodeShorterKey(key) sd.logger.Crit("replace back lost storage full key", "shortened", fmt.Sprintf("%x", key), - "decoded", fmt.Sprintf("step %d-%d; offt %d", s0, s1, oft)) + "decoded", fmt.Sprintf("step %d-%d; offt %d", s0, s1, decodeShorterKey(key))) return nil, fmt.Errorf("replace back lost storage full key: %x", key) } return storagePlainKey, nil @@ -368,12 +377,11 @@ func (sd *SharedDomains) replaceShortenedKeysInBranch(prefix []byte, branch comm return nil, nil // save account key as is } - apkBuf, found := sd.aggCtx.d[kv.AccountsDomain].lookupByShortenedKey(key, fStartTxNum, fEndTxNum) + apkBuf, found := acc.lookupByShortenedKey(key, accountGetter) if !found { - oft := decodeShorterKey(key) s0, s1 := fStartTxNum/sd.aggCtx.a.StepSize(), fEndTxNum/sd.aggCtx.a.StepSize() sd.logger.Crit("replace back lost account full key", "shortened", fmt.Sprintf("%x", key), - "decoded", fmt.Sprintf("step %d-%d; offt %d", s0, s1, oft)) + "decoded", fmt.Sprintf("step %d-%d; offt %d", s0, s1, decodeShorterKey(key))) return nil, fmt.Errorf("replace back lost account full key: %x", key) } return apkBuf, nil @@ -434,14 +442,14 @@ func (sd *SharedDomains) ReadsValid(readLists map[string]*KvList) bool { func (sd *SharedDomains) updateAccountData(addr []byte, account, prevAccount []byte, prevStep uint64) error { addrS := string(addr) - sd.sdCtx.TouchPlainKey(addrS, account, sd.sdCtx.TouchAccount) + sd.sdCtx.TouchKey(kv.AccountsDomain, addrS, account) sd.put(kv.AccountsDomain, addrS, account) return sd.dWriter[kv.AccountsDomain].PutWithPrev(addr, nil, account, prevAccount, prevStep) } func (sd *SharedDomains) updateAccountCode(addr, code, prevCode []byte, prevStep uint64) error { addrS := string(addr) - sd.sdCtx.TouchPlainKey(addrS, code, sd.sdCtx.TouchCode) + sd.sdCtx.TouchKey(kv.CodeDomain, addrS, code) sd.put(kv.CodeDomain, addrS, code) if len(code) == 0 { return sd.dWriter[kv.CodeDomain].DeleteWithPrev(addr, nil, prevCode, prevStep) @@ -465,7 +473,7 @@ func (sd *SharedDomains) deleteAccount(addr, prev []byte, prevStep uint64) error return err } - sd.sdCtx.TouchPlainKey(addrS, nil, sd.sdCtx.TouchAccount) + sd.sdCtx.TouchKey(kv.AccountsDomain, addrS, nil) sd.put(kv.AccountsDomain, addrS, nil) if err := sd.dWriter[kv.AccountsDomain].DeleteWithPrev(addr, nil, prev, prevStep); err != nil { return err @@ -481,7 +489,7 @@ func (sd *SharedDomains) writeAccountStorage(addr, loc []byte, value, preVal []b composite = append(append(composite, addr...), loc...) } compositeS := string(composite) - sd.sdCtx.TouchPlainKey(compositeS, value, sd.sdCtx.TouchStorage) + sd.sdCtx.TouchKey(kv.StorageDomain, compositeS, value) sd.put(kv.StorageDomain, compositeS, value) return sd.dWriter[kv.StorageDomain].PutWithPrev(composite, nil, value, preVal, prevStep) } @@ -492,7 +500,7 @@ func (sd *SharedDomains) delAccountStorage(addr, loc []byte, preVal []byte, prev composite = append(append(composite, addr...), loc...) 
} compositeS := string(composite) - sd.sdCtx.TouchPlainKey(compositeS, nil, sd.sdCtx.TouchStorage) + sd.sdCtx.TouchKey(kv.StorageDomain, compositeS, nil) sd.put(kv.StorageDomain, compositeS, nil) return sd.dWriter[kv.StorageDomain].DeleteWithPrev(composite, nil, preVal, prevStep) } @@ -718,8 +726,7 @@ func (sd *SharedDomains) Close() { } if sd.sdCtx != nil { - sd.sdCtx.updates.keys = nil - sd.sdCtx.updates.tree.Clear(true) + sd.sdCtx.Close() } } @@ -901,39 +908,44 @@ func (sd *SharedDomains) Tx() kv.Tx { return sd.roTx } type SharedDomainsCommitmentContext struct { sd *SharedDomains discard bool - updates *UpdateTree - mode CommitmentMode - branchCache map[string]cachedBranch + mode commitment.Mode + branches map[string]cachedBranch + keccak cryptozerocopy.KeccakState + updates *commitment.UpdateTree patriciaTrie commitment.Trie justRestored atomic.Bool } -func NewSharedDomainsCommitmentContext(sd *SharedDomains, mode CommitmentMode, trieVariant commitment.TrieVariant) *SharedDomainsCommitmentContext { +func NewSharedDomainsCommitmentContext(sd *SharedDomains, mode commitment.Mode, trieVariant commitment.TrieVariant) *SharedDomainsCommitmentContext { ctx := &SharedDomainsCommitmentContext{ - sd: sd, - mode: mode, - updates: NewUpdateTree(mode), - discard: dbg.DiscardCommitment(), - patriciaTrie: commitment.InitializeTrie(trieVariant), - branchCache: make(map[string]cachedBranch), + sd: sd, + mode: mode, + discard: dbg.DiscardCommitment(), + branches: make(map[string]cachedBranch), + keccak: sha3.NewLegacyKeccak256().(cryptozerocopy.KeccakState), } + ctx.patriciaTrie, ctx.updates = commitment.InitializeTrieAndUpdateTree(trieVariant, mode, sd.aggCtx.a.tmpdir) ctx.patriciaTrie.ResetContext(ctx) return ctx } +func (sdc *SharedDomainsCommitmentContext) Close() { + sdc.updates.Close() +} + type cachedBranch struct { data []byte step uint64 } -// Cache should ResetBranchCache after each commitment computation +// ResetBranchCache should be called after each commitment computation func (sdc *SharedDomainsCommitmentContext) ResetBranchCache() { - sdc.branchCache = make(map[string]cachedBranch) + clear(sdc.branches) } func (sdc *SharedDomainsCommitmentContext) GetBranch(pref []byte) ([]byte, uint64, error) { - cached, ok := sdc.branchCache[string(pref)] + cached, ok := sdc.branches[string(pref)] if ok { // cached value is already transformed/clean to read. 
// Cache should ResetBranchCache after each commitment computation @@ -947,12 +959,13 @@ func (sdc *SharedDomainsCommitmentContext) GetBranch(pref []byte) ([]byte, uint6 if sdc.sd.trace { fmt.Printf("[SDC] GetBranch: %x: %x\n", pref, v) } + // Trie reads prefix during unfold and after everything is ready reads it again to Merge update, if any, so + // cache branch until ResetBranchCache called + sdc.branches[string(pref)] = cachedBranch{data: v, step: step} + if len(v) == 0 { return nil, 0, nil } - // Trie reads prefix during unfold and after everything is ready reads it again to Merge update, if any, so - // cache branch until ResetBranchCache called - sdc.branchCache[string(pref)] = cachedBranch{data: v, step: step} return v, step, nil } @@ -960,7 +973,7 @@ func (sdc *SharedDomainsCommitmentContext) PutBranch(prefix []byte, data []byte, if sdc.sd.trace { fmt.Printf("[SDC] PutBranch: %x: %x\n", prefix, data) } - sdc.branchCache[string(prefix)] = cachedBranch{data: data, step: prevStep} + sdc.branches[string(prefix)] = cachedBranch{data: data, step: prevStep} return sdc.sd.updateCommitmentData(prefix, data, prevData, prevStep) } @@ -978,7 +991,10 @@ func (sdc *SharedDomainsCommitmentContext) GetAccount(plainKey []byte, cell *com if len(chash) > 0 { copy(cell.CodeHash[:], chash) } - //fmt.Printf("GetAccount: %x: n=%d b=%d ch=%x\n", plainKey, nonce, balance, chash) + } + if bytes.Equal(cell.CodeHash[:], commitment.EmptyCodeHash) { + cell.Delete = len(encAccount) == 0 + return nil } code, _, err := sdc.sd.DomainGet(kv.CodeDomain, plainKey, nil) @@ -986,10 +1002,9 @@ func (sdc *SharedDomainsCommitmentContext) GetAccount(plainKey []byte, cell *com return fmt.Errorf("GetAccount: failed to read latest code: %w", err) } if len(code) > 0 { - //fmt.Printf("GetAccount: code %x - %x\n", plainKey, code) - sdc.updates.keccak.Reset() - sdc.updates.keccak.Write(code) - sdc.updates.keccak.Read(cell.CodeHash[:]) + sdc.keccak.Reset() + sdc.keccak.Write(code) + sdc.keccak.Read(cell.CodeHash[:]) } else { cell.CodeHash = commitment.EmptyCodeHashArray } @@ -1003,9 +1018,6 @@ func (sdc *SharedDomainsCommitmentContext) GetStorage(plainKey []byte, cell *com if err != nil { return err } - //if sdc.sd.trace { - // fmt.Printf("[SDC] GetStorage: %x - %x\n", plainKey, enc) - //} cell.StorageLen = len(enc) copy(cell.Storage[:], enc) cell.Delete = cell.StorageLen == 0 @@ -1022,54 +1034,46 @@ func (sdc *SharedDomainsCommitmentContext) TempDir() string { return sdc.sd.aggCtx.a.dirs.Tmp } -//func (ctx *SharedDomainsCommitmentContext) Hasher() hash.Hash { return ctx.updates.keccak } -// -//func (ctx *SharedDomainsCommitmentContext) SetCommitmentMode(m CommitmentMode) { ctx.mode = m } -// +func (sdc *SharedDomainsCommitmentContext) KeysCount() uint64 { + return sdc.updates.Size() +} // TouchPlainKey marks plainKey as updated and applies different fn for different key types // (different behaviour for Code, Account and Storage key modifications). 
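+//
+// A usage sketch (hypothetical addr/encodedAccount bytes) of the consolidated
+// entry point that replaces the per-type Touch* wrappers:
+//
+//	sdc.TouchKey(kv.AccountsDomain, string(addr), encodedAccount)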
-func (sdc *SharedDomainsCommitmentContext) TouchPlainKey(key string, val []byte, fn func(c *commitmentItem, val []byte)) { +func (sdc *SharedDomainsCommitmentContext) TouchKey(d kv.Domain, key string, val []byte) { if sdc.discard { return } - sdc.updates.TouchPlainKey(key, val, fn) -} - -func (sdc *SharedDomainsCommitmentContext) KeysCount() uint64 { - return sdc.updates.Size() -} - -func (sdc *SharedDomainsCommitmentContext) TouchAccount(c *commitmentItem, val []byte) { - sdc.updates.TouchAccount(c, val) -} - -func (sdc *SharedDomainsCommitmentContext) TouchStorage(c *commitmentItem, val []byte) { - sdc.updates.TouchStorage(c, val) -} - -func (sdc *SharedDomainsCommitmentContext) TouchCode(c *commitmentItem, val []byte) { - sdc.updates.TouchCode(c, val) + ks := []byte(key) + switch d { + case kv.AccountsDomain: + sdc.updates.TouchPlainKey(ks, val, sdc.updates.TouchAccount) + case kv.CodeDomain: + sdc.updates.TouchPlainKey(ks, val, sdc.updates.TouchCode) + case kv.StorageDomain: + sdc.updates.TouchPlainKey(ks, val, sdc.updates.TouchStorage) + default: + panic(fmt.Errorf("TouchKey: unknown domain %s", d)) + } } // Evaluates commitment for processed state. -func (sdc *SharedDomainsCommitmentContext) ComputeCommitment(ctext context.Context, saveState bool, blockNum uint64, logPrefix string) (rootHash []byte, err error) { +func (sdc *SharedDomainsCommitmentContext) ComputeCommitment(ctx context.Context, saveState bool, blockNum uint64, logPrefix string) (rootHash []byte, err error) { defer sdc.ResetBranchCache() if dbg.DiscardCommitment() { sdc.updates.List(true) return nil, nil } + mxCommitmentRunning.Inc() defer mxCommitmentRunning.Dec() defer func(s time.Time) { mxCommitmentTook.ObserveDuration(s) }(time.Now()) - touchedKeys, updates := sdc.updates.List(true) + updateCount := sdc.updates.Size() if sdc.sd.trace { - defer func() { - fmt.Printf("[SDC] rootHash %x block %d keys %d mode %s\n", rootHash, blockNum, len(touchedKeys), sdc.mode) - }() + defer sdc.sd.logger.Trace("ComputeCommitment", "block", blockNum, "keys", updateCount, "mode", sdc.mode) } - if len(touchedKeys) == 0 { + if updateCount == 0 { rootHash, err = sdc.patriciaTrie.RootHash() return rootHash, err } @@ -1079,17 +1083,18 @@ func (sdc *SharedDomainsCommitmentContext) ComputeCommitment(ctext context.Conte sdc.Reset() switch sdc.mode { - case CommitmentModeDirect: - rootHash, err = sdc.patriciaTrie.ProcessKeys(ctext, touchedKeys, logPrefix) + case commitment.ModeDirect: + rootHash, err = sdc.patriciaTrie.ProcessTree(ctx, sdc.updates, logPrefix) if err != nil { return nil, err } - case CommitmentModeUpdate: - rootHash, err = sdc.patriciaTrie.ProcessUpdates(ctext, touchedKeys, updates) + case commitment.ModeUpdate: + touchedKeys, updates := sdc.updates.List(true) + rootHash, err = sdc.patriciaTrie.ProcessUpdates(ctx, touchedKeys, updates) if err != nil { return nil, err } - case CommitmentModeDisabled: + case commitment.ModeDisabled: return nil, nil default: return nil, fmt.Errorf("invalid commitment mode: %s", sdc.mode) diff --git a/erigon-lib/state/domain_shared_bench_test.go b/erigon-lib/state/domain_shared_bench_test.go index 0bbabd84ab1..e4890f8f1ae 100644 --- a/erigon-lib/state/domain_shared_bench_test.go +++ b/erigon-lib/state/domain_shared_bench_test.go @@ -85,8 +85,7 @@ func Benchmark_SharedDomains_GetLatest(t *testing.B) { require.NoError(t, err) } } - //}) - //t.Run("GetHistory", func(t *testing.B) { + for ik := 0; ik < t.N; ik++ { for i := 0; i < len(keys); i++ { ts := uint64(rnd.Intn(int(maxTx))) @@ -98,6 +97,47 @@ 
func Benchmark_SharedDomains_GetLatest(t *testing.B) { require.NoError(t, err) } } - //}) +} + +func BenchmarkSharedDomains_ComputeCommitment(b *testing.B) { + b.StopTimer() + + stepSize := uint64(100) + db, agg := testDbAndAggregatorBench(b, stepSize) + + ctx := context.Background() + rwTx, err := db.BeginRw(ctx) + require.NoError(b, err) + defer rwTx.Rollback() + + ac := agg.BeginFilesRo() + defer ac.Close() + + domains, err := NewSharedDomains(WrapTxWithCtx(rwTx, ac), log.New()) + require.NoError(b, err) + defer domains.Close() + + maxTx := stepSize * 17 + data := generateTestDataForDomainCommitment(b, length.Addr, length.Addr+length.Hash, maxTx, 15, 100) + require.NotNil(b, data) + + for domName, d := range data { + fom := kv.AccountsDomain + if domName == "storage" { + fom = kv.StorageDomain + } + for key, upd := range d { + for _, u := range upd { + domains.SetTxNum(u.txNum) + err := domains.DomainPut(fom, []byte(key), nil, u.value, nil, 0) + require.NoError(b, err) + } + } + } + b.StartTimer() + for i := 0; i < b.N; i++ { + _, err := domains.ComputeCommitment(ctx, true, domains.BlockNum(), "") + require.NoError(b, err) + } } diff --git a/erigon-lib/state/domain_test.go b/erigon-lib/state/domain_test.go index 43643643890..40716a58441 100644 --- a/erigon-lib/state/domain_test.go +++ b/erigon-lib/state/domain_test.go @@ -1435,8 +1435,10 @@ func generateTestDataForDomainCommitment(tb testing.TB, keySize1, keySize2, tota key1 := generateRandomKey(r, keySize1) accs[key1] = generateAccountUpdates(r, totalTx, keyTxsLimit) key2 := key1 + generateRandomKey(r, keySize2-keySize1) - stor[key2] = generateStorageUpdates(r, totalTx, keyTxsLimit) + stor[key2] = generateArbitraryValueUpdates(r, totalTx, keyTxsLimit, 32) } + doms["accounts"] = accs + doms["storage"] = stor return doms } @@ -1494,14 +1496,15 @@ func generateAccountUpdates(r *rand.Rand, totalTx, keyTxsLimit uint64) []upd { return updates } -func generateStorageUpdates(r *rand.Rand, totalTx, keyTxsLimit uint64) []upd { +func generateArbitraryValueUpdates(r *rand.Rand, totalTx, keyTxsLimit, maxSize uint64) []upd { updates := make([]upd, 0) usedTxNums := make(map[uint64]bool) + //maxStorageSize := 24 * (1 << 10) // limit on contract code for i := uint64(0); i < keyTxsLimit; i++ { txNum := generateRandomTxNum(r, totalTx, usedTxNums) - value := make([]byte, r.Intn(24*(1<<10))) + value := make([]byte, r.Intn(int(maxSize))) r.Read(value) updates = append(updates, upd{txNum: txNum, value: value}) @@ -2510,7 +2513,9 @@ func TestDomainContext_findShortenedKey(t *testing.T) { lastFile := findFile(st, en) require.NotNilf(t, lastFile, "%d-%d", st/dc.d.aggregationStep, en/dc.d.aggregationStep) - shortenedKey, found := dc.findShortenedKey([]byte(key), lastFile) + lf := NewArchiveGetter(lastFile.decompressor.MakeGetter(), d.compression) + + shortenedKey, found := dc.findShortenedKey([]byte(key), lf, lastFile) require.Truef(t, found, "key %d/%d %x file %d %d %s", ki, len(data), []byte(key), lastFile.startTxNum, lastFile.endTxNum, lastFile.decompressor.FileName()) require.NotNil(t, shortenedKey) ki++ diff --git a/eth/stagedsync/stage_execute.go b/eth/stagedsync/stage_execute.go index 6da34bf29d0..9c391c771d1 100644 --- a/eth/stagedsync/stage_execute.go +++ b/eth/stagedsync/stage_execute.go @@ -343,6 +343,7 @@ func unwindExec3(u *UnwindState, s *StageState, txc wrap.TxContainer, ctx contex //if ok && bn != u.UnwindPoint { // return fmt.Errorf("commitment can unwind only to block: %d, requested: %d. 
UnwindTo was called with wrong value", bn, u.UnwindPoint)
	//}
+	start := time.Now()

 	unwindToLimit, err := txc.Tx.(libstate.HasAggCtx).AggCtx().(*libstate.AggregatorRoTx).CanUnwindDomainsToBlockNum(txc.Tx)
 	if err != nil {
@@ -382,7 +383,11 @@ func unwindExec3(u *UnwindState, s *StageState, txc wrap.TxContainer, ctx contex
 		return fmt.Errorf("delete newer epochs: %w", err)
 	}

-	return domains.Flush(ctx, txc.Tx)
+	if err = domains.Flush(ctx, txc.Tx); err != nil {
+		return fmt.Errorf("unwind flush domains: %w", err)
+	}
+	fmt.Printf("unwindv3: %d -> %d done within %s\n", s.BlockNumber, u.UnwindPoint, time.Since(start))
+	return nil
 }

 func senderStageProgress(tx kv.Tx, db kv.RoDB) (prevStageProgress uint64, err error) {
diff --git a/eth/stagedsync/stage_trie3.go b/eth/stagedsync/stage_trie3.go
index 250f828385f..1e234804ced 100644
--- a/eth/stagedsync/stage_trie3.go
+++ b/eth/stagedsync/stage_trie3.go
@@ -91,7 +91,7 @@ func collectAndComputeCommitment(ctx context.Context, tx kv.RwTx, tmpDir string,
 		processed atomic.Uint64
 	)

-	sdCtx := state.NewSharedDomainsCommitmentContext(domains, state.CommitmentModeDirect, commitment.VariantHexPatriciaTrie)
+	sdCtx := state.NewSharedDomainsCommitmentContext(domains, commitment.ModeDirect, commitment.VariantHexPatriciaTrie)

 	loadKeys := func(k, v []byte, table etl.CurrentTableReader, next etl.LoadNextFunc) error {
 		if sdCtx.KeysCount() >= batchSize {
@@ -104,7 +104,7 @@ func collectAndComputeCommitment(ctx context.Context, tx kv.RwTx, tmpDir string,
 				"intermediate root", fmt.Sprintf("%x", rh))
 		}
 		processed.Add(1)
-		sdCtx.TouchPlainKey(string(k), nil, nil)
+		sdCtx.TouchKey(kv.AccountsDomain, string(k), nil)

 		return nil
 	}

From 4423b6ee2d7a4262725ad8fd9cead4a2e1525173 Mon Sep 17 00:00:00 2001
From: Mark Holt <135143369+mh0lt@users.noreply.github.com>
Date: Fri, 10 May 2024 18:29:08 +0100
Subject: [PATCH 19/48] Limit downloads by pieces as well as files (#10261)

This PR changes the way the downloader limits throughput: it adds a
limit by both pieces and files. This means that the download rate is
not affected by having a lot of small files, which have a start-up
overhead in the torrent.

The slot limit is now applied as follows: limit the number of files to
be processed, but if the pieces in those files do not use all of the
bandwidth specified by `torrent.download.rate`, add additional files
until that is the case. In testing this leads to a more consistent
download rate.

This does not fix the drop-off in download rate at the end of the
download process - as the number of files still downloading shrinks, so
does the consumed bandwidth. It seems that files download at 5-10MB/s
due to internal throttling in the torrent lib; this needs further
investigation.
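To make the heuristic concrete, here is a minimal hedged sketch - the
names `morePiecesWanted`, `fileSlots` and `pieceSlots` are illustrative,
not this PR's actual identifiers. `fileSlots` plays the role of the
configured download-slots limit, and `pieceSlots` approximates
`torrent.download.rate` divided by the piece size:

// Hedged sketch - assumed names, not the PR's actual code.
package main

import "fmt"

type torrentState struct {
	name           string
	pieces         int // total pieces in the torrent
	piecesComplete int // pieces already verified
}

// morePiecesWanted reports whether another file may start: the file
// limit is a floor, and beyond it files are admitted only while the
// outstanding piece count still fits the piece budget.
func morePiecesWanted(downloading []torrentState, fileSlots, pieceSlots int) bool {
	piecesDownloading := 0
	for _, t := range downloading {
		piecesDownloading += t.pieces - t.piecesComplete
	}
	if len(downloading) < fileSlots {
		return true // under the file floor: always admit
	}
	return piecesDownloading <= pieceSlots // top up while bandwidth is spare
}

func main() {
	active := []torrentState{{"headers.seg", 100, 40}, {"bodies.seg", 4, 0}}
	// 64 outstanding pieces < 128-piece budget, so another file may start
	fmt.Println(morePiecesWanted(active, 2, 128)) // true
}

The design point is that the file limit acts as a floor rather than a
cap: many small, nearly-complete files can no longer starve the
pipeline, because extra files are admitted until the outstanding pieces
roughly cover the configured rate.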
--------- Co-authored-by: Dmytro --- erigon-lib/diagnostics/entities.go | 26 +- erigon-lib/diagnostics/snapshots.go | 96 ++++++- erigon-lib/diagnostics/snapshots_test.go | 85 ++++++ erigon-lib/downloader/downloader.go | 249 ++++++++++++------ .../downloader/downloadercfg/downloadercfg.go | 5 + erigon-lib/downloader/downloadercfg/logger.go | 4 +- erigon-lib/downloader/webseed.go | 6 +- erigon-lib/go.mod | 2 +- erigon-lib/go.sum | 4 +- go.mod | 2 +- go.sum | 4 +- 11 files changed, 391 insertions(+), 92 deletions(-) create mode 100644 erigon-lib/diagnostics/snapshots_test.go diff --git a/erigon-lib/diagnostics/entities.go b/erigon-lib/diagnostics/entities.go index e7e75c91b12..8d397985f22 100644 --- a/erigon-lib/diagnostics/entities.go +++ b/erigon-lib/diagnostics/entities.go @@ -77,11 +77,23 @@ type SnapshotDownloadStatistics struct { } type SegmentDownloadStatistics struct { - Name string `json:"name"` - TotalBytes uint64 `json:"totalBytes"` - DownloadedBytes uint64 `json:"downloadedBytes"` - Webseeds []SegmentPeer `json:"webseeds"` - Peers []SegmentPeer `json:"peers"` + Name string `json:"name"` + TotalBytes uint64 `json:"totalBytes"` + DownloadedBytes uint64 `json:"downloadedBytes"` + Webseeds []SegmentPeer `json:"webseeds"` + Peers []SegmentPeer `json:"peers"` + DownloadedStats FileDownloadedStatistics `json:"downloadedStats"` +} + +type FileDownloadedStatistics struct { + TimeTook float64 `json:"timeTook"` + AverageRate uint64 `json:"averageRate"` +} + +type FileDownloadedStatisticsUpdate struct { + FileName string `json:"fileName"` + TimeTook float64 `json:"timeTook"` + AverageRate uint64 `json:"averageRate"` } type SegmentPeer struct { @@ -247,6 +259,10 @@ type NetworkSpeedTestResult struct { UploadSpeed float64 `json:"uploadSpeed"` } +func (ti FileDownloadedStatisticsUpdate) Type() Type { + return TypeOf(ti) +} + func (ti MemoryStats) Type() Type { return TypeOf(ti) } diff --git a/erigon-lib/diagnostics/snapshots.go b/erigon-lib/diagnostics/snapshots.go index 25f636c8d29..97f0941083e 100644 --- a/erigon-lib/diagnostics/snapshots.go +++ b/erigon-lib/diagnostics/snapshots.go @@ -12,6 +12,7 @@ func (d *DiagnosticClient) setupSnapshotDiagnostics(rootCtx context.Context) { d.runSegmentIndexingListener(rootCtx) d.runSegmentIndexingFinishedListener(rootCtx) d.runSnapshotFilesListListener(rootCtx) + d.runFileDownloadedListener(rootCtx) } func (d *DiagnosticClient) runSnapshotListener(rootCtx context.Context) { @@ -65,7 +66,17 @@ func (d *DiagnosticClient) runSegmentDownloadingListener(rootCtx context.Context d.syncStats.SnapshotDownload.SegmentsDownloading = map[string]SegmentDownloadStatistics{} } - d.syncStats.SnapshotDownload.SegmentsDownloading[info.Name] = info + if val, ok := d.syncStats.SnapshotDownload.SegmentsDownloading[info.Name]; ok { + val.TotalBytes = info.TotalBytes + val.DownloadedBytes = info.DownloadedBytes + val.Webseeds = info.Webseeds + val.Peers = info.Peers + + d.syncStats.SnapshotDownload.SegmentsDownloading[info.Name] = val + } else { + d.syncStats.SnapshotDownload.SegmentsDownloading[info.Name] = info + } + d.mu.Unlock() } } @@ -173,6 +184,89 @@ func (d *DiagnosticClient) runSnapshotFilesListListener(rootCtx context.Context) }() } +func (d *DiagnosticClient) runFileDownloadedListener(rootCtx context.Context) { + go func() { + ctx, ch, closeChannel := Context[FileDownloadedStatisticsUpdate](rootCtx, 1) + defer closeChannel() + + StartProviders(ctx, TypeOf(FileDownloadedStatisticsUpdate{}), log.Root()) + for { + select { + case <-rootCtx.Done(): + return + case info 
:= <-ch: + d.mu.Lock() + + if d.syncStats.SnapshotDownload.SegmentsDownloading == nil { + d.syncStats.SnapshotDownload.SegmentsDownloading = map[string]SegmentDownloadStatistics{} + } + + if val, ok := d.syncStats.SnapshotDownload.SegmentsDownloading[info.FileName]; ok { + val.DownloadedStats = FileDownloadedStatistics{ + TimeTook: info.TimeTook, + AverageRate: info.AverageRate, + } + + d.syncStats.SnapshotDownload.SegmentsDownloading[info.FileName] = val + } else { + d.syncStats.SnapshotDownload.SegmentsDownloading[info.FileName] = SegmentDownloadStatistics{ + Name: info.FileName, + TotalBytes: 0, + DownloadedBytes: 0, + Webseeds: nil, + Peers: nil, + DownloadedStats: FileDownloadedStatistics{ + TimeTook: info.TimeTook, + AverageRate: info.AverageRate, + }, + } + } + + d.mu.Unlock() + } + } + }() +} + +func (d *DiagnosticClient) UpdateFileDownloadedStatistics(downloadedInfo *FileDownloadedStatisticsUpdate, downloadingInfo *SegmentDownloadStatistics) { + if d.syncStats.SnapshotDownload.SegmentsDownloading == nil { + d.syncStats.SnapshotDownload.SegmentsDownloading = map[string]SegmentDownloadStatistics{} + } + + if downloadedInfo != nil { + dwStats := FileDownloadedStatistics{ + TimeTook: downloadedInfo.TimeTook, + AverageRate: downloadedInfo.AverageRate, + } + if val, ok := d.syncStats.SnapshotDownload.SegmentsDownloading[downloadedInfo.FileName]; ok { + val.DownloadedStats = dwStats + + d.syncStats.SnapshotDownload.SegmentsDownloading[downloadedInfo.FileName] = val + } else { + d.syncStats.SnapshotDownload.SegmentsDownloading[downloadedInfo.FileName] = SegmentDownloadStatistics{ + Name: downloadedInfo.FileName, + TotalBytes: 0, + DownloadedBytes: 0, + Webseeds: make([]SegmentPeer, 0), + Peers: make([]SegmentPeer, 0), + DownloadedStats: dwStats, + } + } + } else { + if val, ok := d.syncStats.SnapshotDownload.SegmentsDownloading[downloadingInfo.Name]; ok { + val.TotalBytes = downloadingInfo.TotalBytes + val.DownloadedBytes = downloadingInfo.DownloadedBytes + val.Webseeds = downloadingInfo.Webseeds + val.Peers = downloadingInfo.Peers + + d.syncStats.SnapshotDownload.SegmentsDownloading[downloadingInfo.Name] = val + } else { + d.syncStats.SnapshotDownload.SegmentsDownloading[downloadingInfo.Name] = *downloadingInfo + } + } + +} + func (d *DiagnosticClient) SyncStatistics() SyncStatistics { return d.syncStats } diff --git a/erigon-lib/diagnostics/snapshots_test.go b/erigon-lib/diagnostics/snapshots_test.go new file mode 100644 index 00000000000..9f56f9f4364 --- /dev/null +++ b/erigon-lib/diagnostics/snapshots_test.go @@ -0,0 +1,85 @@ +package diagnostics_test + +import ( + "testing" + + "github.com/ledgerwatch/erigon-lib/diagnostics" + "github.com/stretchr/testify/require" +) + +func TestUpdateFileDownloadingStats(t *testing.T) { + d := diagnostics.NewDiagnosticClient(nil, "test") + + d.UpdateFileDownloadedStatistics(nil, &segmentDownloadStatsMock) + + sd := d.SyncStatistics().SnapshotDownload.SegmentsDownloading + require.NotNil(t, sd) + require.NotEqual(t, len(sd), 0) + + require.Equal(t, sd["test"], segmentDownloadStatsMock) +} + +func TestUpdateFileDownloadedStats(t *testing.T) { + d := diagnostics.NewDiagnosticClient(nil, "test") + + d.UpdateFileDownloadedStatistics(&fileDownloadedUpdMock, nil) + + sd := d.SyncStatistics().SnapshotDownload.SegmentsDownloading + require.NotNil(t, sd) + require.NotEqual(t, len(sd), 0) + + require.Equal(t, sd["test"], diagnostics.SegmentDownloadStatistics{ + Name: "test", + TotalBytes: 0, + DownloadedBytes: 0, + Webseeds: make([]diagnostics.SegmentPeer, 0), 
+ Peers: make([]diagnostics.SegmentPeer, 0), + DownloadedStats: diagnostics.FileDownloadedStatistics{ + TimeTook: 1.0, + AverageRate: 1, + }, + }) +} + +func TestUpdateFileFullStatsUpdate(t *testing.T) { + d := diagnostics.NewDiagnosticClient(nil, "test") + + d.UpdateFileDownloadedStatistics(nil, &segmentDownloadStatsMock) + + sd := d.SyncStatistics().SnapshotDownload.SegmentsDownloading + require.NotNil(t, sd) + require.NotEqual(t, len(sd), 0) + + require.Equal(t, sd["test"], segmentDownloadStatsMock) + + d.UpdateFileDownloadedStatistics(&fileDownloadedUpdMock, nil) + + require.Equal(t, sd["test"], diagnostics.SegmentDownloadStatistics{ + Name: "test", + TotalBytes: 1, + DownloadedBytes: 1, + Webseeds: make([]diagnostics.SegmentPeer, 0), + Peers: make([]diagnostics.SegmentPeer, 0), + DownloadedStats: diagnostics.FileDownloadedStatistics{ + TimeTook: 1.0, + AverageRate: 1, + }, + }) +} + +var ( + fileDownloadedUpdMock = diagnostics.FileDownloadedStatisticsUpdate{ + FileName: "test", + TimeTook: 1.0, + AverageRate: 1, + } + + segmentDownloadStatsMock = diagnostics.SegmentDownloadStatistics{ + Name: "test", + TotalBytes: 1, + DownloadedBytes: 1, + Webseeds: make([]diagnostics.SegmentPeer, 0), + Peers: make([]diagnostics.SegmentPeer, 0), + DownloadedStats: diagnostics.FileDownloadedStatistics{}, + } +) diff --git a/erigon-lib/downloader/downloader.go b/erigon-lib/downloader/downloader.go index 24316d406fd..ab2fc0368dd 100644 --- a/erigon-lib/downloader/downloader.go +++ b/erigon-lib/downloader/downloader.go @@ -92,10 +92,16 @@ type Downloader struct { torrentFS *AtomicTorrentFS snapshotLock *snapshotLock webDownloadInfo map[string]webDownloadInfo - downloading map[string]struct{} + downloading map[string]*downloadInfo downloadLimit *rate.Limit } +type downloadInfo struct { + torrent *torrent.Torrent + time time.Time + progress float32 +} + type webDownloadInfo struct { url *url.URL length int64 @@ -103,11 +109,6 @@ type webDownloadInfo struct { torrent *torrent.Torrent } -type downloadProgress struct { - time time.Time - progress float32 -} - type AggStats struct { MetadataReady, FilesTotal int32 LastMetadataUpdate *time.Time @@ -132,7 +133,6 @@ type AggStats struct { WebseedBytesDownload *atomic.Int64 lastTorrentStatus time.Time - downloadProgress map[string]downloadProgress } type requestHandler struct { @@ -292,7 +292,6 @@ func New(ctx context.Context, cfg *downloadercfg.Cfg, logger log.Logger, verbosi WebseedBytesDownload: &atomic.Int64{}, WebseedDiscardCount: &atomic.Int64{}, WebseedServerFails: &atomic.Int64{}, - downloadProgress: map[string]downloadProgress{}, } snapLock, err := getSnapshotLock(ctx, cfg, db, &stats, mutex, logger) @@ -315,7 +314,7 @@ func New(ctx context.Context, cfg *downloadercfg.Cfg, logger log.Logger, verbosi snapshotLock: snapLock, webDownloadInfo: map[string]webDownloadInfo{}, webDownloadSessions: map[string]*RCloneSession{}, - downloading: map[string]struct{}{}, + downloading: map[string]*downloadInfo{}, webseedsDiscover: discover, } d.webseeds.SetTorrent(d.torrentFS, snapLock.Downloads, cfg.DownloadTorrentFilesFromWebseed) @@ -831,7 +830,15 @@ func (d *Downloader) mainLoop(silent bool) error { }() } - var sem = semaphore.NewWeighted(int64(d.cfg.DownloadSlots)) + fileSlots := d.cfg.DownloadSlots + + var pieceSlots int + + if d.downloadLimit != nil { + pieceSlots = int(math.Round(float64(*d.downloadLimit / rate.Limit(downloadercfg.DefaultPieceSize)))) + } else { + pieceSlots = int(512 * datasize.MB / downloadercfg.DefaultPieceSize) + } //TODO: feature is not 
ready yet //d.webDownloadClient, _ = NewRCloneClient(d.logger) @@ -855,6 +862,8 @@ func (d *Downloader) mainLoop(silent bool) error { checkGroup, _ := errgroup.WithContext(d.ctx) checkGroup.SetLimit(runtime.GOMAXPROCS(-1) * 4) + lastIntMult := time.Now() + for { torrents := d.torrentClient.Torrents() @@ -925,6 +934,7 @@ func (d *Downloader) mainLoop(silent bool) error { if _, ok := failed[t.Name()]; ok { continue } + d.lock.RLock() _, downloading := d.downloading[t.Name()] d.lock.RUnlock() @@ -1068,7 +1078,19 @@ func (d *Downloader) mainLoop(silent bool) error { d.stats.Downloading = int32(downloadingLen) d.lock.RUnlock() - available := availableTorrents(d.ctx, pending, d.cfg.DownloadSlots-downloadingLen) + // the call interval of the loop (elapsed sec) used to get slots/sec for + // calculating the number of files to download based on the loop speed + intervalMultiplier := int(time.Since(lastIntMult).Seconds()) + + // min and max here are taken from the torrent peer config + switch { + case intervalMultiplier < 16: + intervalMultiplier = 16 + case intervalMultiplier > 128: + intervalMultiplier = 128 + } + + available := availableTorrents(d.ctx, pending, d.downloading, fileSlots, pieceSlots*intervalMultiplier) d.lock.RLock() for _, webDownload := range d.webDownloadInfo { @@ -1175,7 +1197,7 @@ func (d *Downloader) mainLoop(silent bool) error { case len(t.PeerConns()) > 0: d.logger.Debug("[snapshots] Downloading from BitTorrent", "file", t.Name(), "peers", len(t.PeerConns()), "webpeers", len(t.WebseedPeerConns())) delete(waiting, t.Name()) - d.torrentDownload(t, downloadComplete, sem) + d.torrentDownload(t, downloadComplete) case len(t.WebseedPeerConns()) > 0: if d.webDownloadClient != nil { var peerUrls []*url.URL @@ -1188,21 +1210,21 @@ func (d *Downloader) mainLoop(silent bool) error { d.logger.Debug("[snapshots] Downloading from webseed", "file", t.Name(), "webpeers", len(t.WebseedPeerConns())) delete(waiting, t.Name()) - session, err := d.webDownload(peerUrls, t, nil, downloadComplete, sem) + session, err := d.webDownload(peerUrls, t, nil, downloadComplete) if err != nil { d.logger.Warn("Can't complete web download", "file", t.Info().Name, "err", err) if session == nil { delete(waiting, t.Name()) - d.torrentDownload(t, downloadComplete, sem) + d.torrentDownload(t, downloadComplete) } continue } } else { d.logger.Debug("[snapshots] Downloading from torrent", "file", t.Name(), "peers", len(t.PeerConns()), "webpeers", len(t.WebseedPeerConns())) delete(waiting, t.Name()) - d.torrentDownload(t, downloadComplete, sem) + d.torrentDownload(t, downloadComplete) } default: if d.webDownloadClient != nil { @@ -1241,13 +1263,13 @@ func (d *Downloader) mainLoop(silent bool) error { d.logger.Debug("[snapshots] Downloading from web", "file", t.Name(), "webpeers", len(t.WebseedPeerConns())) delete(waiting, t.Name()) - d.webDownload([]*url.URL{peerUrl}, t, &webDownload, downloadComplete, sem) + d.webDownload([]*url.URL{peerUrl}, t, &webDownload, downloadComplete) continue } d.logger.Debug("[snapshots] Downloading from torrent", "file", t.Name(), "peers", len(t.PeerConns())) delete(waiting, t.Name()) - d.torrentDownload(t, downloadComplete, sem) + d.torrentDownload(t, downloadComplete) } } @@ -1296,6 +1318,7 @@ func (d *Downloader) mainLoop(silent bool) error { } } } + } }() @@ -1470,21 +1493,17 @@ func getWebpeerTorrentInfo(ctx context.Context, downloadUrl *url.URL) (*metainfo return metainfo.Load(torrentResponse.Body) } -func (d *Downloader) torrentDownload(t *torrent.Torrent, statusChan chan 
downloadStatus, sem *semaphore.Weighted) { +func (d *Downloader) torrentDownload(t *torrent.Torrent, statusChan chan downloadStatus) { d.lock.Lock() - d.downloading[t.Name()] = struct{}{} + d.downloading[t.Name()] = &downloadInfo{torrent: t} d.lock.Unlock() - if err := sem.Acquire(d.ctx, 1); err != nil { - d.logger.Warn("Failed to acquire download semaphore", "err", err) - return - } - d.wg.Add(1) go func(t *torrent.Torrent) { defer d.wg.Done() - defer sem.Release(1) + + downloadStarted := time.Now() t.AllowDataDownload() @@ -1504,6 +1523,18 @@ func (d *Downloader) torrentDownload(t *torrent.Torrent, statusChan chan downloa case <-d.ctx.Done(): return case <-t.Complete.On(): + downloadTime := time.Since(downloadStarted) + downloaded := t.Stats().BytesReadUsefulData + + diagnostics.Send(diagnostics.FileDownloadedStatisticsUpdate{ + FileName: t.Name(), + TimeTook: downloadTime.Seconds(), + AverageRate: uint64(float64(downloaded.Int64()) / downloadTime.Seconds()), + }) + + d.logger.Debug("[snapshots] Downloaded from BitTorrent", "file", t.Name(), + "download-time", downloadTime.Round(time.Second).String(), "downloaded", common.ByteCount(uint64(downloaded.Int64())), + "rate", fmt.Sprintf("%s/s", common.ByteCount(uint64(float64(downloaded.Int64())/downloadTime.Seconds())))) return case <-time.After(10 * time.Second): bytesRead := t.Stats().BytesReadData @@ -1525,7 +1556,7 @@ func (d *Downloader) torrentDownload(t *torrent.Torrent, statusChan chan downloa }(t) } -func (d *Downloader) webDownload(peerUrls []*url.URL, t *torrent.Torrent, i *webDownloadInfo, statusChan chan downloadStatus, sem *semaphore.Weighted) (*RCloneSession, error) { +func (d *Downloader) webDownload(peerUrls []*url.URL, t *torrent.Torrent, i *webDownloadInfo, statusChan chan downloadStatus) (*RCloneSession, error) { if d.webDownloadClient == nil { return nil, fmt.Errorf("webdownload client not enabled") } @@ -1581,19 +1612,13 @@ func (d *Downloader) webDownload(peerUrls []*url.URL, t *torrent.Torrent, i *web d.lock.Lock() t.Drop() - d.downloading[name] = struct{}{} + d.downloading[name] = &downloadInfo{torrent: t} d.lock.Unlock() d.wg.Add(1) - if err := sem.Acquire(d.ctx, 1); err != nil { - d.logger.Warn("Failed to acquire download semaphore", "err", err) - return nil, err - } - go func() { defer d.wg.Done() - defer sem.Release(1) if dir.FileExist(info.Path) { if err := os.Remove(info.Path); err != nil { @@ -1699,8 +1724,25 @@ func selectDownloadPeer(ctx context.Context, peerUrls []*url.URL, t *torrent.Tor return "", fmt.Errorf("can't find download peer") } -func availableTorrents(ctx context.Context, pending []*torrent.Torrent, slots int) []*torrent.Torrent { - if slots == 0 { +func availableTorrents(ctx context.Context, pending []*torrent.Torrent, downloading map[string]*downloadInfo, fileSlots int, pieceSlots int) []*torrent.Torrent { + + piecesDownloading := 0 + pieceRemainder := int64(0) + + for _, info := range downloading { + if info.torrent.NumPieces() == 1 { + pieceRemainder += info.torrent.Info().Length + + if pieceRemainder >= downloadercfg.DefaultPieceSize { + pieceRemainder = 0 + piecesDownloading++ + } + } else { + piecesDownloading += info.torrent.NumPieces() - info.torrent.Stats().PiecesComplete + } + } + + if len(downloading) >= fileSlots && piecesDownloading > pieceSlots { select { case <-ctx.Done(): return nil @@ -1739,7 +1781,18 @@ func availableTorrents(ctx context.Context, pending []*torrent.Torrent, slots in for len(pending) > 0 && pending[0].Info() != nil { available = append(available, pending[0]) - 
if len(available) == slots { + if pending[0].NumPieces() == 1 { + pieceRemainder += pending[0].Info().Length + + if pieceRemainder >= downloadercfg.DefaultPieceSize { + pieceRemainder = 0 + piecesDownloading++ + } + } else { + piecesDownloading += pending[0].NumPieces() + } + + if len(available) >= fileSlots && piecesDownloading > pieceSlots { return available } @@ -1748,7 +1801,7 @@ func availableTorrents(ctx context.Context, pending []*torrent.Torrent, slots in for len(pendingStateFiles) > 0 && pendingStateFiles[0].Info() != nil { available = append(available, pendingStateFiles[0]) - if len(available) == slots { + if len(available) >= fileSlots && piecesDownloading > pieceSlots { return available } @@ -1792,7 +1845,18 @@ func availableTorrents(ctx context.Context, pending []*torrent.Torrent, slots in default: available = append(available, pending[selected]) - if len(available) == slots { + if pending[selected].NumPieces() == 1 { + pieceRemainder += pending[selected].Info().Length + + if pieceRemainder >= downloadercfg.DefaultPieceSize { + pieceRemainder = 0 + piecesDownloading++ + } + } else { + piecesDownloading += pending[selected].NumPieces() + } + + if len(available) >= fileSlots && piecesDownloading > pieceSlots { return available } @@ -1829,15 +1893,40 @@ func (d *Downloader) torrentInfo(name string) (*torrentInfo, error) { } func (d *Downloader) ReCalcStats(interval time.Duration) { - d.lock.Lock() - defer d.lock.Unlock() - //Call this methods outside of `lock` critical section, because they have own locks with contention - torrents := d.torrentClient.Torrents() - connStats := d.torrentClient.ConnStats() + d.lock.RLock() + + torrentClient := d.torrentClient + peers := make(map[torrent.PeerID]struct{}, 16) prevStats, stats := d.stats, d.stats + logger := d.logger + verbosity := d.verbosity + + downloading := map[string]*downloadInfo{} + + for file, info := range d.downloading { + i := *info + downloading[file] = &i + } + + webDownloadClient := d.webDownloadClient + + webDownloadInfo := map[string]webDownloadInfo{} + + for key, value := range d.webDownloadInfo { + webDownloadInfo[key] = value + } + + ctx := d.ctx + + d.lock.RUnlock() + + //Call this methods outside of `lock` critical section, because they have own locks with contention + torrents := torrentClient.Torrents() + connStats := torrentClient.ConnStats() + stats.Completed = true stats.BytesDownload = uint64(connStats.BytesReadUsefulIntendedData.Int64()) stats.BytesUpload = uint64(connStats.BytesWrittenData.Int64()) @@ -1859,12 +1948,6 @@ func (d *Downloader) ReCalcStats(interval time.Duration) { diagnostics.Send(diagnostics.SnapshoFilesList{Files: filesList}) } - downloading := map[string]float32{} - - for file := range d.downloading { - downloading[file] = 0 - } - var dbInfo int var tComplete int var torrentInfo int @@ -1900,14 +1983,11 @@ func (d *Downloader) ReCalcStats(interval time.Duration) { } progress := float32(float64(100) * (float64(bytesCompleted) / float64(tLen))) - if _, ok := downloading[torrentName]; ok { - - if progress != stats.downloadProgress[torrentName].progress { - stats.downloadProgress[torrentName] = downloadProgress{time: time.Now(), progress: progress} + if info, ok := downloading[torrentName]; ok { + if progress != info.progress { + info.time = time.Now() + info.progress = progress } - } else { - // we only care about progress of downloading files - delete(stats.downloadProgress, torrentName) } stats.BytesCompleted += uint64(bytesCompleted) @@ -1922,11 +2002,15 @@ func (d *Downloader) 
ReCalcStats(interval time.Duration) { rates, peers := getPeersRatesForlogs(peersOfThisFile, torrentName) if !torrentComplete { - if info, err := d.torrentInfo(torrentName); err == nil { + d.lock.RLock() + info, err := d.torrentInfo(torrentName) + d.lock.RUnlock() + + if err == nil { if info != nil { dbInfo++ } - } else if _, ok := d.webDownloadInfo[torrentName]; ok { + } else if _, ok := webDownloadInfo[torrentName]; ok { stats.MetadataReady++ } else { noMetadata = append(noMetadata, torrentName) @@ -1939,13 +2023,14 @@ func (d *Downloader) ReCalcStats(interval time.Duration) { // more detailed statistic: download rate of each peer (for each file) if !torrentComplete && progress != 0 { - if _, ok := downloading[torrentName]; ok { - downloading[torrentName] = progress + if info, ok := downloading[torrentName]; ok { + info.time = time.Now() + info.progress = progress } - d.logger.Log(d.verbosity, "[snapshots] progress", "file", torrentName, "progress", fmt.Sprintf("%.2f%%", progress), "peers", len(peersOfThisFile), "webseeds", len(weebseedPeersOfThisFile)) - d.logger.Log(d.verbosity, "[snapshots] webseed peers", webseedRates...) - d.logger.Log(d.verbosity, "[snapshots] bittorrent peers", rates...) + logger.Log(verbosity, "[snapshots] progress", "file", torrentName, "progress", fmt.Sprintf("%.2f%%", progress), "peers", len(peersOfThisFile), "webseeds", len(weebseedPeersOfThisFile)) + logger.Log(verbosity, "[snapshots] webseed peers", webseedRates...) + logger.Log(verbosity, "[snapshots] bittorrent peers", rates...) } diagnostics.Send(diagnostics.SegmentDownloadStatistics{ @@ -1961,8 +2046,8 @@ func (d *Downloader) ReCalcStats(interval time.Duration) { var webTransfers int32 - if d.webDownloadClient != nil { - webStats, _ := d.webDownloadClient.Stats(d.ctx) + if webDownloadClient != nil { + webStats, _ := webDownloadClient.Stats(ctx) if webStats != nil { if len(webStats.Transferring) != 0 && stats.Completed { @@ -2009,8 +2094,8 @@ func (d *Downloader) ReCalcStats(interval time.Duration) { // more detailed statistic: download rate of each peer (for each file) if transfer.Percentage != 0 { - d.logger.Log(d.verbosity, "[snapshots] progress", "file", transferName, "progress", fmt.Sprintf("%.2f%%", float32(transfer.Percentage)), "webseeds", 1) - d.logger.Log(d.verbosity, "[snapshots] web peers", webseedRates...) + logger.Log(verbosity, "[snapshots] progress", "file", transferName, "progress", fmt.Sprintf("%.2f%%", float32(transfer.Percentage)), "webseeds", 1) + logger.Log(verbosity, "[snapshots] web peers", webseedRates...) 
} diagnostics.Send(diagnostics.SegmentDownloadStatistics{ @@ -2024,7 +2109,7 @@ func (d *Downloader) ReCalcStats(interval time.Duration) { } if len(downloading) > 0 { - if d.webDownloadClient != nil { + if webDownloadClient != nil { webTransfers += int32(len(downloading)) } @@ -2032,7 +2117,7 @@ func (d *Downloader) ReCalcStats(interval time.Duration) { } if !stats.Completed { - d.logger.Debug("[snapshots] info", + logger.Debug("[snapshots] info", "len", len(torrents), "webTransfers", webTransfers, "torrent", torrentInfo, @@ -2055,7 +2140,7 @@ func (d *Downloader) ReCalcStats(interval time.Duration) { if len(noMetadata) > 5 { noMetadata = append(noMetadata[:5], "...") } - d.logger.Info("[snapshots] no metadata yet", "files", amount, "list", strings.Join(noMetadata, ",")) + logger.Info("[snapshots] no metadata yet", "files", amount, "list", strings.Join(noMetadata, ",")) } var noDownloadProgress []string @@ -2073,17 +2158,17 @@ func (d *Downloader) ReCalcStats(interval time.Duration) { zeroProgress = append(zeroProgress[:5], "...") } - d.logger.Info("[snapshots] no progress yet", "files", amount, "list", strings.Join(zeroProgress, ",")) + logger.Info("[snapshots] no progress yet", "files", amount, "list", strings.Join(zeroProgress, ",")) } if len(downloading) > 0 { amount := len(downloading) files := make([]string, 0, len(downloading)) - for file, progress := range downloading { - files = append(files, fmt.Sprintf("%s (%.0f%%)", file, progress)) + for file, info := range downloading { + files = append(files, fmt.Sprintf("%s (%.0f%%)", file, info.progress)) - if dp, ok := stats.downloadProgress[file]; ok { + if dp, ok := downloading[file]; ok { if time.Since(dp.time) > 30*time.Minute { noDownloadProgress = append(noDownloadProgress, file) } @@ -2091,16 +2176,16 @@ func (d *Downloader) ReCalcStats(interval time.Duration) { } sort.Strings(files) - d.logger.Log(d.verbosity, "[snapshots] downloading", "files", amount, "list", strings.Join(files, ", ")) + logger.Log(verbosity, "[snapshots] downloading", "files", amount, "list", strings.Join(files, ", ")) } if time.Since(stats.lastTorrentStatus) > 5*time.Minute { stats.lastTorrentStatus = time.Now() if len(noDownloadProgress) > 0 { - progressStatus := getProgressStatus(d.torrentClient, noDownloadProgress) + progressStatus := getProgressStatus(torrentClient, noDownloadProgress) for file, status := range progressStatus { - d.logger.Debug(fmt.Sprintf("[snapshots] torrent status: %s\n %s", file, + logger.Debug(fmt.Sprintf("[snapshots] torrent status: %s\n %s", file, string(bytes.TrimRight(bytes.ReplaceAll(status, []byte("\n"), []byte("\n ")), "\n ")))) } } @@ -2130,7 +2215,17 @@ func (d *Downloader) ReCalcStats(interval time.Duration) { stats.PeersUnique = int32(len(peers)) stats.FilesTotal = int32(len(torrents)) + webTransfers + d.lock.Lock() d.stats = stats + + for file, info := range d.downloading { + if updated, ok := downloading[file]; ok { + info.time = updated.time + info.progress = updated.progress + } + } + + d.lock.Unlock() } type filterWriter struct { diff --git a/erigon-lib/downloader/downloadercfg/downloadercfg.go b/erigon-lib/downloader/downloadercfg/downloadercfg.go index 7f3e5bfaef0..10f24cabe4d 100644 --- a/erigon-lib/downloader/downloadercfg/downloadercfg.go +++ b/erigon-lib/downloader/downloadercfg/downloadercfg.go @@ -74,6 +74,11 @@ func Default() *torrent.ClientConfig { // *torrent.PeerConn: waiting for alloc limit reservation: reservation for 1802972 exceeds limiter max 1048576 torrentConfig.MaxAllocPeerRequestDataPerConn = 
int64(DefaultPieceSize) + // this limits the amount of unverified bytes - which will throttle the + // number of requests the torrent will handle - it acts as a brake on + // parallelism if set (default is 67,108,864) + torrentConfig.MaxUnverifiedBytes = 0 + // enable dht torrentConfig.NoDHT = true //torrentConfig.DisableTrackers = true diff --git a/erigon-lib/downloader/downloadercfg/logger.go b/erigon-lib/downloader/downloadercfg/logger.go index 88eb5dcabfa..7781f5d5d94 100644 --- a/erigon-lib/downloader/downloadercfg/logger.go +++ b/erigon-lib/downloader/downloadercfg/logger.go @@ -92,7 +92,7 @@ func (b adapterHandler) Handle(r lg.Record) { skip := strings.Contains(str, "EOF") || strings.Contains(str, "requested chunk too long") || strings.Contains(str, "banned ip") || - strings.Contains(str, "banning webseed") || + //strings.Contains(str, "banning webseed") || strings.Contains(str, "TrackerClient closed") || strings.Contains(str, "being sole dirtier of piece") || strings.Contains(str, "webrtc conn for unloaded torrent") || @@ -101,7 +101,7 @@ func (b adapterHandler) Handle(r lg.Record) { strings.Contains(str, "reservation cancelled") if skip { - log.Trace(str) + log.Debug(str) break } log.Warn(str) diff --git a/erigon-lib/downloader/webseed.go b/erigon-lib/downloader/webseed.go index c4080ec2c40..e25eec414cc 100644 --- a/erigon-lib/downloader/webseed.go +++ b/erigon-lib/downloader/webseed.go @@ -67,7 +67,7 @@ func (d *WebSeeds) getWebDownloadInfo(ctx context.Context, t *torrent.Torrent) ( headResponse.Body.Close() if headResponse.StatusCode != http.StatusOK { - d.logger.Debug("[snapshots.webseed] getWebDownloadInfo: HEAD request failed", + d.logger.Trace("[snapshots.webseed] getWebDownloadInfo: HEAD request failed", "webseed", webseed.String(), "name", t.Name(), "status", headResponse.Status) continue } @@ -93,6 +93,10 @@ func (d *WebSeeds) getWebDownloadInfo(ctx context.Context, t *torrent.Torrent) ( seedHashMismatches = append(seedHashMismatches, &seedHash{url: webseed}) } + if len(infos) == 0 { + d.logger.Trace("[snapshots.webseed] webseed info not found", "name", t.Name()) + } + return infos, seedHashMismatches, nil } diff --git a/erigon-lib/go.mod b/erigon-lib/go.mod index 05a19039c31..eeefcf97509 100644 --- a/erigon-lib/go.mod +++ b/erigon-lib/go.mod @@ -148,7 +148,7 @@ require ( ) replace ( - github.com/anacrolix/torrent => github.com/erigontech/torrent v1.54.2-alpha-8 + github.com/anacrolix/torrent => github.com/erigontech/torrent v1.54.2-alpha-10 github.com/holiman/bloomfilter/v2 => github.com/AskAlexSharov/bloomfilter/v2 v2.0.8 github.com/tidwall/btree => github.com/AskAlexSharov/btree v1.6.2 ) diff --git a/erigon-lib/go.sum b/erigon-lib/go.sum index ef7893a67aa..57f098dd177 100644 --- a/erigon-lib/go.sum +++ b/erigon-lib/go.sum @@ -146,8 +146,8 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/erigontech/mdbx-go v0.38.0 h1:K64h6YHc2biN081DPEp/KP1TE+X0Jmxu8T+RJadNkXc= github.com/erigontech/mdbx-go v0.38.0/go.mod h1:FAMxbOgqOnRDx51j8HjuJZIgznbDwjX7LItd+/UWyA4= -github.com/erigontech/torrent v1.54.2-alpha-8 h1:MQobu6sUZCFbmWpsB7GqAh0IWs7VAZ370POaVxlApIk= -github.com/erigontech/torrent v1.54.2-alpha-8/go.mod h1:nYNeuR4xPlEl4CturFD9/KRXBRJEcJGqjegDNWakwG4= +github.com/erigontech/torrent v1.54.2-alpha-10 h1:MqEorLDG5n2jsNAsSC+TKuZUyExO/KfGumHxh7GHG3o= +github.com/erigontech/torrent v1.54.2-alpha-10/go.mod 
h1:nYNeuR4xPlEl4CturFD9/KRXBRJEcJGqjegDNWakwG4=
github.com/frankban/quicktest v1.9.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y=
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
diff --git a/go.mod b/go.mod
index eb99e2dde5a..4f4bd143ccb 100644
--- a/go.mod
+++ b/go.mod
@@ -297,6 +297,6 @@ require (
 )

 replace (
-	github.com/anacrolix/torrent => github.com/erigontech/torrent v1.54.2-alpha-8
+	github.com/anacrolix/torrent => github.com/erigontech/torrent v1.54.2-alpha-10
 	github.com/holiman/bloomfilter/v2 => github.com/AskAlexSharov/bloomfilter/v2 v2.0.8
 )
diff --git a/go.sum b/go.sum
index f96523c38cb..1bfef335f1c 100644
--- a/go.sum
+++ b/go.sum
@@ -273,8 +273,8 @@ github.com/erigontech/mdbx-go v0.38.0 h1:K64h6YHc2biN081DPEp/KP1TE+X0Jmxu8T+RJad
 github.com/erigontech/mdbx-go v0.38.0/go.mod h1:FAMxbOgqOnRDx51j8HjuJZIgznbDwjX7LItd+/UWyA4=
 github.com/erigontech/silkworm-go v0.18.0 h1:j56p61xZHBFhZGH1OixlGU8KcfjHzcw9pjAfjmVsOZA=
 github.com/erigontech/silkworm-go v0.18.0/go.mod h1:O50ux0apICEVEGyRWiE488K8qz8lc3PA/SXbQQAc8SU=
-github.com/erigontech/torrent v1.54.2-alpha-8 h1:MQobu6sUZCFbmWpsB7GqAh0IWs7VAZ370POaVxlApIk=
-github.com/erigontech/torrent v1.54.2-alpha-8/go.mod h1:nYNeuR4xPlEl4CturFD9/KRXBRJEcJGqjegDNWakwG4=
+github.com/erigontech/torrent v1.54.2-alpha-10 h1:MqEorLDG5n2jsNAsSC+TKuZUyExO/KfGumHxh7GHG3o=
+github.com/erigontech/torrent v1.54.2-alpha-10/go.mod h1:nYNeuR4xPlEl4CturFD9/KRXBRJEcJGqjegDNWakwG4=
 github.com/fjl/gencodec v0.0.0-20220412091415-8bb9e558978c h1:CndMRAH4JIwxbW8KYq6Q+cGWcGHz0FjGR3QqcInWcW0=
 github.com/fjl/gencodec v0.0.0-20220412091415-8bb9e558978c/go.mod h1:AzA8Lj6YtixmJWL+wkKoBGsLWy9gFrAzi4g+5bCKwpY=
 github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=

From 23d99d637fef70131364c0c6090a37e7d4ac1990 Mon Sep 17 00:00:00 2001
From: awskii
Date: Fri, 10 May 2024 18:29:59 +0100
Subject: [PATCH 20/48] make prune each batch in new tx (#10274)

Also helps us to prune steps that were just aggregated during the
previous Prune run
---
 erigon-lib/state/aggregator.go         | 93 ++++++++++++++++++++++++++
 erigon-lib/state/domain_shared_test.go | 16 ++---
 eth/stagedsync/exec3.go                | 17 ++---
 eth/stagedsync/stage_execute.go        |  1 +
 turbo/app/snapshots_cmd.go             | 25 +++----
 5 files changed, 118 insertions(+), 34 deletions(-)

diff --git a/erigon-lib/state/aggregator.go b/erigon-lib/state/aggregator.go
index f735cf161dd..5c4f758b52c 100644
--- a/erigon-lib/state/aggregator.go
+++ b/erigon-lib/state/aggregator.go
@@ -801,6 +801,99 @@ func (ac *AggregatorRoTx) CanUnwindBeforeBlockNum(blockNum uint64, tx kv.Tx) (ui
 	return blockNumWithCommitment, true, nil
 }

+func (ac *AggregatorRoTx) PruneSmallBatchesDb(ctx context.Context, timeout time.Duration, db kv.RwDB) (haveMore bool, err error) {
+	// On tip-of-chain timeout is about `3sec`
+	// On tip of chain: must be real-time - prune by small batches and prioritize exact-`timeout`
+	// Not on tip of chain: must be aggressive (prune as much as possible) by bigger batches
+
+	furiousPrune := timeout > 5*time.Hour
+	aggressivePrune := !furiousPrune && timeout >= 1*time.Minute
+
+	var pruneLimit uint64 = 1_000
+	var withWarmup bool = false //nolint
+	if furiousPrune {
+		pruneLimit = 1_000_000
+		/* disabling this feature for now - seems it doesn't cancel even after prune finished
+		// start from a fairly high limit to give time for warmup
+		// will disable warmup after
first iteration and will adjust pruneLimit based on `time`
+		withWarmup = true
+		*/
+	}
+
+	started := time.Now()
+	localTimeout := time.NewTicker(timeout)
+	defer localTimeout.Stop()
+	logPeriod := 30 * time.Second
+	logEvery := time.NewTicker(logPeriod)
+	defer logEvery.Stop()
+	aggLogEvery := time.NewTicker(600 * time.Second) // to hide specific domain/idx logging
+	defer aggLogEvery.Stop()
+
+	fullStat := newAggregatorPruneStat()
+	innerCtx := context.Background()
+	goExit := false
+
+	for {
+		err = db.Update(innerCtx, func(tx kv.RwTx) error {
+			iterationStarted := time.Now()
+			// `context.Background()` is important here!
+			// it allows keeping the DB consistent - prune all of a key's related data or nothing
+			// can't be interrupted by ctrl+c, which would leave dirt in the DB
+			stat, err := ac.Prune(innerCtx, tx, pruneLimit, withWarmup, aggLogEvery)
+			if err != nil {
+				ac.a.logger.Warn("[snapshots] PruneSmallBatches failed", "err", err)
+				return err
+			}
+			if stat == nil {
+				if fstat := fullStat.String(); fstat != "" {
+					ac.a.logger.Info("[snapshots] PruneSmallBatches finished", "took", time.Since(started).String(), "stat", fstat)
+				}
+				goExit = true
+				return nil
+			}
+			fullStat.Accumulate(stat)
+
+			withWarmup = false // warmup once is enough
+
+			if aggressivePrune {
+				took := time.Since(iterationStarted)
+				if took < 2*time.Second {
+					pruneLimit *= 10
+				}
+				if took > logPeriod {
+					pruneLimit /= 10
+				}
+			}
+
+			select {
+			case <-logEvery.C:
+				ac.a.logger.Info("[snapshots] pruning state",
+					"until commit", time.Until(started.Add(timeout)).String(),
+					"pruneLimit", pruneLimit,
+					"aggregatedStep", (ac.minimaxTxNumInDomainFiles(false)-1)/ac.a.StepSize(),
+					"stepsRangeInDB", ac.a.StepsRangeInDBAsStr(tx),
+					"pruned", fullStat.String(),
+				)
+			default:
+			}
+			return nil
+		})
+		if err != nil {
+			return false, err
+		}
+		select {
+		case <-localTimeout.C: //must be first to improve responsiveness
+			return true, nil
+		case <-ctx.Done():
+			return false, ctx.Err()
+		default:
+		}
+		if goExit {
+			return false, nil
+		}
+	}
+}
+
 // PruneSmallBatches is not cancellable, it's over when it's over or failed.
// It fills whole timeout with pruning by small batches (of 100 keys) and making some progress func (ac *AggregatorRoTx) PruneSmallBatches(ctx context.Context, timeout time.Duration, tx kv.RwTx) (haveMore bool, err error) { diff --git a/erigon-lib/state/domain_shared_test.go b/erigon-lib/state/domain_shared_test.go index 5e26a655c96..51fb46ae3bf 100644 --- a/erigon-lib/state/domain_shared_test.go +++ b/erigon-lib/state/domain_shared_test.go @@ -4,15 +4,14 @@ import ( "context" "encoding/binary" "fmt" - "math/rand" - "testing" - "time" - "github.com/holiman/uint256" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/rawdbv3" "github.com/ledgerwatch/log/v3" "github.com/stretchr/testify/require" + "math/rand" + "testing" + "time" "github.com/ledgerwatch/erigon-lib/common/length" "github.com/ledgerwatch/erigon-lib/types" @@ -425,10 +424,11 @@ func TestSharedDomain_StorageIter(t *testing.T) { ac.Close() ac = agg.BeginFilesRo() - err = db.Update(ctx, func(tx kv.RwTx) error { - _, err = ac.PruneSmallBatches(ctx, 1*time.Minute, tx) - return err - }) + //err = db.Update(ctx, func(tx kv.RwTx) error { + // _, err = ac.PruneSmallBatches(ctx, 1*time.Minute, tx) + // return err + //}) + _, err = ac.PruneSmallBatchesDb(ctx, 1*time.Minute, db) require.NoError(t, err) ac.Close() diff --git a/eth/stagedsync/exec3.go b/eth/stagedsync/exec3.go index 15829ed1f29..dbf701812ea 100644 --- a/eth/stagedsync/exec3.go +++ b/eth/stagedsync/exec3.go @@ -881,6 +881,7 @@ Loop: tt = time.Now() applyTx.CollectMetrics() if !useExternalTx { + aggCtx := applyTx.(state2.HasAggCtx).AggCtx().(*state2.AggregatorRoTx) tt = time.Now() if err = applyTx.Commit(); err != nil { return err @@ -893,18 +894,10 @@ Loop: tt = time.Now() for haveMoreToPrune := true; haveMoreToPrune; { - if err := chainDb.Update(ctx, func(tx kv.RwTx) error { - //very aggressive prune, because: - // if prune is slow - means DB > RAM and skip pruning will only make things worse - // db will grow -> prune will get slower -> db will grow -> ... - if haveMoreToPrune, err = tx.(state2.HasAggCtx). - AggCtx().(*state2.AggregatorRoTx). - PruneSmallBatches(ctx, 10*time.Minute, tx); err != nil { - - return err - } - return nil - }); err != nil { + //very aggressive prune, because: + // if prune is slow - means DB > RAM and skip pruning will only make things worse + // db will grow -> prune will get slower -> db will grow -> ... 
+				if haveMoreToPrune, err = aggCtx.PruneSmallBatchesDb(ctx, 10*time.Minute, chainDb); err != nil {
 					return err
 				}
 			}
diff --git a/eth/stagedsync/stage_execute.go b/eth/stagedsync/stage_execute.go
index 9c391c771d1..0278b4c51c9 100644
--- a/eth/stagedsync/stage_execute.go
+++ b/eth/stagedsync/stage_execute.go
@@ -999,6 +999,7 @@ func PruneExecutionStage(s *PruneState, tx kv.RwTx, cfg ExecuteBlockCfg, ctx con
 		if initialCycle {
 			pruneTimeout = 12 * time.Hour
 		}
+
 		if _, err = tx.(*temporal.Tx).AggCtx().(*libstate.AggregatorRoTx).PruneSmallBatches(ctx, pruneTimeout, tx); err != nil { // prune part of retired data, before commit
 			return err
 		}
diff --git a/turbo/app/snapshots_cmd.go b/turbo/app/snapshots_cmd.go
index 7092a079e78..ded60aaa3ef 100644
--- a/turbo/app/snapshots_cmd.go
+++ b/turbo/app/snapshots_cmd.go
@@ -820,17 +820,15 @@ func doRetireCommand(cliCtx *cli.Context) error {
 	}

 	logger.Info("Prune state history")
+	ac := agg.BeginFilesRo()
+	defer ac.Close()
 	for hasMoreToPrune := true; hasMoreToPrune; {
-		if err := db.UpdateNosync(ctx, func(tx kv.RwTx) error {
-			ac := agg.BeginFilesRo()
-			defer ac.Close()
-
-			hasMoreToPrune, err = ac.PruneSmallBatches(ctx, 2*time.Minute, tx)
-			return err
-		}); err != nil {
+		hasMoreToPrune, err = ac.PruneSmallBatchesDb(ctx, 2*time.Minute, db)
+		if err != nil {
 			return err
 		}
 	}
+	ac.Close()

 	logger.Info("Work on state history snapshots")
 	indexWorkers := estimate.IndexSnapshot.Workers()
@@ -877,17 +875,16 @@ func doRetireCommand(cliCtx *cli.Context) error {
 	}); err != nil {
 		return err
 	}
-	for hasMoreToPrune := true; hasMoreToPrune; {
-		if err := db.UpdateNosync(ctx, func(tx kv.RwTx) error {
-			ac := agg.BeginFilesRo()
-			defer ac.Close()
-			hasMoreToPrune, err = ac.PruneSmallBatches(context.Background(), 2*time.Minute, tx)
-			return err
-		}); err != nil {
+	ac = agg.BeginFilesRo()
+	defer ac.Close()
+	for hasMoreToPrune := true; hasMoreToPrune; {
+		hasMoreToPrune, err = ac.PruneSmallBatchesDb(context.Background(), 2*time.Minute, db)
+		if err != nil {
 			return err
 		}
 	}
+	ac.Close()

 	if err = agg.MergeLoop(ctx); err != nil {
 		return err

From fd703d5ee7de907789831f6c31a519130806b9bb Mon Sep 17 00:00:00 2001
From: awskii
Date: Fri, 10 May 2024 19:35:13 +0100
Subject: [PATCH 21/48] no use AggTx after dbTxCommitted (#10279)

The Aggregator inside an AggTx becomes nil after the dbTx it's derived
from is Committed/Closed
---
 eth/stagedsync/exec3.go | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/eth/stagedsync/exec3.go b/eth/stagedsync/exec3.go
index dbf701812ea..124835c6068 100644
--- a/eth/stagedsync/exec3.go
+++ b/eth/stagedsync/exec3.go
@@ -881,7 +881,6 @@ Loop:
 			tt = time.Now()
 			applyTx.CollectMetrics()
 			if !useExternalTx {
-				aggCtx := applyTx.(state2.HasAggCtx).AggCtx().(*state2.AggregatorRoTx)
 				tt = time.Now()
 				if err = applyTx.Commit(); err != nil {
 					return err
@@ -892,6 +891,9 @@ Loop:
 					agg.BuildFilesInBackground(outputTxNum.Load())
 				}

+				aggCtx := agg.BeginFilesRo()
+				defer aggCtx.Close()
+
 				tt = time.Now()
 				for haveMoreToPrune := true; haveMoreToPrune; {
 					//very aggressive prune, because:
 					// if prune is slow - means DB > RAM and skip pruning will only make things worse
 					// db will grow -> prune will get slower -> db will grow -> ...

From 150678fbd798e8e828493afb38e77bba35755dc2 Mon Sep 17 00:00:00 2001
From: awskii
Date: Sat, 11 May 2024 01:25:37 +0100
Subject: [PATCH 22/48] fix inverted index pruning (#10276)

- fix anti-pruning during pruning (the collector no longer loads data
  back into the rwtx)
- make History pruning ordered, with less random access
- fewer page spill/unspill cycles
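To illustrate the reordering, a minimal hedged sketch - `pruneOrdered`
and `deleteExact` are assumed names, not Erigon's API - of the
collect-then-delete-in-key-order pattern that replaces per-key random
cursor seeks:

// Hedged sketch - assumed names, not Erigon's actual code. Mirrors the
// idea above: gather (key, txNum) pairs to delete into a sorted buffer
// first (the role played by etl.Collector with a sortable buffer), then
// delete in key order so cursor movement is sequential, not random.
package main

import (
	"fmt"
	"sort"
)

type pruneEntry struct{ key, txNum string }

func pruneOrdered(entries []pruneEntry, deleteExact func(key, txNum string) error) error {
	// phase 1: sort collected entries (stands in for collector.Collect + sort)
	sort.Slice(entries, func(i, j int) bool {
		if entries[i].key != entries[j].key {
			return entries[i].key < entries[j].key
		}
		return entries[i].txNum < entries[j].txNum
	})
	// phase 2: apply deletions in order (stands in for the collector.Load callback)
	for _, e := range entries {
		if err := deleteExact(e.key, e.txNum); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	var order []string
	_ = pruneOrdered(
		[]pruneEntry{{"k2", "9"}, {"k1", "7"}, {"k1", "8"}},
		func(k, t string) error { order = append(order, k+"@"+t); return nil },
	)
	fmt.Println(order) // [k1@7 k1@8 k2@9] - sequential by key
}

Ordered deletion keeps B-tree page access mostly sequential, which is
where the reduction in spilled/unspilled pages comes from.
---
 erigon-lib/state/inverted_index.go | 123 +++++++++++------------
 1 file changed, 45 insertions(+), 78 deletions(-)

diff --git a/erigon-lib/state/inverted_index.go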
b/erigon-lib/state/inverted_index.go index 656b678d590..bb79ec24a23 100644 --- a/erigon-lib/state/inverted_index.go +++ b/erigon-lib/state/inverted_index.go @@ -846,6 +846,10 @@ func (iit *InvertedIndexRoTx) Prune(ctx context.Context, rwTx kv.RwTx, txFrom, t defer cleanup() } + if limit == 0 { + limit = math.MaxUint64 + } + ii := iit.ii //defer func() { // ii.logger.Error("[snapshots] prune index", @@ -856,53 +860,38 @@ func (iit *InvertedIndexRoTx) Prune(ctx context.Context, rwTx kv.RwTx, txFrom, t // "tx until limit", limit) //}() - // do not collect and sort keys if it's History index - var indexWithHistoryValues bool - { - itc, err := rwTx.CursorDupSort(ii.indexTable) - if err != nil { - return nil, err - } - idxValuesCount, err := itc.Count() - itc.Close() - if err != nil { - return nil, err - } - indexWithHistoryValues = idxValuesCount == 0 && fn != nil - } - keysCursor, err := rwTx.RwCursorDupSort(ii.indexKeysTable) if err != nil { return stat, fmt.Errorf("create %s keys cursor: %w", ii.filenameBase, err) } defer keysCursor.Close() - - var txKey [8]byte - binary.BigEndian.PutUint64(txKey[:], txFrom) - k, v, err := keysCursor.Seek(txKey[:]) + keysCursorForDel, err := rwTx.RwCursorDupSort(ii.indexKeysTable) if err != nil { - return nil, err - } - if k == nil { - return nil, nil + return stat, fmt.Errorf("create %s keys cursor: %w", ii.filenameBase, err) } - - txFrom = binary.BigEndian.Uint64(k) - if limit == 0 { - limit = math.MaxUint64 + defer keysCursorForDel.Close() + idxC, err := rwTx.RwCursorDupSort(ii.indexTable) + if err != nil { + return nil, err } - if txFrom >= txTo { - return nil, nil + defer idxC.Close() + idxValuesCount, err := idxC.Count() + if err != nil { + return nil, err } + indexWithValues := idxValuesCount != 0 || fn != nil - collector := etl.NewCollector("snapshots", ii.dirs.Tmp, etl.NewOldestEntryBuffer(etl.BufferOptimalSize/8), ii.logger) + collector := etl.NewCollector("snapshots", ii.dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize/8), ii.logger) defer collector.Close() collector.LogLvl(log.LvlDebug) collector.SortAndFlushInBackground(true) + var txKey [8]byte + binary.BigEndian.PutUint64(txKey[:], txFrom) + // Invariant: if some `txNum=N` pruned - it's pruned Fully // Means: can use DeleteCurrentDuplicates all values of given `txNum` - for ; k != nil; k, v, err = keysCursor.NextNoDup() { + for k, v, err := keysCursor.Seek(txKey[:]); k != nil; k, v, err = keysCursor.NextNoDup() { if err != nil { return nil, fmt.Errorf("iterate over %s index keys: %w", ii.filenameBase, err) } @@ -918,21 +907,15 @@ func (iit *InvertedIndexRoTx) Prune(ctx context.Context, rwTx kv.RwTx, txFrom, t stat.MinTxNum = min(stat.MinTxNum, txNum) stat.MaxTxNum = max(stat.MaxTxNum, txNum) - for ; v != nil; _, v, err = keysCursor.NextDup() { - if err != nil { - return nil, fmt.Errorf("iterate over %s index keys: %w", ii.filenameBase, err) - } - if !indexWithHistoryValues { - if err := collector.Collect(v, nil); err != nil { - return nil, err + if indexWithValues { + for ; v != nil; _, v, err = keysCursor.NextDup() { + if err != nil { + return nil, fmt.Errorf("iterate over %s index keys: %w", ii.filenameBase, err) } - } - if fn != nil { - if err := fn(v, k); err != nil { + if err := collector.Collect(v, k); err != nil { return nil, err } } - stat.PruneCountValues++ } stat.PruneCountTx++ @@ -945,9 +928,7 @@ func (iit *InvertedIndexRoTx) Prune(ctx context.Context, rwTx kv.RwTx, txFrom, t return nil, ctx.Err() } } - - if indexWithHistoryValues { - // empty indexTable, no need to collect and 
prune keys out of there
+	if !indexWithValues {
 		return stat, nil
 	}

@@ -956,46 +937,32 @@ func (iit *InvertedIndexRoTx) Prune(ctx context.Context, rwTx kv.RwTx, txFrom, t
 		return nil, err
 	}
 	defer idxCForDeletes.Close()
-	idxC, err := rwTx.RwCursorDupSort(ii.indexTable)
-	if err != nil {
-		return nil, err
-	}
-	defer idxC.Close()
-
-	binary.BigEndian.PutUint64(txKey[:], stat.MinTxNum)
-	err = collector.Load(rwTx, "", func(key, _ []byte, table etl.CurrentTableReader, next etl.LoadNextFunc) error {
-		for txnm, err := idxC.SeekBothRange(key, txKey[:]); txnm != nil; _, txnm, err = idxC.NextDup() {
-			if err != nil {
-				return err
-			}
-			txNum := binary.BigEndian.Uint64(txnm)
-			if txNum < stat.MinTxNum {
-				continue // to bigger txnums
-			}
-			if txNum > stat.MaxTxNum {
-				return nil // go to next key
-			}
-			if _, _, err = idxCForDeletes.SeekBothExact(key, txnm); err != nil {
-				return err
+	binary.BigEndian.PutUint64(txKey[:], txFrom)
+	err = collector.Load(nil, "", func(key, txnm []byte, table etl.CurrentTableReader, next etl.LoadNextFunc) error {
+		if fn != nil {
+			if err = fn(key, txnm); err != nil {
+				return fmt.Errorf("fn error: %w", err)
 			}
-			if err = idxCForDeletes.DeleteCurrent(); err != nil {
+		}
+		if idxValuesCount > 0 {
+			if err = idxCForDeletes.DeleteExact(key, txnm); err != nil {
 				return err
 			}
-			mxPruneSizeIndex.Inc()
+		}
+		mxPruneSizeIndex.Inc()
+		stat.PruneCountValues++

-			select {
-			case <-logEvery.C:
-				ii.logger.Info("[snapshots] prune index", "name", ii.filenameBase, "pruned tx", stat.PruneCountTx,
-					"pruned values", stat.PruneCountValues,
-					"steps", fmt.Sprintf("%.2f-%.2f", float64(txFrom)/float64(ii.aggregationStep), float64(txNum)/float64(ii.aggregationStep)))
-			case <-ctx.Done():
-				return ctx.Err()
-			default:
-			}
+		select {
+		case <-logEvery.C:
+			txNum := binary.BigEndian.Uint64(txnm)
+			ii.logger.Info("[snapshots] prune index", "name", ii.filenameBase, "pruned tx", stat.PruneCountTx,
+				"pruned values", stat.PruneCountValues,
+				"steps", fmt.Sprintf("%.2f-%.2f", float64(txFrom)/float64(ii.aggregationStep), float64(txNum)/float64(ii.aggregationStep)))
+		default:
 		}
 		return nil
-	}, etl.TransformArgs{})
+	}, etl.TransformArgs{Quit: ctx.Done()})

 	return stat, err
 }

From 757fa7dc9dc07f0cf8c7224626a9f0e41b08a67e Mon Sep 17 00:00:00 2001
From: Mark Holt <135143369+mh0lt@users.noreply.github.com>
Date: Sat, 11 May 2024 03:55:39 +0100
Subject: [PATCH 23/48] Add flag for bor waypoint types (#10281)

This adds additional flag processing to disable waypoint snapshots if
`bor.waypoints` is not set

Co-authored-by: alex.sharov
---
 cmd/utils/flags.go                    |  2 ++
 migrations/prohibit_new_downloads2.go |  2 +-
 polygon/bor/snaptype/types.go         | 23 ++++++++++++++++---
 .../freezeblocks/bor_snapshots.go     |  6 ++---
 4 files changed, 26 insertions(+), 7 deletions(-)

diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index 9ffdf618a00..e68c27afba2 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -60,6 +60,7 @@ import (
 	"github.com/ledgerwatch/erigon/p2p/nat"
 	"github.com/ledgerwatch/erigon/p2p/netutil"
 	"github.com/ledgerwatch/erigon/params"
+	borsnaptype "github.com/ledgerwatch/erigon/polygon/bor/snaptype"
 	"github.com/ledgerwatch/erigon/rpc/rpccfg"
 	"github.com/ledgerwatch/erigon/turbo/logging"
 )
@@ -1578,6 +1579,7 @@ func setBorConfig(ctx *cli.Context, cfg *ethconfig.Config) {
 	cfg.WithoutHeimdall = ctx.Bool(WithoutHeimdallFlag.Name)
 	cfg.WithHeimdallMilestones = ctx.Bool(WithHeimdallMilestones.Name)
 	cfg.WithHeimdallWaypointRecording = ctx.Bool(WithHeimdallWaypoints.Name)
+
borsnaptype.RecordWayPoints(cfg.WithHeimdallWaypointRecording) cfg.PolygonSync = ctx.Bool(PolygonSyncFlag.Name) cfg.PolygonSyncStage = ctx.Bool(PolygonSyncStageFlag.Name) } diff --git a/migrations/prohibit_new_downloads2.go b/migrations/prohibit_new_downloads2.go index 22cd00372a9..e278fa71113 100644 --- a/migrations/prohibit_new_downloads2.go +++ b/migrations/prohibit_new_downloads2.go @@ -45,7 +45,7 @@ var ProhibitNewDownloadsLock2 = Migration{ locked = append(locked, t.Name()) } - for _, t := range borsnaptype.BorSnapshotTypes { + for _, t := range borsnaptype.BorSnapshotTypes() { locked = append(locked, t.Name()) } diff --git a/polygon/bor/snaptype/types.go b/polygon/bor/snaptype/types.go index f0f994ec68f..c3abcde1a70 100644 --- a/polygon/bor/snaptype/types.go +++ b/polygon/bor/snaptype/types.go @@ -31,7 +31,11 @@ import ( ) func init() { - borTypes := append(coresnaptype.BlockSnapshotTypes, BorSnapshotTypes...) + initTypes() +} + +func initTypes() { + borTypes := append(coresnaptype.BlockSnapshotTypes, BorSnapshotTypes()...) snapcfg.RegisterKnownTypes(networkname.MumbaiChainName, borTypes) snapcfg.RegisterKnownTypes(networkname.AmoyChainName, borTypes) @@ -402,10 +406,23 @@ var ( return buildValueIndex(ctx, sn, salt, d, firstMilestoneId, tmpDir, p, lvl, logger) }), ) - - BorSnapshotTypes = []snaptype.Type{BorEvents, BorSpans, BorCheckpoints, BorMilestones} ) +var recordWaypoints bool + +func RecordWayPoints(value bool) { + recordWaypoints = value + initTypes() +} + +func BorSnapshotTypes() []snaptype.Type { + if recordWaypoints { + return []snaptype.Type{BorEvents, BorSpans, BorCheckpoints, BorMilestones} + } + + return []snaptype.Type{BorEvents, BorSpans} +} + func extractValueRange(ctx context.Context, table string, valueFrom, valueTo uint64, db kv.RoDB, collect func([]byte) error, workers int, lvl log.Lvl, logger log.Logger) (uint64, error) { logEvery := time.NewTicker(20 * time.Second) defer logEvery.Stop() diff --git a/turbo/snapshotsync/freezeblocks/bor_snapshots.go b/turbo/snapshotsync/freezeblocks/bor_snapshots.go index c0cf858d15b..178471742c0 100644 --- a/turbo/snapshotsync/freezeblocks/bor_snapshots.go +++ b/turbo/snapshotsync/freezeblocks/bor_snapshots.go @@ -101,7 +101,7 @@ func (br *BlockRetire) retireBorBlocks(ctx context.Context, minBlockNum uint64, return nil } - err := merger.Merge(ctx, &snapshots.RoSnapshots, borsnaptype.BorSnapshotTypes, rangesToMerge, snapshots.Dir(), true /* doIndex */, onMerge, onDelete) + err := merger.Merge(ctx, &snapshots.RoSnapshots, borsnaptype.BorSnapshotTypes(), rangesToMerge, snapshots.Dir(), true /* doIndex */, onMerge, onDelete) if err != nil { return blocksRetired, err @@ -127,7 +127,7 @@ type BorRoSnapshots struct { // - gaps are not allowed // - segment have [from:to] semantic func NewBorRoSnapshots(cfg ethconfig.BlocksFreezing, snapDir string, segmentsMin uint64, logger log.Logger) *BorRoSnapshots { - return &BorRoSnapshots{*newRoSnapshots(cfg, snapDir, borsnaptype.BorSnapshotTypes, segmentsMin, logger)} + return &BorRoSnapshots{*newRoSnapshots(cfg, snapDir, borsnaptype.BorSnapshotTypes(), segmentsMin, logger)} } func (s *BorRoSnapshots) Ranges() []Range { @@ -199,7 +199,7 @@ func removeBorOverlaps(dir string, active []snaptype.FileInfo, max uint64) { } func (s *BorRoSnapshots) ReopenFolder() error { - files, _, err := typedSegments(s.dir, s.segmentsMin.Load(), borsnaptype.BorSnapshotTypes, false) + files, _, err := typedSegments(s.dir, s.segmentsMin.Load(), borsnaptype.BorSnapshotTypes(), false) if err != nil { return err } From 
f61604c630dab1054e34ee08b1f03c8c30712974 Mon Sep 17 00:00:00 2001
From: Alex Sharov
Date: Sat, 11 May 2024 10:15:44 +0700
Subject: [PATCH 24/48] e3: SharedDomains.SetTx also must set aggCtx field
 (#10273)

because tx and aggCtx are bound to each other, and aggCtx even has a
cursor pool inside GetLatest.

Also renamed `HasAggCtx` to `HasAggTx`

---------

Co-authored-by: awskii
Co-authored-by: awskii
---
 cmd/integration/commands/stages.go        |   4 +-
 core/chain_makers.go                      |   4 +-
 core/state/rw_v3.go                       |   2 +-
 .../kv/membatchwithdb/memory_mutation.go   |   6 +-
 erigon-lib/kv/temporal/kv_temporal.go      |   2 +-
 erigon-lib/state/aggregator_bench_test.go  |   2 +-
 erigon-lib/state/domain.go                 |  12 +-
 erigon-lib/state/domain_shared.go          | 130 +++++++++---------
 eth/integrity/e3_ef_files.go               |   2 +-
 eth/integrity/e3_history_no_system_txs.go  |   2 +-
 eth/stagedsync/exec3.go                    |  10 +-
 eth/stagedsync/stage_execute.go            |   5 +-
 eth/stagedsync/stage_headers.go            |   2 +-
 eth/stagedsync/stage_snapshots.go          |   2 +-
 eth/stagedsync/stage_trie3.go              |   4 +-
 eth/stagedsync/sync.go                     |   4 +-
 16 files changed, 99 insertions(+), 94 deletions(-)

diff --git a/cmd/integration/commands/stages.go b/cmd/integration/commands/stages.go
index 3414e4a7e10..2c9505d9b76 100644
--- a/cmd/integration/commands/stages.go
+++ b/cmd/integration/commands/stages.go
@@ -1200,7 +1200,7 @@ func stageExec(db kv.RwDB, ctx context.Context, logger log.Logger) error {
 	if unwind > 0 && historyV3 {
 		if err := db.View(ctx, func(tx kv.Tx) error {
-			blockNumWithCommitment, ok, err := tx.(libstate.HasAggCtx).AggCtx().(*libstate.AggregatorRoTx).CanUnwindBeforeBlockNum(s.BlockNumber-unwind, tx)
+			blockNumWithCommitment, ok, err := tx.(libstate.HasAggTx).AggTx().(*libstate.AggregatorRoTx).CanUnwindBeforeBlockNum(s.BlockNumber-unwind, tx)
 			if err != nil {
 				return err
 			}
@@ -1307,7 +1307,7 @@ func stageCustomTrace(db kv.RwDB, ctx context.Context, logger log.Logger) error
 	if unwind > 0 && historyV3 {
 		if err := db.View(ctx, func(tx kv.Tx) error {
-			blockNumWithCommitment, ok, err := tx.(libstate.HasAggCtx).AggCtx().(*libstate.AggregatorRoTx).CanUnwindBeforeBlockNum(s.BlockNumber-unwind, tx)
+			blockNumWithCommitment, ok, err := tx.(libstate.HasAggTx).AggTx().(*libstate.AggregatorRoTx).CanUnwindBeforeBlockNum(s.BlockNumber-unwind, tx)
 			if err != nil {
 				return err
 			}
diff --git a/core/chain_makers.go b/core/chain_makers.go
index 5e3c6447d08..6b1054c75c8 100644
--- a/core/chain_makers.go
+++ b/core/chain_makers.go
@@ -477,7 +477,7 @@ func CalcHashRootForTests(tx kv.RwTx, header *types.Header, histV4, trace bool)
 	h := libcommon.NewHasher()
 	defer libcommon.ReturnHasherToPool(h)

-	it, err := tx.(state2.HasAggCtx).AggCtx().(*state2.AggregatorRoTx).DomainRangeLatest(tx, kv.AccountsDomain, nil, nil, -1)
+	it, err := tx.(state2.HasAggTx).AggTx().(*state2.AggregatorRoTx).DomainRangeLatest(tx, kv.AccountsDomain, nil, nil, -1)
 	if err != nil {
 		return libcommon.Hash{}, err
 	}
@@ -502,7 +502,7 @@ func CalcHashRootForTests(tx kv.RwTx, header *types.Header, histV4, trace bool)
 		}
 	}

-	it, err = tx.(state2.HasAggCtx).AggCtx().(*state2.AggregatorRoTx).DomainRangeLatest(tx, kv.StorageDomain, nil, nil, -1)
+	it, err = tx.(state2.HasAggTx).AggTx().(*state2.AggregatorRoTx).DomainRangeLatest(tx, kv.StorageDomain, nil, nil, -1)
 	if err != nil {
 		return libcommon.Hash{}, err
 	}
diff --git a/core/state/rw_v3.go b/core/state/rw_v3.go
index 71ec3b7a4e3..3b149ed8fbe 100644
--- a/core/state/rw_v3.go
+++ b/core/state/rw_v3.go
@@ -249,7 +249,7 @@ func (rs *StateV3) ApplyLogsAndTraces4(txTask *TxTask, domains *libstate.SharedD
 }

 func (rs *StateV3)
Unwind(ctx context.Context, tx kv.RwTx, blockUnwindTo, txUnwindTo uint64, accumulator *shards.Accumulator) error { - unwindToLimit := tx.(libstate.HasAggCtx).AggCtx().(*libstate.AggregatorRoTx).CanUnwindDomainsToTxNum() + unwindToLimit := tx.(libstate.HasAggTx).AggTx().(*libstate.AggregatorRoTx).CanUnwindDomainsToTxNum() if txUnwindTo < unwindToLimit { return fmt.Errorf("can't unwind to txNum=%d, limit is %d", txUnwindTo, unwindToLimit) } diff --git a/erigon-lib/kv/membatchwithdb/memory_mutation.go b/erigon-lib/kv/membatchwithdb/memory_mutation.go index 63d32895e31..ecfe85b92b8 100644 --- a/erigon-lib/kv/membatchwithdb/memory_mutation.go +++ b/erigon-lib/kv/membatchwithdb/memory_mutation.go @@ -717,11 +717,11 @@ func (m *MemoryMutation) CHandle() unsafe.Pointer { } type hasAggCtx interface { - AggCtx() interface{} + AggTx() interface{} } -func (m *MemoryMutation) AggCtx() interface{} { - return m.db.(hasAggCtx).AggCtx() +func (m *MemoryMutation) AggTx() interface{} { + return m.db.(hasAggCtx).AggTx() } func (m *MemoryMutation) DomainGet(name kv.Domain, k, k2 []byte) (v []byte, step uint64, err error) { diff --git a/erigon-lib/kv/temporal/kv_temporal.go b/erigon-lib/kv/temporal/kv_temporal.go index a15c91c1ada..f9acad0e70e 100644 --- a/erigon-lib/kv/temporal/kv_temporal.go +++ b/erigon-lib/kv/temporal/kv_temporal.go @@ -156,7 +156,7 @@ func (tx *Tx) ForceReopenAggCtx() { func (tx *Tx) WarmupDB(force bool) error { return tx.MdbxTx.WarmupDB(force) } func (tx *Tx) LockDBInRam() error { return tx.MdbxTx.LockDBInRam() } -func (tx *Tx) AggCtx() interface{} { return tx.aggCtx } +func (tx *Tx) AggTx() interface{} { return tx.aggCtx } func (tx *Tx) Agg() *state.Aggregator { return tx.db.agg } func (tx *Tx) Rollback() { tx.autoClose() diff --git a/erigon-lib/state/aggregator_bench_test.go b/erigon-lib/state/aggregator_bench_test.go index 7d641c14a2a..bf9c7fa15c6 100644 --- a/erigon-lib/state/aggregator_bench_test.go +++ b/erigon-lib/state/aggregator_bench_test.go @@ -44,7 +44,7 @@ type txWithCtx struct { } func WrapTxWithCtx(tx kv.Tx, ctx *AggregatorRoTx) *txWithCtx { return &txWithCtx{Tx: tx, ac: ctx} } -func (tx *txWithCtx) AggCtx() interface{} { return tx.ac } +func (tx *txWithCtx) AggTx() interface{} { return tx.ac } func BenchmarkAggregator_Processing(b *testing.B) { ctx, cancel := context.WithCancel(context.Background()) diff --git a/erigon-lib/state/domain.go b/erigon-lib/state/domain.go index b18c10a8fe0..59e5ee2d4ff 100644 --- a/erigon-lib/state/domain.go +++ b/erigon-lib/state/domain.go @@ -1447,7 +1447,7 @@ func (dt *DomainRoTx) valsCursor(tx kv.Tx) (c kv.Cursor, err error) { } dt.valsC, err = tx.Cursor(dt.d.valsTable) if err != nil { - return nil, err + return nil, fmt.Errorf("valsCursor: %w", err) } return dt.valsC, nil } @@ -1458,7 +1458,7 @@ func (dt *DomainRoTx) keysCursor(tx kv.Tx) (c kv.CursorDupSort, err error) { } dt.keysC, err = tx.CursorDupSort(dt.d.keysTable) if err != nil { - return nil, err + return nil, fmt.Errorf("keysCursor: %w", err) } return dt.keysC, nil } @@ -1529,7 +1529,7 @@ func (dt *DomainRoTx) GetLatest(key1, key2 []byte, roTx kv.Tx) ([]byte, uint64, v, foundStep, found, err = dt.getLatestFromDb(key, roTx) if err != nil { - return nil, 0, false, err + return nil, 0, false, fmt.Errorf("getLatestFromDb: %w", err) } if found { return v, foundStep, true, nil @@ -1537,7 +1537,7 @@ func (dt *DomainRoTx) GetLatest(key1, key2 []byte, roTx kv.Tx) ([]byte, uint64, v, foundInFile, _, endTxNum, err := dt.getFromFiles(key) if err != nil { - return nil, 0, false, err + return 
nil, 0, false, fmt.Errorf("getFromFiles: %w", err) } return v, endTxNum / dt.d.aggregationStep, foundInFile, nil } @@ -1690,11 +1690,11 @@ func (dt *DomainRoTx) DomainRange(tx kv.Tx, fromKey, toKey []byte, ts uint64, as if !asc { panic("implement me") } - //histStateIt, err := tx.aggCtx.AccountHistoricalStateRange(asOfTs, fromKey, toKey, limit, tx.MdbxTx) + //histStateIt, err := tx.aggTx.AccountHistoricalStateRange(asOfTs, fromKey, toKey, limit, tx.MdbxTx) //if err != nil { // return nil, err //} - //lastestStateIt, err := tx.aggCtx.DomainRangeLatest(tx.MdbxTx, kv.AccountDomain, fromKey, toKey, limit) + //lastestStateIt, err := tx.aggTx.DomainRangeLatest(tx.MdbxTx, kv.AccountDomain, fromKey, toKey, limit) //if err != nil { // return nil, err //} diff --git a/erigon-lib/state/domain_shared.go b/erigon-lib/state/domain_shared.go index 30fe130a958..311a3198db2 100644 --- a/erigon-lib/state/domain_shared.go +++ b/erigon-lib/state/domain_shared.go @@ -56,7 +56,7 @@ func (l *KvList) Swap(i, j int) { type SharedDomains struct { noFlush int - aggCtx *AggregatorRoTx + aggTx *AggregatorRoTx sdCtx *SharedDomainsCommitmentContext roTx kv.Tx logger log.Logger @@ -78,34 +78,24 @@ type SharedDomains struct { tracesToWriter *invertedIndexBufferedWriter } -type HasAggCtx interface { - AggCtx() interface{} +type HasAggTx interface { + AggTx() interface{} } func NewSharedDomains(tx kv.Tx, logger log.Logger) (*SharedDomains, error) { - var ac *AggregatorRoTx - if casted, ok := tx.(HasAggCtx); ok { - ac = casted.AggCtx().(*AggregatorRoTx) - } else { - return nil, fmt.Errorf("type %T need AggCtx method", tx) - } - if tx == nil { - return nil, fmt.Errorf("tx is nil") - } - sd := &SharedDomains{ - logger: logger, - aggCtx: ac, - roTx: tx, - //trace: true, - logAddrsWriter: ac.logAddrs.NewWriter(), - logTopicsWriter: ac.logTopics.NewWriter(), - tracesFromWriter: ac.tracesFrom.NewWriter(), - tracesToWriter: ac.tracesTo.NewWriter(), - + logger: logger, storage: btree2.NewMap[string, []byte](128), + //trace: true, } - for id, d := range ac.d { + sd.SetTx(tx) + + sd.logAddrsWriter = sd.aggTx.logAddrs.NewWriter() + sd.logTopicsWriter = sd.aggTx.logTopics.NewWriter() + sd.tracesFromWriter = sd.aggTx.tracesFrom.NewWriter() + sd.tracesToWriter = sd.aggTx.tracesTo.NewWriter() + + for id, d := range sd.aggTx.d { sd.domains[id] = map[string][]byte{} sd.dWriter[id] = d.NewWriter() } @@ -119,37 +109,37 @@ func NewSharedDomains(tx kv.Tx, logger log.Logger) (*SharedDomains, error) { return sd, nil } -func (sd *SharedDomains) AggCtx() interface{} { return sd.aggCtx } +func (sd *SharedDomains) AggTx() interface{} { return sd.aggTx } -// aggregator context should call aggCtx.Unwind before this one. +// aggregator context should call aggTx.Unwind before this one. 
func (sd *SharedDomains) Unwind(ctx context.Context, rwTx kv.RwTx, blockUnwindTo, txUnwindTo uint64) error { - step := txUnwindTo / sd.aggCtx.a.StepSize() + step := txUnwindTo / sd.aggTx.a.StepSize() logEvery := time.NewTicker(30 * time.Second) defer logEvery.Stop() - sd.aggCtx.a.logger.Info("aggregator unwind", "step", step, - "txUnwindTo", txUnwindTo, "stepsRangeInDB", sd.aggCtx.a.StepsRangeInDBAsStr(rwTx)) - //fmt.Printf("aggregator unwind step %d txUnwindTo %d stepsRangeInDB %s\n", step, txUnwindTo, sd.aggCtx.a.StepsRangeInDBAsStr(rwTx)) + sd.aggTx.a.logger.Info("aggregator unwind", "step", step, + "txUnwindTo", txUnwindTo, "stepsRangeInDB", sd.aggTx.a.StepsRangeInDBAsStr(rwTx)) + //fmt.Printf("aggregator unwind step %d txUnwindTo %d stepsRangeInDB %s\n", step, txUnwindTo, sd.aggTx.a.StepsRangeInDBAsStr(rwTx)) if err := sd.Flush(ctx, rwTx); err != nil { return err } withWarmup := false - for _, d := range sd.aggCtx.d { + for _, d := range sd.aggTx.d { if err := d.Unwind(ctx, rwTx, step, txUnwindTo); err != nil { return err } } - if _, err := sd.aggCtx.logAddrs.Prune(ctx, rwTx, txUnwindTo, math.MaxUint64, math.MaxUint64, logEvery, true, withWarmup, nil); err != nil { + if _, err := sd.aggTx.logAddrs.Prune(ctx, rwTx, txUnwindTo, math.MaxUint64, math.MaxUint64, logEvery, true, withWarmup, nil); err != nil { return err } - if _, err := sd.aggCtx.logTopics.Prune(ctx, rwTx, txUnwindTo, math.MaxUint64, math.MaxUint64, logEvery, true, withWarmup, nil); err != nil { + if _, err := sd.aggTx.logTopics.Prune(ctx, rwTx, txUnwindTo, math.MaxUint64, math.MaxUint64, logEvery, true, withWarmup, nil); err != nil { return err } - if _, err := sd.aggCtx.tracesFrom.Prune(ctx, rwTx, txUnwindTo, math.MaxUint64, math.MaxUint64, logEvery, true, withWarmup, nil); err != nil { + if _, err := sd.aggTx.tracesFrom.Prune(ctx, rwTx, txUnwindTo, math.MaxUint64, math.MaxUint64, logEvery, true, withWarmup, nil); err != nil { return err } - if _, err := sd.aggCtx.tracesTo.Prune(ctx, rwTx, txUnwindTo, math.MaxUint64, math.MaxUint64, logEvery, true, withWarmup, nil); err != nil { + if _, err := sd.aggTx.tracesTo.Prune(ctx, rwTx, txUnwindTo, math.MaxUint64, math.MaxUint64, logEvery, true, withWarmup, nil); err != nil { return err } @@ -160,7 +150,7 @@ func (sd *SharedDomains) Unwind(ctx context.Context, rwTx kv.RwTx, blockUnwindTo } func (sd *SharedDomains) rebuildCommitment(ctx context.Context, roTx kv.Tx, blockNum uint64) ([]byte, error) { - it, err := sd.aggCtx.AccountHistoryRange(int(sd.TxNum()), math.MaxInt64, order.Asc, -1, roTx) + it, err := sd.aggTx.AccountHistoryRange(int(sd.TxNum()), math.MaxInt64, order.Asc, -1, roTx) if err != nil { return nil, err } @@ -172,7 +162,7 @@ func (sd *SharedDomains) rebuildCommitment(ctx context.Context, roTx kv.Tx, bloc sd.sdCtx.TouchKey(kv.AccountsDomain, string(k), nil) } - it, err = sd.aggCtx.StorageHistoryRange(int(sd.TxNum()), math.MaxInt64, order.Asc, -1, roTx) + it, err = sd.aggTx.StorageHistoryRange(int(sd.TxNum()), math.MaxInt64, order.Asc, -1, roTx) if err != nil { return nil, err } @@ -191,7 +181,7 @@ func (sd *SharedDomains) rebuildCommitment(ctx context.Context, roTx kv.Tx, bloc // SeekCommitment lookups latest available commitment and sets it as current func (sd *SharedDomains) SeekCommitment(ctx context.Context, tx kv.Tx) (txsFromBlockBeginning uint64, err error) { - bn, txn, ok, err := sd.sdCtx.SeekCommitment(tx, sd.aggCtx.d[kv.CommitmentDomain], 0, math.MaxUint64) + bn, txn, ok, err := sd.sdCtx.SeekCommitment(tx, sd.aggTx.d[kv.CommitmentDomain], 0, math.MaxUint64) 
if err != nil { return 0, err } @@ -307,7 +297,7 @@ func (sd *SharedDomains) LatestCommitment(prefix []byte) ([]byte, uint64, error) // sd cache values as is (without transformation) so safe to return return v, 0, nil } - v, step, found, err := sd.aggCtx.d[kv.CommitmentDomain].getLatestFromDb(prefix, sd.roTx) + v, step, found, err := sd.aggTx.d[kv.CommitmentDomain].getLatestFromDb(prefix, sd.roTx) if err != nil { return nil, 0, fmt.Errorf("commitment prefix %x read error: %w", prefix, err) } @@ -318,12 +308,12 @@ func (sd *SharedDomains) LatestCommitment(prefix []byte) ([]byte, uint64, error) // GetfromFiles doesn't provide same semantics as getLatestFromDB - it returns start/end tx // of file where the value is stored (not exact step when kv has been set) - v, _, startTx, endTx, err := sd.aggCtx.d[kv.CommitmentDomain].getFromFiles(prefix) + v, _, startTx, endTx, err := sd.aggTx.d[kv.CommitmentDomain].getFromFiles(prefix) if err != nil { return nil, 0, fmt.Errorf("commitment prefix %x read error: %w", prefix, err) } - if !sd.aggCtx.a.commitmentValuesTransform || bytes.Equal(prefix, keyCommitmentState) { + if !sd.aggTx.a.commitmentValuesTransform || bytes.Equal(prefix, keyCommitmentState) { return v, endTx, nil } @@ -332,25 +322,25 @@ func (sd *SharedDomains) LatestCommitment(prefix []byte) ([]byte, uint64, error) if err != nil { return nil, 0, err } - return rv, endTx / sd.aggCtx.a.StepSize(), nil + return rv, endTx / sd.aggTx.a.StepSize(), nil } // replaceShortenedKeysInBranch replaces shortened keys in the branch with full keys func (sd *SharedDomains) replaceShortenedKeysInBranch(prefix []byte, branch commitment.BranchData, fStartTxNum uint64, fEndTxNum uint64) (commitment.BranchData, error) { - if !sd.aggCtx.d[kv.CommitmentDomain].d.replaceKeysInValues && sd.aggCtx.a.commitmentValuesTransform { + if !sd.aggTx.d[kv.CommitmentDomain].d.replaceKeysInValues && sd.aggTx.a.commitmentValuesTransform { panic("domain.replaceKeysInValues is disabled, but agg.commitmentValuesTransform is enabled") } - if !sd.aggCtx.a.commitmentValuesTransform || + if !sd.aggTx.a.commitmentValuesTransform || len(branch) == 0 || - sd.aggCtx.minimaxTxNumInDomainFiles(false) == 0 || + sd.aggTx.minimaxTxNumInDomainFiles(false) == 0 || bytes.Equal(prefix, keyCommitmentState) { return branch, nil // do not transform, return as is } - sto := sd.aggCtx.d[kv.StorageDomain] - acc := sd.aggCtx.d[kv.AccountsDomain] + sto := sd.aggTx.d[kv.StorageDomain] + acc := sd.aggTx.d[kv.AccountsDomain] storageItem := sto.lookupFileByItsRange(fStartTxNum, fEndTxNum) accountItem := acc.lookupFileByItsRange(fStartTxNum, fEndTxNum) storageGetter := NewArchiveGetter(storageItem.decompressor.MakeGetter(), sto.d.compression) @@ -365,7 +355,7 @@ func (sd *SharedDomains) replaceShortenedKeysInBranch(prefix []byte, branch comm // Optimised key referencing a state file record (file number and offset within the file) storagePlainKey, found := sto.lookupByShortenedKey(key, storageGetter) if !found { - s0, s1 := fStartTxNum/sd.aggCtx.a.StepSize(), fEndTxNum/sd.aggCtx.a.StepSize() + s0, s1 := fStartTxNum/sd.aggTx.a.StepSize(), fEndTxNum/sd.aggTx.a.StepSize() sd.logger.Crit("replace back lost storage full key", "shortened", fmt.Sprintf("%x", key), "decoded", fmt.Sprintf("step %d-%d; offt %d", s0, s1, decodeShorterKey(key))) return nil, fmt.Errorf("replace back lost storage full key: %x", key) @@ -379,7 +369,7 @@ func (sd *SharedDomains) replaceShortenedKeysInBranch(prefix []byte, branch comm apkBuf, found := acc.lookupByShortenedKey(key, accountGetter) 
if !found { - s0, s1 := fStartTxNum/sd.aggCtx.a.StepSize(), fEndTxNum/sd.aggCtx.a.StepSize() + s0, s1 := fStartTxNum/sd.aggTx.a.StepSize(), fEndTxNum/sd.aggTx.a.StepSize() sd.logger.Crit("replace back lost account full key", "shortened", fmt.Sprintf("%x", key), "decoded", fmt.Sprintf("step %d-%d; offt %d", s0, s1, decodeShorterKey(key))) return nil, fmt.Errorf("replace back lost account full key: %x", key) @@ -521,8 +511,24 @@ func (sd *SharedDomains) IndexAdd(table kv.InvertedIdx, key []byte) (err error) return err } -func (sd *SharedDomains) SetTx(tx kv.RwTx) { sd.roTx = tx } -func (sd *SharedDomains) StepSize() uint64 { return sd.aggCtx.a.StepSize() } +func (sd *SharedDomains) SetTx(tx kv.Tx) { + if tx == nil { + panic(fmt.Errorf("tx is nil")) + } + sd.roTx = tx + + casted, ok := tx.(HasAggTx) + if !ok { + panic(fmt.Errorf("type %T need AggTx method", tx)) + } + + sd.aggTx = casted.AggTx().(*AggregatorRoTx) + if sd.aggTx == nil { + panic(fmt.Errorf("aggtx is nil")) + } +} + +func (sd *SharedDomains) StepSize() uint64 { return sd.aggTx.a.StepSize() } // SetTxNum sets txNum for all domains as well as common txNum for all domains // Requires for sd.rwTx because of commitment evaluation in shared domains if aggregationStep is reached @@ -593,7 +599,7 @@ func (sd *SharedDomains) IterateStoragePrefix(prefix []byte, it func(k []byte, v } roTx := sd.roTx - keysCursor, err := roTx.CursorDupSort(sd.aggCtx.a.d[kv.StorageDomain].keysTable) + keysCursor, err := roTx.CursorDupSort(sd.aggTx.a.d[kv.StorageDomain].keysTable) if err != nil { return err } @@ -611,13 +617,13 @@ func (sd *SharedDomains) IterateStoragePrefix(prefix []byte, it func(k []byte, v keySuffix := make([]byte, len(k)+8) copy(keySuffix, k) copy(keySuffix[len(k):], v) - if v, err = roTx.GetOne(sd.aggCtx.a.d[kv.StorageDomain].valsTable, keySuffix); err != nil { + if v, err = roTx.GetOne(sd.aggTx.a.d[kv.StorageDomain].valsTable, keySuffix); err != nil { return err } heap.Push(cpPtr, &CursorItem{t: DB_CURSOR, key: common.Copy(k), val: common.Copy(v), step: step, c: keysCursor, endTxNum: endTxNum, reverse: true}) } - sctx := sd.aggCtx.d[kv.StorageDomain] + sctx := sd.aggTx.d[kv.StorageDomain] for i, item := range sctx.files { cursor, err := item.src.bindex.Seek(sctx.statelessGetter(i), prefix) if err != nil { @@ -691,7 +697,7 @@ func (sd *SharedDomains) IterateStoragePrefix(prefix []byte, it func(k []byte, v keySuffix := make([]byte, len(k)+8) copy(keySuffix, k) copy(keySuffix[len(k):], v) - if v, err = roTx.GetOne(sd.aggCtx.a.d[kv.StorageDomain].valsTable, keySuffix); err != nil { + if v, err = roTx.GetOne(sd.aggTx.a.d[kv.StorageDomain].valsTable, keySuffix); err != nil { return err } ci1.val = common.Copy(v) @@ -711,7 +717,7 @@ func (sd *SharedDomains) IterateStoragePrefix(prefix []byte, it func(k []byte, v func (sd *SharedDomains) Close() { sd.SetBlockNum(0) - if sd.aggCtx != nil { + if sd.aggTx != nil { sd.SetTxNum(0) //sd.walLock.Lock() @@ -743,7 +749,7 @@ func (sd *SharedDomains) Flush(ctx context.Context, tx kv.RwTx) error { } if sd.trace { _, f, l, _ := runtime.Caller(1) - fmt.Printf("[SD aggCtx=%d] FLUSHING at tx %d [%x], caller %s:%d\n", sd.aggCtx.id, sd.TxNum(), fh, filepath.Base(f), l) + fmt.Printf("[SD aggTx=%d] FLUSHING at tx %d [%x], caller %s:%d\n", sd.aggTx.id, sd.TxNum(), fh, filepath.Base(f), l) } for _, d := range sd.dWriter { if d != nil { @@ -765,7 +771,7 @@ func (sd *SharedDomains) Flush(ctx context.Context, tx kv.RwTx) error { return err } if dbg.PruneOnFlushTimeout != 0 { - _, err = sd.aggCtx.PruneSmallBatches(ctx, 
dbg.PruneOnFlushTimeout, tx) + _, err = sd.aggTx.PruneSmallBatches(ctx, dbg.PruneOnFlushTimeout, tx) if err != nil { return err } @@ -796,7 +802,7 @@ func (sd *SharedDomains) DomainGet(domain kv.Domain, k, k2 []byte) (v []byte, st if v, ok := sd.get(domain, k); ok { return v, 0, nil } - v, step, _, err = sd.aggCtx.GetLatest(domain, k, nil, sd.roTx) + v, step, _, err = sd.aggTx.GetLatest(domain, k, nil, sd.roTx) if err != nil { return nil, 0, fmt.Errorf("storage %x read error: %w", k, err) } @@ -925,7 +931,7 @@ func NewSharedDomainsCommitmentContext(sd *SharedDomains, mode commitment.Mode, keccak: sha3.NewLegacyKeccak256().(cryptozerocopy.KeccakState), } - ctx.patriciaTrie, ctx.updates = commitment.InitializeTrieAndUpdateTree(trieVariant, mode, sd.aggCtx.a.tmpdir) + ctx.patriciaTrie, ctx.updates = commitment.InitializeTrieAndUpdateTree(trieVariant, mode, sd.aggTx.a.tmpdir) ctx.patriciaTrie.ResetContext(ctx) return ctx } @@ -1031,7 +1037,7 @@ func (sdc *SharedDomainsCommitmentContext) Reset() { } func (sdc *SharedDomainsCommitmentContext) TempDir() string { - return sdc.sd.aggCtx.a.dirs.Tmp + return sdc.sd.aggTx.a.dirs.Tmp } func (sdc *SharedDomainsCommitmentContext) KeysCount() uint64 { @@ -1111,7 +1117,7 @@ func (sdc *SharedDomainsCommitmentContext) ComputeCommitment(ctx context.Context } func (sdc *SharedDomainsCommitmentContext) storeCommitmentState(blockNum uint64, rh []byte) error { - if sdc.sd.aggCtx == nil { + if sdc.sd.aggTx == nil { return fmt.Errorf("store commitment state: AggregatorContext is not initialized") } encodedState, err := sdc.encodeCommitmentState(blockNum, sdc.sd.txNum) @@ -1164,7 +1170,7 @@ func (sdc *SharedDomainsCommitmentContext) encodeCommitmentState(blockNum, txNum var keyCommitmentState = []byte("state") func (sd *SharedDomains) LatestCommitmentState(tx kv.Tx, sinceTx, untilTx uint64) (blockNum, txNum uint64, state []byte, err error) { - return sd.sdCtx.LatestCommitmentState(tx, sd.aggCtx.d[kv.CommitmentDomain], sinceTx, untilTx) + return sd.sdCtx.LatestCommitmentState(tx, sd.aggTx.d[kv.CommitmentDomain], sinceTx, untilTx) } func _decodeTxBlockNums(v []byte) (txNum, blockNum uint64) { diff --git a/eth/integrity/e3_ef_files.go b/eth/integrity/e3_ef_files.go index 84e49c0a8fe..216c31e4f5d 100644 --- a/eth/integrity/e3_ef_files.go +++ b/eth/integrity/e3_ef_files.go @@ -27,7 +27,7 @@ func E3EfFiles(ctx context.Context, chainDB kv.RwDB, agg *state.Aggregator) erro } defer tx.Rollback() - err = tx.(state.HasAggCtx).AggCtx().(*state.AggregatorRoTx).DebugEFAllValuesAreInRange(ctx, idx) + err = tx.(state.HasAggTx).AggTx().(*state.AggregatorRoTx).DebugEFAllValuesAreInRange(ctx, idx) if err != nil { return err } diff --git a/eth/integrity/e3_history_no_system_txs.go b/eth/integrity/e3_history_no_system_txs.go index 70906bb2030..5f3739a1ffa 100644 --- a/eth/integrity/e3_history_no_system_txs.go +++ b/eth/integrity/e3_history_no_system_txs.go @@ -38,7 +38,7 @@ func E3HistoryNoSystemTxs(ctx context.Context, chainDB kv.RwDB, agg *state.Aggre defer tx.Rollback() var minStep uint64 = math.MaxUint64 - keys, err := tx.(state.HasAggCtx).AggCtx().(*state.AggregatorRoTx).DomainRangeLatest(tx, kv.AccountsDomain, []byte{byte(j), byte(jj)}, []byte{byte(j), byte(jj + 1)}, -1) + keys, err := tx.(state.HasAggTx).AggTx().(*state.AggregatorRoTx).DomainRangeLatest(tx, kv.AccountsDomain, []byte{byte(j), byte(jj)}, []byte{byte(j), byte(jj + 1)}, -1) if err != nil { return err } diff --git a/eth/stagedsync/exec3.go b/eth/stagedsync/exec3.go index 124835c6068..9b7571062b1 100644 --- 
a/eth/stagedsync/exec3.go +++ b/eth/stagedsync/exec3.go @@ -1018,7 +1018,7 @@ func dumpPlainStateDebug(tx kv.RwTx, doms *state2.SharedDomains) { doms.Flush(context.Background(), tx) } { - it, err := tx.(state2.HasAggCtx).AggCtx().(*state2.AggregatorRoTx).DomainRangeLatest(tx, kv.AccountsDomain, nil, nil, -1) + it, err := tx.(state2.HasAggTx).AggTx().(*state2.AggregatorRoTx).DomainRangeLatest(tx, kv.AccountsDomain, nil, nil, -1) if err != nil { panic(err) } @@ -1033,7 +1033,7 @@ func dumpPlainStateDebug(tx kv.RwTx, doms *state2.SharedDomains) { } } { - it, err := tx.(state2.HasAggCtx).AggCtx().(*state2.AggregatorRoTx).DomainRangeLatest(tx, kv.StorageDomain, nil, nil, -1) + it, err := tx.(state2.HasAggTx).AggTx().(*state2.AggregatorRoTx).DomainRangeLatest(tx, kv.StorageDomain, nil, nil, -1) if err != nil { panic(1) } @@ -1046,7 +1046,7 @@ func dumpPlainStateDebug(tx kv.RwTx, doms *state2.SharedDomains) { } } { - it, err := tx.(state2.HasAggCtx).AggCtx().(*state2.AggregatorRoTx).DomainRangeLatest(tx, kv.CommitmentDomain, nil, nil, -1) + it, err := tx.(state2.HasAggTx).AggTx().(*state2.AggregatorRoTx).DomainRangeLatest(tx, kv.CommitmentDomain, nil, nil, -1) if err != nil { panic(1) } @@ -1127,7 +1127,7 @@ func flushAndCheckCommitmentV3(ctx context.Context, header *types.Header, applyT return false, nil } - unwindToLimit, err := applyTx.(state2.HasAggCtx).AggCtx().(*state2.AggregatorRoTx).CanUnwindDomainsToBlockNum(applyTx) + unwindToLimit, err := applyTx.(state2.HasAggTx).AggTx().(*state2.AggregatorRoTx).CanUnwindDomainsToBlockNum(applyTx) if err != nil { return false, err } @@ -1138,7 +1138,7 @@ func flushAndCheckCommitmentV3(ctx context.Context, header *types.Header, applyT unwindTo := maxBlockNum - jump // protect from too far unwind - allowedUnwindTo, ok, err := applyTx.(state2.HasAggCtx).AggCtx().(*state2.AggregatorRoTx).CanUnwindBeforeBlockNum(unwindTo, applyTx) + allowedUnwindTo, ok, err := applyTx.(state2.HasAggTx).AggTx().(*state2.AggregatorRoTx).CanUnwindBeforeBlockNum(unwindTo, applyTx) if err != nil { return false, err } diff --git a/eth/stagedsync/stage_execute.go b/eth/stagedsync/stage_execute.go index 0278b4c51c9..53ba5c40ea7 100644 --- a/eth/stagedsync/stage_execute.go +++ b/eth/stagedsync/stage_execute.go @@ -345,7 +345,7 @@ func unwindExec3(u *UnwindState, s *StageState, txc wrap.TxContainer, ctx contex //} start := time.Now() - unwindToLimit, err := txc.Tx.(libstate.HasAggCtx).AggCtx().(*libstate.AggregatorRoTx).CanUnwindDomainsToBlockNum(txc.Tx) + unwindToLimit, err := txc.Tx.(libstate.HasAggTx).AggTx().(*libstate.AggregatorRoTx).CanUnwindDomainsToBlockNum(txc.Tx) if err != nil { return err } @@ -999,8 +999,7 @@ func PruneExecutionStage(s *PruneState, tx kv.RwTx, cfg ExecuteBlockCfg, ctx con if initialCycle { pruneTimeout = 12 * time.Hour } - - if _, err = tx.(*temporal.Tx).AggCtx().(*libstate.AggregatorRoTx).PruneSmallBatches(ctx, pruneTimeout, tx); err != nil { // prune part of retired data, before commit + if _, err = tx.(*temporal.Tx).AggTx().(*libstate.AggregatorRoTx).PruneSmallBatches(ctx, pruneTimeout, tx); err != nil { // prune part of retired data, before commit return err } } else { diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index a62d8250954..1e57d1aa7ca 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -335,7 +335,7 @@ Loop: } defer doms.Close() - allowedUnwindTo, ok, err := tx.(state.HasAggCtx).AggCtx().(*state.AggregatorRoTx).CanUnwindBeforeBlockNum(unwindTo, tx) + allowedUnwindTo, ok, err 
:= tx.(state.HasAggTx).AggTx().(*state.AggregatorRoTx).CanUnwindBeforeBlockNum(unwindTo, tx) if err != nil { return err } diff --git a/eth/stagedsync/stage_snapshots.go b/eth/stagedsync/stage_snapshots.go index 5743f983d9b..c243cf321fc 100644 --- a/eth/stagedsync/stage_snapshots.go +++ b/eth/stagedsync/stage_snapshots.go @@ -300,7 +300,7 @@ func DownloadAndIndexSnapshotsIfNeed(s *StageState, ctx context.Context, tx kv.R { cfg.blockReader.Snapshots().LogStat("download") - tx.(state.HasAggCtx).AggCtx().(*state.AggregatorRoTx).LogStats(tx, func(endTxNumMinimax uint64) uint64 { + tx.(state.HasAggTx).AggTx().(*state.AggregatorRoTx).LogStats(tx, func(endTxNumMinimax uint64) uint64 { _, histBlockNumProgress, _ := rawdbv3.TxNums.FindBlockNum(tx, endTxNumMinimax) return histBlockNumProgress }) diff --git a/eth/stagedsync/stage_trie3.go b/eth/stagedsync/stage_trie3.go index 1e234804ced..5f16605c7e9 100644 --- a/eth/stagedsync/stage_trie3.go +++ b/eth/stagedsync/stage_trie3.go @@ -29,7 +29,7 @@ func collectAndComputeCommitment(ctx context.Context, tx kv.RwTx, tmpDir string, return nil, err } defer domains.Close() - ac := domains.AggCtx().(*state.AggregatorRoTx) + ac := domains.AggTx().(*state.AggregatorRoTx) // has to set this value because it will be used during domain.Commit() call. // If we do not, txNum of block beginning will be used, which will cause invalid txNum on restart following commitment rebuilding @@ -189,7 +189,7 @@ func RebuildPatriciaTrieBasedOnFiles(rwTx kv.RwTx, cfg TrieCfg, ctx context.Cont } var foundHash bool - toTxNum := rwTx.(*temporal.Tx).AggCtx().(*state.AggregatorRoTx).EndTxNumNoCommitment() + toTxNum := rwTx.(*temporal.Tx).AggTx().(*state.AggregatorRoTx).EndTxNumNoCommitment() ok, blockNum, err := rawdbv3.TxNums.FindBlockNum(rwTx, toTxNum) if err != nil { return libcommon.Hash{}, err } diff --git a/eth/stagedsync/sync.go b/eth/stagedsync/sync.go index 12d199c2f2f..a0ce6a892fd 100644 --- a/eth/stagedsync/sync.go +++ b/eth/stagedsync/sync.go @@ -137,9 +137,9 @@ func (s *Sync) IsAfter(stage1, stage2 stages.SyncStage) bool { func (s *Sync) HasUnwindPoint() bool { return s.unwindPoint != nil } func (s *Sync) UnwindTo(unwindPoint uint64, reason UnwindReason, tx kv.Tx) error { if tx != nil { - if casted, ok := tx.(state.HasAggCtx); ok { + if casted, ok := tx.(state.HasAggTx); ok { // protect from too far unwind - unwindPointWithCommitment, ok, err := casted.AggCtx().(*state.AggregatorRoTx).CanUnwindBeforeBlockNum(unwindPoint, tx) + unwindPointWithCommitment, ok, err := casted.AggTx().(*state.AggregatorRoTx).CanUnwindBeforeBlockNum(unwindPoint, tx) if err != nil { return err } From 5c0252b91656e15865780321cd0eb48fc0fdcc31 Mon Sep 17 00:00:00 2001 From: Willian Mitsuda Date: Sat, 11 May 2024 00:47:43 -0300 Subject: [PATCH 25/48] Add context to make function Ctrl+C friendly; add activity logs (#10280) For some reason, at least for the last 2 weeks I've seen some strange behavior, first on 2.60 branch and now on E3 main (not sure if it is related to some recent change or it randomly started manifesting < 2 weeks ago): when I start Erigon, it keeps getting stuck in the "Downloading PoS headers" phase forever. Sometimes it takes +15 min to start downloading headers, but most times it gets stuck forever (have left it overnight with no success). While this PR doesn't fix the actual issue, while debugging it I noticed: - there is no activity log, so it looks like the node is doing nothing. - can't Ctrl+C to stop it when it gets in this state, have to kill it (see the sketch after this list).
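The fix, summarized in the list that follows, replaces the busy-wait with a context-aware select loop. A minimal illustrative sketch of that pattern (not the PR's exact code — `waitUntil` and `done` are made-up names here; the real implementation is in the diff below):

```go
package enginesketch

import (
	"context"
	"log"
	"time"
)

// waitUntil polls a condition on a short ticker, logs progress periodically,
// and aborts as soon as the context is cancelled (e.g. by Ctrl+C).
func waitUntil(ctx context.Context, done func() bool) error {
	ticker := time.NewTicker(10 * time.Millisecond)
	defer ticker.Stop()

	logEvery := time.NewTicker(30 * time.Second)
	defer logEvery.Stop()

	for {
		select {
		case <-ticker.C:
			if done() {
				return nil
			}
		case <-logEvery.C:
			log.Println("still waiting...") // periodic activity log, so the node no longer looks dead
		case <-ctx.Done():
			return ctx.Err() // makes the wait cancellable
		}
	}
}
```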
This PR: - Replaces a for + sleep with a select channel + ticker + check - Adds a logEvery ticker - Listens to ctx.Done() in order to make it Ctrl+C cancellable. Logs before this PR (3 min without any activity + can't Ctrl+C): ``` INFO[05-10|16:20:16.399] [EngineBlockDownloader] Downloading PoS headers... hash=0x563f6871a197cef99b5c0bde819faa171d27a2a2d3ce8a8b86a9a3344b43501e requestId=0 INFO[05-10|16:23:06.121] [p2p] GoodPeers INFO[05-10|16:23:06.121] [mem] memory stats alloc=1.1GB sys=1.6GB INFO[05-10|16:23:07.949] [txpool] stat pending=0 baseFee=0 queued=0 alloc=1.1GB sys=1.6GB ^CINFO[05-10|16:25:54.723] Got interrupt, shutting down... sig=interrupt INFO[05-10|16:25:54.723] Got interrupt, shutting down... INFO[05-10|16:25:54.723] Exiting Engine... INFO[05-10|16:25:54.723] RPC server shutting down INFO[05-10|16:25:54.723] Exiting... INFO[05-10|16:25:54.723] RPC server shutting down INFO[05-10|16:25:54.724] HTTP endpoint closed url=[::]:8745 INFO[05-10|16:25:54.724] HTTP endpoint closed url=[::]:8546 INFO[05-10|16:25:54.724] RPC server shutting down INFO[05-10|16:25:54.724] Engine HTTP endpoint close url=127.0.0.1:8551 WARN[05-10|16:25:54.725] [EngineBlockDownloader] Header download did not yield success ^CWARN[05-10|16:26:02.793] Already shutting down, interrupt more to panic. times=9 ^CWARN[05-10|16:26:03.015] Already shutting down, interrupt more to panic. times=8 ^CWARN[05-10|16:26:03.197] Already shutting down, interrupt more to panic. times=7 ^CWARN[05-10|16:26:03.330] Already shutting down, interrupt more to panic. times=6 ^CWARN[05-10|16:26:03.458] Already shutting down, interrupt more to panic. times=5 ``` Logs after this PR (still no headers download :( , but now has activity logs every 30s + can Ctrl+C): ``` INFO[05-10|16:32:44.620] [EngineBlockDownloader] Downloading PoS headers... hash=0xdc9eb5145d75660d3a84e3ab2d302614bfcc57fa7eb6293305d6717b5f143a79 requestId=0 INFO[05-10|16:33:14.620] [EngineBlockDownloader] Waiting for headers download to finish INFO[05-10|16:33:44.620] [EngineBlockDownloader] Waiting for headers download to finish ^CINFO[05-10|16:34:13.139] Got interrupt, shutting down... sig=interrupt INFO[05-10|16:34:13.139] Got interrupt, shutting down... WARN[05-10|16:34:13.139] [EngineBlockDownloader] Could not finish headers download err="context canceled" INFO[05-10|16:34:13.139] Exiting Engine... INFO[05-10|16:34:13.139] Exiting... INFO[05-10|16:34:13.139] RPC server shutting down INFO[05-10|16:34:13.139] HTTP endpoint closed url=[::]:8745 INFO[05-10|16:34:13.139] RPC server shutting down INFO[05-10|16:34:13.139] HTTP endpoint closed url=[::]:8546 INFO[05-10|16:34:13.139] Engine HTTP endpoint close url=127.0.0.1:8551 INFO[05-10|16:34:13.139] RPC server shutting down ``` --- .../block_downloader.go | 22 +++++++++++++++---- .../engineapi/engine_block_downloader/core.go | 7 +++++- 2 files changed, 24 insertions(+), 5 deletions(-) diff --git a/turbo/engineapi/engine_block_downloader/block_downloader.go b/turbo/engineapi/engine_block_downloader/block_downloader.go index fd6d35a083a..25e975b60d9 100644 --- a/turbo/engineapi/engine_block_downloader/block_downloader.go +++ b/turbo/engineapi/engine_block_downloader/block_downloader.go @@ -126,11 +126,25 @@ func (e *EngineBlockDownloader) scheduleHeadersDownload( } // waitForEndOfHeadersDownload waits until the download of headers ends and returns the outcome. 
-func (e *EngineBlockDownloader) waitForEndOfHeadersDownload() headerdownload.SyncStatus { - for e.hd.PosStatus() == headerdownload.Syncing { - time.Sleep(10 * time.Millisecond) +func (e *EngineBlockDownloader) waitForEndOfHeadersDownload(ctx context.Context) (headerdownload.SyncStatus, error) { + ticker := time.NewTicker(10 * time.Millisecond) + defer ticker.Stop() + + logEvery := time.NewTicker(30 * time.Second) + defer logEvery.Stop() + + for { + select { + case <-ticker.C: + if e.hd.PosStatus() != headerdownload.Syncing { + return e.hd.PosStatus(), nil + } + case <-ctx.Done(): + return e.hd.PosStatus(), ctx.Err() + case <-logEvery.C: + e.logger.Info("[EngineBlockDownloader] Waiting for headers download to finish") + } } - return e.hd.PosStatus() } // waitForEndOfHeadersDownload waits until the download of headers ends and returns the outcome. diff --git a/turbo/engineapi/engine_block_downloader/core.go b/turbo/engineapi/engine_block_downloader/core.go index 6f07321b762..ade141f3c6c 100644 --- a/turbo/engineapi/engine_block_downloader/core.go +++ b/turbo/engineapi/engine_block_downloader/core.go @@ -22,7 +22,12 @@ func (e *EngineBlockDownloader) download(ctx context.Context, hashToDownload lib return } // see the outcome of header download - headersStatus := e.waitForEndOfHeadersDownload() + headersStatus, err := e.waitForEndOfHeadersDownload(ctx) + if err != nil { + e.logger.Warn("[EngineBlockDownloader] Could not finish headers download", "err", err) + e.status.Store(headerdownload.Idle) + return + } if headersStatus != headerdownload.Synced { // Could not sync. Set to idle From e2c5661b970df7ebc98d81a4ff747b5985f56558 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Sat, 11 May 2024 21:57:02 +0700 Subject: [PATCH 26/48] add log-prefix to GasPriceOracle (#10283) --- eth/gasprice/feehistory.go | 4 ++-- eth/gasprice/gasprice.go | 20 ++++++++++---------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/eth/gasprice/feehistory.go b/eth/gasprice/feehistory.go index 2e9923a2302..5a6c4d4c0e6 100644 --- a/eth/gasprice/feehistory.go +++ b/eth/gasprice/feehistory.go @@ -93,7 +93,7 @@ func (oracle *Oracle) processBlock(bf *blockFees, percentiles []float64) { return } if bf.block == nil || (bf.receipts == nil && len(bf.block.Transactions()) != 0) { - log.Error("Block or receipts are missing while reward percentiles are requested") + log.Error("[GasPriceOracle] Block or receipts are missing while reward percentiles are requested") return } @@ -205,7 +205,7 @@ func (oracle *Oracle) FeeHistory(ctx context.Context, blocks int, unresolvedLast return libcommon.Big0, nil, nil, nil, nil // returning with no data and no error means there are no retrievable blocks } if blocks > maxFeeHistory { - log.Warn("Sanitizing fee history length", "requested", blocks, "truncated", maxFeeHistory) + log.Warn("[GasPriceOracle] Sanitizing fee history length", "requested", blocks, "truncated", maxFeeHistory) blocks = maxFeeHistory } for i, p := range rewardPercentiles { diff --git a/eth/gasprice/gasprice.go b/eth/gasprice/gasprice.go index ad3bfd369d9..c30cb7fbb78 100644 --- a/eth/gasprice/gasprice.go +++ b/eth/gasprice/gasprice.go @@ -70,26 +70,26 @@ func NewOracle(backend OracleBackend, params gaspricecfg.Config, cache Cache) *O blocks := params.Blocks if blocks < 1 { blocks = 1 - log.Warn("Sanitizing invalid gasprice oracle sample blocks", "provided", params.Blocks, "updated", blocks) + log.Warn("[GasPriceOracle] Sanitizing invalid gasprice oracle sample blocks", "provided", params.Blocks, "updated", 
blocks) } percent := params.Percentile if percent < 0 { percent = 0 - log.Warn("Sanitizing invalid gasprice oracle sample percentile", "provided", params.Percentile, "updated", percent) + log.Warn("[GasPriceOracle] Sanitizing invalid gasprice oracle sample percentile", "provided", params.Percentile, "updated", percent) } if percent > 100 { percent = 100 - log.Warn("Sanitizing invalid gasprice oracle sample percentile", "provided", params.Percentile, "updated", percent) + log.Warn("[GasPriceOracle] Sanitizing invalid gasprice oracle sample percentile", "provided", params.Percentile, "updated", percent) } maxPrice := params.MaxPrice if maxPrice == nil || maxPrice.Int64() <= 0 { maxPrice = gaspricecfg.DefaultMaxPrice - log.Warn("Sanitizing invalid gasprice oracle price cap", "provided", params.MaxPrice, "updated", maxPrice) + log.Warn("[GasPriceOracle] Sanitizing invalid gasprice oracle price cap", "provided", params.MaxPrice, "updated", maxPrice) } ignorePrice := params.IgnorePrice if ignorePrice == nil || ignorePrice.Int64() < 0 { ignorePrice = gaspricecfg.DefaultIgnorePrice - log.Warn("Sanitizing invalid gasprice oracle ignore price", "provided", params.IgnorePrice, "updated", ignorePrice) + log.Warn("[GasPriceOracle] Sanitizing invalid gasprice oracle ignore price", "provided", params.IgnorePrice, "updated", ignorePrice) } setBorDefaultGpoIgnorePrice(backend.ChainConfig(), params) @@ -190,7 +190,7 @@ func (t *transactionsByGasPrice) Push(x interface{}) { // not just its contents. l, ok := x.(types.Transaction) if !ok { - log.Error("Type assertion failure", "err", "cannot get types.Transaction from interface") + log.Error("[GasPriceOracle] Type assertion failure", "err", "cannot get types.Transaction from interface") } t.txs = append(t.txs, l) } @@ -214,12 +214,12 @@ func (oracle *Oracle) getBlockPrices(ctx context.Context, blockNum uint64, limit ignoreUnder, overflow := uint256.FromBig(ingoreUnderBig) if overflow { err := errors.New("overflow in getBlockPrices, gasprice.go: ignoreUnder too large") - log.Error("gasprice.go: getBlockPrices", "err", err) + log.Error("[GasPriceOracle] getBlockPrices", "err", err) return err } block, err := oracle.backend.BlockByNumber(ctx, rpc.BlockNumber(blockNum)) if err != nil { - log.Error("gasprice.go: getBlockPrices", "err", err) + log.Error("[GasPriceOracle] getBlockPrices", "err", err) return err } @@ -237,7 +237,7 @@ func (oracle *Oracle) getBlockPrices(ctx context.Context, blockNum uint64, limit baseFee, overflow = uint256.FromBig(block.BaseFee()) if overflow { err := errors.New("overflow in getBlockPrices, gasprice.go: baseFee > 2^256-1") - log.Error("gasprice.go: getBlockPrices", "err", err) + log.Error("[GasPriceOracle] getBlockPrices", "err", err) return err } } @@ -287,7 +287,7 @@ func (s *sortingHeap) Pop() interface{} { // setBorDefaultGpoIgnorePrice enforces gpo IgnorePrice to be equal to BorDefaultGpoIgnorePrice (30gwei by default) func setBorDefaultGpoIgnorePrice(chainConfig *chain.Config, gasPriceConfig gaspricecfg.Config) { if chainConfig.Bor != nil && gasPriceConfig.IgnorePrice != gaspricecfg.BorDefaultGpoIgnorePrice { - log.Warn("Sanitizing invalid bor gasprice oracle ignore price", "provided", gasPriceConfig.IgnorePrice, "updated", gaspricecfg.BorDefaultGpoIgnorePrice) + log.Warn("[GasPriceOracle] Sanitizing invalid bor gasprice oracle ignore price", "provided", gasPriceConfig.IgnorePrice, "updated", gaspricecfg.BorDefaultGpoIgnorePrice) gasPriceConfig.IgnorePrice = gaspricecfg.BorDefaultGpoIgnorePrice } } From 
3e54eafe6961bf5ff6c4852510aa01ab544eaece Mon Sep 17 00:00:00 2001 From: r3inbowari Date: Sun, 12 May 2024 18:43:05 +0800 Subject: [PATCH 27/48] diagnostics: add packet loss and cacheServerList for speedtest (#10259) --- erigon-lib/diagnostics/entities.go | 1 + erigon-lib/diagnostics/speedtest.go | 28 +++++++++++++++++++++++----- erigon-lib/go.mod | 2 +- erigon-lib/go.sum | 4 ++-- go.mod | 2 +- go.sum | 4 ++-- 6 files changed, 30 insertions(+), 11 deletions(-) diff --git a/erigon-lib/diagnostics/entities.go b/erigon-lib/diagnostics/entities.go index 8d397985f22..71e745b32d1 100644 --- a/erigon-lib/diagnostics/entities.go +++ b/erigon-lib/diagnostics/entities.go @@ -257,6 +257,7 @@ type NetworkSpeedTestResult struct { Latency time.Duration `json:"latency"` DownloadSpeed float64 `json:"downloadSpeed"` UploadSpeed float64 `json:"uploadSpeed"` + PacketLoss float64 `json:"packetLoss"` } func (ti FileDownloadedStatisticsUpdate) Type() Type { diff --git a/erigon-lib/diagnostics/speedtest.go b/erigon-lib/diagnostics/speedtest.go index d2c463bbbbb..ab9a04008bc 100644 --- a/erigon-lib/diagnostics/speedtest.go +++ b/erigon-lib/diagnostics/speedtest.go @@ -5,6 +5,7 @@ import ( "time" "github.com/showwin/speedtest-go/speedtest" + "github.com/showwin/speedtest-go/speedtest/transport" ) func (d *DiagnosticClient) setupSpeedtestDiagnostics(rootCtx context.Context) { @@ -28,37 +29,54 @@ func (d *DiagnosticClient) setupSpeedtestDiagnostics(rootCtx context.Context) { }() } +var cacheServerList speedtest.Servers + func (d *DiagnosticClient) runSpeedTest(rootCtx context.Context) NetworkSpeedTestResult { var speedtestClient = speedtest.New() - serverList, _ := speedtestClient.FetchServers() - targets, _ := serverList.FindServer([]int{}) + + serverList, err := speedtestClient.FetchServers() + // Ensure that the server list can be rolled back to the previous cache. 
+ if err == nil { + cacheServerList = serverList + } + targets, _ := cacheServerList.FindServer([]int{}) latency := time.Duration(0) downloadSpeed := float64(0) uploadSpeed := float64(0) + packetLoss := float64(-1) + + analyzer := speedtest.NewPacketLossAnalyzer(nil) if len(targets) > 0 { s := targets[0] - err := s.PingTestContext(rootCtx, nil) + err = s.PingTestContext(rootCtx, nil) if err == nil { latency = s.Latency } err = s.DownloadTestContext(rootCtx) if err == nil { - downloadSpeed = s.DLSpeed + downloadSpeed = s.DLSpeed.Mbps() } err = s.UploadTestContext(rootCtx) if err == nil { - uploadSpeed = s.ULSpeed + uploadSpeed = s.ULSpeed.Mbps() } + + ctx, cancel := context.WithTimeout(rootCtx, time.Second*15) + defer cancel() + _ = analyzer.RunWithContext(ctx, s.Host, func(pl *transport.PLoss) { + packetLoss = pl.Loss() + }) } return NetworkSpeedTestResult{ Latency: latency, DownloadSpeed: downloadSpeed, UploadSpeed: uploadSpeed, + PacketLoss: packetLoss, } } diff --git a/erigon-lib/go.mod b/erigon-lib/go.mod index eeefcf97509..23911a1afb3 100644 --- a/erigon-lib/go.mod +++ b/erigon-lib/go.mod @@ -129,7 +129,7 @@ require ( github.com/rogpeppe/go-internal v1.12.0 // indirect github.com/rs/dnscache v0.0.0-20211102005908-e0241e321417 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect - github.com/showwin/speedtest-go v1.6.12 + github.com/showwin/speedtest-go v1.7.5 github.com/sirupsen/logrus v1.9.3 // indirect github.com/tklauser/go-sysconf v0.3.14 // indirect github.com/tklauser/numcpus v0.8.0 // indirect diff --git a/erigon-lib/go.sum b/erigon-lib/go.sum index 57f098dd177..ea57eb86e67 100644 --- a/erigon-lib/go.sum +++ b/erigon-lib/go.sum @@ -418,8 +418,8 @@ github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFt github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= -github.com/showwin/speedtest-go v1.6.12 h1:q+hWNn2cM35KkqtXGGbSmuJgd67gTP8+VlneY2hq9vU= -github.com/showwin/speedtest-go v1.6.12/go.mod h1:uLgdWCNarXxlYsL2E5TOZpCIwpgSWnEANZp7gfHXHu0= +github.com/showwin/speedtest-go v1.7.5 h1:FQ3EdM2vnfw5BRCRzGCYe8aWu70rr21Az5ZFHiW9CdE= +github.com/showwin/speedtest-go v1.7.5/go.mod h1:uLgdWCNarXxlYsL2E5TOZpCIwpgSWnEANZp7gfHXHu0= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= diff --git a/go.mod b/go.mod index 4f4bd143ccb..bbb06c8ded4 100644 --- a/go.mod +++ b/go.mod @@ -263,7 +263,7 @@ require ( github.com/shirou/gopsutil/v3 v3.24.3 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect github.com/shopspring/decimal v1.2.0 // indirect - github.com/showwin/speedtest-go v1.6.12 // indirect + github.com/showwin/speedtest-go v1.7.5 // indirect github.com/sirupsen/logrus v1.9.3 // indirect github.com/sosodev/duration v1.1.0 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect diff --git a/go.sum b/go.sum index 1bfef335f1c..417c8efa6fd 100644 --- a/go.sum +++ b/go.sum @@ -825,8 +825,8 @@ github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= 
github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= -github.com/showwin/speedtest-go v1.6.12 h1:q+hWNn2cM35KkqtXGGbSmuJgd67gTP8+VlneY2hq9vU= -github.com/showwin/speedtest-go v1.6.12/go.mod h1:uLgdWCNarXxlYsL2E5TOZpCIwpgSWnEANZp7gfHXHu0= +github.com/showwin/speedtest-go v1.7.5 h1:FQ3EdM2vnfw5BRCRzGCYe8aWu70rr21Az5ZFHiW9CdE= +github.com/showwin/speedtest-go v1.7.5/go.mod h1:uLgdWCNarXxlYsL2E5TOZpCIwpgSWnEANZp7gfHXHu0= github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0= From 24bc319d69c1d0d6726969a5a617ca09dbdf5277 Mon Sep 17 00:00:00 2001 From: Willian Mitsuda Date: Sun, 12 May 2024 09:37:20 -0300 Subject: [PATCH 28/48] Fix reverse nil check (#10286) Fixes https://github.com/ledgerwatch/erigon/issues/10285. However, looking at the code a few lines above, I'm wondering why `tx` actually is nil here: can `tx, err = e.db.BeginRwNosync(ctx)` return `tx == nil` when there is no error? It's not clear to me what value `tx` is capturing inside the closure, so I'm afraid this fix may hide another bug; I'm a little confused. --- turbo/execution/eth1/forkchoice.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/turbo/execution/eth1/forkchoice.go b/turbo/execution/eth1/forkchoice.go index 44f8ef9393a..8176b90c25d 100644 --- a/turbo/execution/eth1/forkchoice.go +++ b/turbo/execution/eth1/forkchoice.go @@ -333,7 +333,7 @@ TooBigJumpStep: return } defer func() { - if tx == nil { + if tx != nil { tx.Rollback() } }() From f4df7c22f80cc2af68e35f607abe07f9991a6ebf Mon Sep 17 00:00:00 2001 From: Willian Mitsuda Date: Mon, 13 May 2024 03:11:24 -0300 Subject: [PATCH 29/48] Refactor maxTxNumInFiles logic; rename cold param to clarify its meaning (#10290) This refactoring should not change any current code behavior. - Extract the "get last txNum in files" logic, which is duplicated in several places. - Rename the `cold` param to `onlyFrozen` in `maxTxNumInFiles` because the current naming is misleading: - `cold == false`: returns the last txNum in the [frozen..., cold...] range. - `cold == true`: returns the last txNum in the [frozen...] range.
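To make the two cases concrete, here is a minimal illustrative sketch with simplified types (assumptions: `file` stands in for the real file-item type, and the files slice is sorted by `endTxNum` with frozen files preceding cold ones, as in the diff below):

```go
package mergesketch

type file struct {
	endTxNum uint64
	frozen   bool
}

// maxTxNumInFiles mirrors the renamed parameter's semantics:
// onlyFrozen == false: last txNum of the whole [frozen..., cold...] range;
// onlyFrozen == true:  last txNum covered by the [frozen...] prefix only.
func maxTxNumInFiles(files []file, onlyFrozen bool) uint64 {
	if len(files) == 0 {
		return 0
	}
	if !onlyFrozen {
		return files[len(files)-1].endTxNum
	}
	for i := len(files) - 1; i >= 0; i-- {
		if files[i].frozen {
			return files[i].endTxNum
		}
	}
	return 0
}
```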
--- erigon-lib/state/history.go | 4 ++-- erigon-lib/state/inverted_index.go | 9 +++++++-- erigon-lib/state/merge.go | 15 +++++++++------ 3 files changed, 18 insertions(+), 10 deletions(-) diff --git a/erigon-lib/state/history.go b/erigon-lib/state/history.go index 0ce48c959b0..5bfddadb2ec 100644 --- a/erigon-lib/state/history.go +++ b/erigon-lib/state/history.go @@ -1598,7 +1598,7 @@ func (ht *HistoryRoTx) iterateChangedFrozen(fromTxNum, toTxNum int, asc order.By return iter.EmptyKV, nil } - if fromTxNum >= 0 && ht.iit.files[len(ht.iit.files)-1].endTxNum <= uint64(fromTxNum) { + if fromTxNum >= 0 && ht.iit.lastTxNumInFiles() <= uint64(fromTxNum) { return iter.EmptyKV, nil } @@ -1635,7 +1635,7 @@ func (ht *HistoryRoTx) iterateChangedRecent(fromTxNum, toTxNum int, asc order.By if asc == order.Desc { panic("not supported yet") } - rangeIsInFiles := toTxNum >= 0 && len(ht.iit.files) > 0 && ht.iit.files[len(ht.iit.files)-1].endTxNum >= uint64(toTxNum) + rangeIsInFiles := toTxNum >= 0 && len(ht.iit.files) > 0 && ht.iit.lastTxNumInFiles() >= uint64(toTxNum) if rangeIsInFiles { return iter.EmptyKVS, nil } diff --git a/erigon-lib/state/inverted_index.go b/erigon-lib/state/inverted_index.go index bb79ec24a23..216f316bf3c 100644 --- a/erigon-lib/state/inverted_index.go +++ b/erigon-lib/state/inverted_index.go @@ -653,6 +653,11 @@ func (iit *InvertedIndexRoTx) seekInFiles(key []byte, txNum uint64) (found bool, return false, 0 } +// it is assumed files are always sorted +func (iit *InvertedIndexRoTx) lastTxNumInFiles() uint64 { + return iit.files[len(iit.files)-1].endTxNum +} + // IdxRange - return range of txNums for given `key` // is to be used in public API, therefore it relies on read-only transaction // so that iteration can be done even when the inverted index is being updated. 
@@ -672,12 +677,12 @@ func (iit *InvertedIndexRoTx) IdxRange(key []byte, startTxNum, endTxNum int, asc func (iit *InvertedIndexRoTx) recentIterateRange(key []byte, startTxNum, endTxNum int, asc order.By, limit int, roTx kv.Tx) (iter.U64, error) { //optimization: return empty pre-allocated iterator if range is frozen if asc { - isFrozenRange := len(iit.files) > 0 && endTxNum >= 0 && iit.files[len(iit.files)-1].endTxNum >= uint64(endTxNum) + isFrozenRange := len(iit.files) > 0 && endTxNum >= 0 && iit.lastTxNumInFiles() >= uint64(endTxNum) if isFrozenRange { return iter.EmptyU64, nil } } else { - isFrozenRange := len(iit.files) > 0 && startTxNum >= 0 && iit.files[len(iit.files)-1].endTxNum >= uint64(startTxNum) + isFrozenRange := len(iit.files) > 0 && startTxNum >= 0 && iit.lastTxNumInFiles() >= uint64(startTxNum) if isFrozenRange { return iter.EmptyU64, nil } diff --git a/erigon-lib/state/merge.go b/erigon-lib/state/merge.go index 189c950a9f0..25355398844 100644 --- a/erigon-lib/state/merge.go +++ b/erigon-lib/state/merge.go @@ -311,12 +311,12 @@ func (dt *DomainRoTx) maxTxNumInDomainFiles(cold bool) uint64 { return 0 } -func (ht *HistoryRoTx) maxTxNumInFiles(cold bool) uint64 { +func (ht *HistoryRoTx) maxTxNumInFiles(onlyFrozen bool) uint64 { if len(ht.files) == 0 { return 0 } var max uint64 - if cold { + if onlyFrozen { for i := len(ht.files) - 1; i >= 0; i-- { if !ht.files[i].src.frozen { continue @@ -327,15 +327,18 @@ func (ht *HistoryRoTx) maxTxNumInFiles(cold bool) uint64 { } else { max = ht.files[len(ht.files)-1].endTxNum } - return cmp.Min(max, ht.iit.maxTxNumInFiles(cold)) + return cmp.Min(max, ht.iit.maxTxNumInFiles(onlyFrozen)) } -func (iit *InvertedIndexRoTx) maxTxNumInFiles(cold bool) uint64 { + +func (iit *InvertedIndexRoTx) maxTxNumInFiles(onlyFrozen bool) uint64 { if len(iit.files) == 0 { return 0 } - if !cold { - return iit.files[len(iit.files)-1].endTxNum + if !onlyFrozen { + return iit.lastTxNumInFiles() } + + // files contains [frozen..., cold...] 
in that order for i := len(iit.files) - 1; i >= 0; i-- { if !iit.files[i].src.frozen { continue From 4ca333f8c7d3666cb617ca40c3e4655add5f97df Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Mon, 13 May 2024 15:13:07 +0700 Subject: [PATCH 30/48] Receipts: remove old rlp types (#10291) - I don't remove them 100% (even though E3 doesn't use them) - added `firstLogIndex` field - for use in future PRs --- core/types/access_list_tx.go | 7 + core/types/blob_tx.go | 8 + core/types/blob_tx_wrapper.go | 2 + core/types/dynamic_fee_tx.go | 7 + core/types/legacy_tx.go | 7 + core/types/receipt.go | 181 +++---- core/types/receipt_codecgen_gen.go | 769 ----------------------------- core/types/receipt_test.go | 212 +++++--- core/types/transaction.go | 1 + 9 files changed, 246 insertions(+), 948 deletions(-) delete mode 100644 core/types/receipt_codecgen_gen.go diff --git a/core/types/access_list_tx.go b/core/types/access_list_tx.go index c0b659fec12..f795ba0da0f 100644 --- a/core/types/access_list_tx.go +++ b/core/types/access_list_tx.go @@ -504,6 +504,13 @@ func (tx *AccessListTx) GetChainID() *uint256.Int { return tx.ChainID } +func (tx *AccessListTx) cashedSender() (sender libcommon.Address, ok bool) { + s := tx.from.Load() + if s == nil { + return sender, false + } + return s.(libcommon.Address), true +} func (tx *AccessListTx) Sender(signer Signer) (libcommon.Address, error) { if sc := tx.from.Load(); sc != nil { return sc.(libcommon.Address), nil diff --git a/core/types/blob_tx.go b/core/types/blob_tx.go index 0953d97d451..997d06fc45b 100644 --- a/core/types/blob_tx.go +++ b/core/types/blob_tx.go @@ -80,6 +80,14 @@ func (stx *BlobTx) AsMessage(s Signer, baseFee *big.Int, rules *chain.Rules) (Me return msg, err } +func (stx *BlobTx) cashedSender() (sender libcommon.Address, ok bool) { + s := stx.from.Load() + if s == nil { + return sender, false + } + return s.(libcommon.Address), true +} + func (stx *BlobTx) Sender(signer Signer) (libcommon.Address, error) { if sc := stx.from.Load(); sc != nil { return sc.(libcommon.Address), nil diff --git a/core/types/blob_tx_wrapper.go b/core/types/blob_tx_wrapper.go index 2565da90770..d7cd8781cf8 100644 --- a/core/types/blob_tx_wrapper.go +++ b/core/types/blob_tx_wrapper.go @@ -331,6 +331,8 @@ func (txw *BlobTxWrapper) RawSignatureValues() (*uint256.Int, *uint256.Int, *uin return txw.Tx.RawSignatureValues() } +func (txw *BlobTxWrapper) cashedSender() (libcommon.Address, bool) { return txw.Tx.cashedSender() } + func (txw *BlobTxWrapper) Sender(s Signer) (libcommon.Address, error) { return txw.Tx.Sender(s) } func (txw *BlobTxWrapper) GetSender() (libcommon.Address, bool) { return txw.Tx.GetSender() } diff --git a/core/types/dynamic_fee_tx.go b/core/types/dynamic_fee_tx.go index 11e4ec8b45b..f41fb0402ad 100644 --- a/core/types/dynamic_fee_tx.go +++ b/core/types/dynamic_fee_tx.go @@ -426,6 +426,13 @@ func (tx *DynamicFeeTransaction) GetChainID() *uint256.Int { return tx.ChainID } +func (tx *DynamicFeeTransaction) cashedSender() (sender libcommon.Address, ok bool) { + s := tx.from.Load() + if s == nil { + return sender, false + } + return s.(libcommon.Address), true +} func (tx *DynamicFeeTransaction) Sender(signer Signer) (libcommon.Address, error) { if sc := tx.from.Load(); sc != nil { return sc.(libcommon.Address), nil diff --git a/core/types/legacy_tx.go b/core/types/legacy_tx.go index 21139d9aa22..1d96520a85c 100644 --- a/core/types/legacy_tx.go +++ b/core/types/legacy_tx.go @@ -430,6 +430,13 @@ func (tx *LegacyTx) GetChainID() *uint256.Int { return 
DeriveChainId(&tx.V) } +func (tx *LegacyTx) cashedSender() (sender libcommon.Address, ok bool) { + s := tx.from.Load() + if s == nil { + return sender, false + } + return s.(libcommon.Address), true +} func (tx *LegacyTx) Sender(signer Signer) (libcommon.Address, error) { if sc := tx.from.Load(); sc != nil { return sc.(libcommon.Address), nil diff --git a/core/types/receipt.go b/core/types/receipt.go index 8741cbc6eae..e8378b966c5 100644 --- a/core/types/receipt.go +++ b/core/types/receipt.go @@ -32,14 +32,6 @@ import ( // go:generate gencodec -type Receipt -field-override receiptMarshaling -out gen_receipt_json.go -// disabling codecgen generation since it does not work for go1.22 -// to get it working need to update github.com/ugorji/go/codec to v1.2.12 which has the fix: -// - https://github.com/ugorji/go/commit/8286c2dc986535d23e3fad8d3e816b9dd1e5aea6 -// however updating the lib has caused us issues in the past, and we don't have good unit test coverage for updating atm -// we also use this for storing Receipts and Logs in the DB - we won't be doing that in Erigon 3 -// do not regen, more context: https://github.com/ledgerwatch/erigon/pull/10105#pullrequestreview-2027423601 -// go:generate codecgen -o receipt_codecgen_gen.go -r "^Receipts$|^Receipt$|^Logs$|^Log$" -st "codec" -j=false -nx=true -ta=true -oe=false -d 2 receipt.go log.go - var ( receiptStatusFailedRLP = []byte{} receiptStatusSuccessfulRLP = []byte{0x01} @@ -60,21 +52,23 @@ type Receipt struct { Type uint8 `json:"type,omitempty"` PostState []byte `json:"root" codec:"1"` Status uint64 `json:"status" codec:"2"` - CumulativeGasUsed uint64 `json:"cumulativeGasUsed" gencodec:"required" codec:"3"` - Bloom Bloom `json:"logsBloom" gencodec:"required" codec:"-"` - Logs Logs `json:"logs" gencodec:"required" codec:"-"` + CumulativeGasUsed uint64 `json:"cumulativeGasUsed" gencodec:"required"` + Bloom Bloom `json:"logsBloom" gencodec:"required"` + Logs Logs `json:"logs" gencodec:"required"` // Implementation fields: These fields are added by geth when processing a transaction. // They are stored in the chain database. - TxHash libcommon.Hash `json:"transactionHash" gencodec:"required" codec:"-"` - ContractAddress libcommon.Address `json:"contractAddress" codec:"-"` - GasUsed uint64 `json:"gasUsed" gencodec:"required" codec:"-"` + TxHash libcommon.Hash `json:"transactionHash" gencodec:"required"` + ContractAddress libcommon.Address `json:"contractAddress"` + GasUsed uint64 `json:"gasUsed" gencodec:"required"` // Inclusion information: These fields provide information about the inclusion of the // transaction corresponding to this receipt. - BlockHash libcommon.Hash `json:"blockHash,omitempty" codec:"-"` - BlockNumber *big.Int `json:"blockNumber,omitempty" codec:"-"` - TransactionIndex uint `json:"transactionIndex" codec:"-"` + BlockHash libcommon.Hash `json:"blockHash,omitempty"` + BlockNumber *big.Int `json:"blockNumber,omitempty"` + TransactionIndex uint `json:"transactionIndex"` + + firstLogIndex uint32 `json:"-"` // field which is stored in db and used to re-calc } type receiptMarshaling struct { @@ -99,28 +93,7 @@ type receiptRLP struct { type storedReceiptRLP struct { PostStateOrStatus []byte CumulativeGasUsed uint64 - Logs []*LogForStorage -} - -// v4StoredReceiptRLP is the storage encoding of a receipt used in database version 4. 
-type v4StoredReceiptRLP struct { - PostStateOrStatus []byte - CumulativeGasUsed uint64 - TxHash libcommon.Hash - ContractAddress libcommon.Address - Logs []*LogForStorage - GasUsed uint64 -} - -// v3StoredReceiptRLP is the original storage encoding of a receipt including some unnecessary fields. -type v3StoredReceiptRLP struct { - PostStateOrStatus []byte - CumulativeGasUsed uint64 - //Bloom Bloom - //TxHash libcommon.Hash - ContractAddress libcommon.Address - Logs []*LogForStorage - GasUsed uint64 + FirstLogIndex uint32 // Logs have their own incremental Index within the block. To allow calculating it without re-executing the whole block, we can store it in the Receipt } // NewReceipt creates a barebone transaction receipt, copying the init fields. @@ -328,99 +301,45 @@ func (r *Receipt) Copy() *Receipt { type ReceiptsForStorage []*ReceiptForStorage -// ReceiptForStorage is a wrapper around a Receipt that flattens and parses the -// entire content of a receipt, as opposed to only the consensus fields originally. +// ReceiptForStorage is a wrapper around a Receipt with RLP serialization +// that omits the Bloom field and deserialization that re-computes it. type ReceiptForStorage Receipt // EncodeRLP implements rlp.Encoder, and flattens all content fields of a receipt // into an RLP stream. func (r *ReceiptForStorage) EncodeRLP(w io.Writer) error { - enc := &storedReceiptRLP{ + var firstLogIndex uint32 + if len(r.Logs) > 0 { + firstLogIndex = uint32(r.Logs[0].Index) + } + return rlp.Encode(w, &storedReceiptRLP{ PostStateOrStatus: (*Receipt)(r).statusEncoding(), CumulativeGasUsed: r.CumulativeGasUsed, - Logs: make([]*LogForStorage, len(r.Logs)), - } - for i, log := range r.Logs { - enc.Logs[i] = (*LogForStorage)(log) - } - return rlp.Encode(w, enc) + FirstLogIndex: firstLogIndex, + }) } // DecodeRLP implements rlp.Decoder, and loads both consensus and implementation // fields of a receipt from an RLP stream. func (r *ReceiptForStorage) DecodeRLP(s *rlp.Stream) error { - // Retrieve the entire receipt blob as we need to try multiple decoders - blob, err := s.Raw() - if err != nil { - return err - } - // Try decoding from the newest format for future proofness, then the older one - // for old nodes that just upgraded. V4 was an intermediate unreleased format so - // we do need to decode it, but it's not common (try last). 
-	if err := decodeStoredReceiptRLP(r, blob); err == nil {
-		return nil
-	}
-	if err := decodeV3StoredReceiptRLP(r, blob); err == nil {
-		return nil
-	}
-	return decodeV4StoredReceiptRLP(r, blob)
-}
-
-func decodeStoredReceiptRLP(r *ReceiptForStorage, blob []byte) error {
 	var stored storedReceiptRLP
-	if err := rlp.DecodeBytes(blob, &stored); err != nil {
+	if err := s.Decode(&stored); err != nil {
 		return err
 	}
 	if err := (*Receipt)(r).setStatus(stored.PostStateOrStatus); err != nil {
 		return err
 	}
 	r.CumulativeGasUsed = stored.CumulativeGasUsed
-	r.Logs = make([]*Log, len(stored.Logs))
-	for i, log := range stored.Logs {
-		r.Logs[i] = (*Log)(log)
-	}
-	//r.Bloom = CreateBloom(Receipts{(*Receipt)(r)})
-
-	return nil
-}
+	r.firstLogIndex = stored.FirstLogIndex
 
-func decodeV4StoredReceiptRLP(r *ReceiptForStorage, blob []byte) error {
-	var stored v4StoredReceiptRLP
-	if err := rlp.DecodeBytes(blob, &stored); err != nil {
-		return err
-	}
-	if err := (*Receipt)(r).setStatus(stored.PostStateOrStatus); err != nil {
-		return err
-	}
-	r.CumulativeGasUsed = stored.CumulativeGasUsed
-	r.TxHash = stored.TxHash
-	r.ContractAddress = stored.ContractAddress
-	r.GasUsed = stored.GasUsed
-	r.Logs = make([]*Log, len(stored.Logs))
-	for i, log := range stored.Logs {
-		r.Logs[i] = (*Log)(log)
-	}
+	//r.Logs = make([]*Log, len(stored.Logs))
+	//for i, log := range stored.Logs {
+	//	r.Logs[i] = (*Log)(log)
+	//}
 	//r.Bloom = CreateBloom(Receipts{(*Receipt)(r)})
 
 	return nil
-}
-func decodeV3StoredReceiptRLP(r *ReceiptForStorage, blob []byte) error {
-	var stored v3StoredReceiptRLP
-	if err := rlp.DecodeBytes(blob, &stored); err != nil {
-		return err
-	}
-	if err := (*Receipt)(r).setStatus(stored.PostStateOrStatus); err != nil {
-		return err
-	}
-	r.CumulativeGasUsed = stored.CumulativeGasUsed
-	r.ContractAddress = stored.ContractAddress
-	r.GasUsed = stored.GasUsed
-	r.Logs = make([]*Log, len(stored.Logs))
-	for i, log := range stored.Logs {
-		r.Logs[i] = (*Log)(log)
-	}
-	return nil
 }
 
 // Receipts implements DerivableList for receipts.
@@ -508,3 +427,49 @@ func (r Receipts) DeriveFields(hash libcommon.Hash, number uint64, txs Transacti
 	}
 	return nil
 }
+
+// DeriveFieldsV3ForSingleReceipt fills one receipt's computed fields based on
+// consensus data and contextual infos like the containing block and transaction.
+func (rl Receipts) DeriveFieldsV3ForSingleReceipt(i int, blockHash libcommon.Hash, blockNum uint64, txn Transaction) (*Receipt, error) {
+	r := rl[i]
+	logIndex := r.firstLogIndex // log indices are unique within the block and start from 0
+
+	sender, ok := txn.cachedSender()
+	if !ok {
+		return nil, fmt.Errorf("tx must have cached sender")
+	}
+
+	blockNumber := new(big.Int).SetUint64(blockNum)
+	// The transaction type and hash can be retrieved from the transaction itself
+	r.Type = txn.Type()
+	r.TxHash = txn.Hash()
+
+	// block location fields
+	r.BlockHash = blockHash
+	r.BlockNumber = blockNumber
+	r.TransactionIndex = uint(i)
+
+	// The contract address can be derived from the transaction itself
+	if txn.GetTo() == nil {
+		// Deploying a contract means sending a transaction with no `To` field;
+		// the created contract's address then depends only on the sender
+		// (`tx.From`) and the nonce of that creating account.
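+		// CreateAddress implements the standard CREATE rule: the new contract
+		// lives at the last 20 bytes of keccak256(rlp([sender, nonce])), so it
+		// can be recomputed here without re-executing the transaction.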
+ r.ContractAddress = crypto.CreateAddress(sender, txn.GetNonce()) + } + // The used gas can be calculated based on previous r + if i == 0 { + r.GasUsed = r.CumulativeGasUsed + } else { + r.GasUsed = r.CumulativeGasUsed - rl[i-1].CumulativeGasUsed + } + // The derived log fields can simply be set from the block and transaction + for j := 0; j < len(r.Logs); j++ { + r.Logs[j].BlockNumber = blockNum + r.Logs[j].BlockHash = blockHash + r.Logs[j].TxHash = r.TxHash + r.Logs[j].TxIndex = uint(i) + r.Logs[j].Index = uint(logIndex) + logIndex++ + } + return r, nil +} diff --git a/core/types/receipt_codecgen_gen.go b/core/types/receipt_codecgen_gen.go deleted file mode 100644 index e2bc7db9db1..00000000000 --- a/core/types/receipt_codecgen_gen.go +++ /dev/null @@ -1,769 +0,0 @@ -//go:build go1.6 -// +build go1.6 - -// Code generated by codecgen - DO NOT EDIT. - -package types - -import ( - "errors" - libcommon "github.com/ledgerwatch/erigon-lib/common" - codec1978 "github.com/ugorji/go/codec" - pkg2_big "math/big" - "runtime" - "strconv" -) - -const ( - // ----- content types ---- - codecSelferCcUTF82 = 1 - codecSelferCcRAW2 = 255 - // ----- value types used ---- - codecSelferValueTypeArray2 = 10 - codecSelferValueTypeMap2 = 9 - codecSelferValueTypeString2 = 6 - codecSelferValueTypeInt2 = 2 - codecSelferValueTypeUint2 = 3 - codecSelferValueTypeFloat2 = 4 - codecSelferValueTypeNil2 = 1 - codecSelferBitsize2 = uint8(32 << (^uint(0) >> 63)) - codecSelferDecContainerLenNil2 = -2147483648 -) - -var ( - errCodecSelferOnlyMapOrArrayEncodeToStruct2 = errors.New(`only encoded map or array can be decoded into a struct`) -) - -type codecSelfer2 struct{} - -func codecSelfer2False() bool { return false } -func codecSelfer2True() bool { return true } - -func init() { - if codec1978.GenVersion != 19 { - _, file, _, _ := runtime.Caller(0) - ver := strconv.FormatInt(int64(codec1978.GenVersion), 10) - panic(errors.New("codecgen version mismatch: current: 19, need " + ver + ". 
Re-generate file: " + file)) - } - if false { // reference the types, but skip this branch at build/run time - var _ libcommon.Address - var _ pkg2_big.Int - } -} - -func (x *Receipt) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer2 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - if !z.EncBinary() && z.IsJSONHandle() { - z.EncJSONMarshal(*x) - } else { - yy2arr2 := z.EncBasicHandle().StructToArray - _ = yy2arr2 - const yyr2 bool = false // struct tag has 'toArray' - z.EncWriteArrayStart(4) - z.EncWriteArrayElem() - r.EncodeUint(uint64(x.Type)) - z.EncWriteArrayElem() - if x.PostState == nil { - r.EncodeNil() - } else { - r.EncodeStringBytesRaw([]byte(x.PostState)) - } // end block: if x.PostState slice == nil - z.EncWriteArrayElem() - r.EncodeUint(uint64(x.Status)) - z.EncWriteArrayElem() - r.EncodeUint(uint64(x.CumulativeGasUsed)) - z.EncWriteArrayEnd() - } - } -} - -func (x *Receipt) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer2 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - if !z.DecBinary() && z.IsJSONHandle() { - z.DecJSONUnmarshal(x) - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeNil2 { - *(x) = Receipt{} - } else if yyct2 == codecSelferValueTypeMap2 { - yyl2 := z.DecReadMapStart() - if yyl2 == 0 { - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - z.DecReadMapEnd() - } else if yyct2 == codecSelferValueTypeArray2 { - yyl2 := z.DecReadArrayStart() - if yyl2 != 0 { - x.codecDecodeSelfFromArray(yyl2, d) - } - z.DecReadArrayEnd() - } else { - panic(errCodecSelferOnlyMapOrArrayEncodeToStruct2) - } - } -} - -func (x *Receipt) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer2 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if z.DecCheckBreak() { - break - } - } - z.DecReadMapElemKey() - yys3 := z.StringView(r.DecodeStringAsBytes()) - z.DecReadMapElemValue() - switch yys3 { - case "Type": - x.Type = (uint8)(z.C.UintV(r.DecodeUint64(), 8)) - case "1": - x.PostState = r.DecodeBytes(([]byte)(x.PostState), false) - case "2": - x.Status = (uint64)(r.DecodeUint64()) - case "3": - x.CumulativeGasUsed = (uint64)(r.DecodeUint64()) - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 -} - -func (x *Receipt) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer2 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = z.DecCheckBreak() - } - if yyb9 { - z.DecReadArrayEnd() - return - } - z.DecReadArrayElem() - x.Type = (uint8)(z.C.UintV(r.DecodeUint64(), 8)) - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = z.DecCheckBreak() - } - if yyb9 { - z.DecReadArrayEnd() - return - } - z.DecReadArrayElem() - x.PostState = r.DecodeBytes(([]byte)(x.PostState), false) - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = z.DecCheckBreak() - } - if yyb9 { - z.DecReadArrayEnd() - return - } - z.DecReadArrayElem() - x.Status = (uint64)(r.DecodeUint64()) - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = z.DecCheckBreak() - } - if yyb9 { - z.DecReadArrayEnd() - return - } - z.DecReadArrayElem() - x.CumulativeGasUsed = (uint64)(r.DecodeUint64()) - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = z.DecCheckBreak() - } - if yyb9 { - 
break - } - z.DecReadArrayElem() - z.DecStructFieldNotFound(yyj9-1, "") - } -} - -func (x *Receipt) IsCodecEmpty() bool { - return !(x.Type != 0 && len(x.PostState) != 0 && x.Status != 0 && x.CumulativeGasUsed != 0 && true) -} - -func (x Receipts) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer2 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - h.encReceipts((Receipts)(x), e) - } // end block: if x slice == nil -} - -func (x *Receipts) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer2 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - h.decReceipts((*Receipts)(x), d) -} - -func (x *Log) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer2 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - if !z.EncBinary() && z.IsJSONHandle() { - z.EncJSONMarshal(*x) - } else { - yy2arr2 := z.EncBasicHandle().StructToArray - _ = yy2arr2 - const yyr2 bool = false // struct tag has 'toArray' - z.EncWriteArrayStart(3) - z.EncWriteArrayElem() - yy6 := &x.Address - if !z.EncBinary() { - z.EncTextMarshal(*yy6) - } else { - h.enccommon_Address((*libcommon.Address)(yy6), e) - } - z.EncWriteArrayElem() - if x.Topics == nil { - r.EncodeNil() - } else { - h.encSlicecommon_Hash(([]libcommon.Hash)(x.Topics), e) - } // end block: if x.Topics slice == nil - z.EncWriteArrayElem() - if x.Data == nil { - r.EncodeNil() - } else { - r.EncodeStringBytesRaw([]byte(x.Data)) - } // end block: if x.Data slice == nil - z.EncWriteArrayEnd() - } - } -} - -func (x *Log) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer2 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - if !z.DecBinary() && z.IsJSONHandle() { - z.DecJSONUnmarshal(x) - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeNil2 { - *(x) = Log{} - } else if yyct2 == codecSelferValueTypeMap2 { - yyl2 := z.DecReadMapStart() - if yyl2 == 0 { - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - z.DecReadMapEnd() - } else if yyct2 == codecSelferValueTypeArray2 { - yyl2 := z.DecReadArrayStart() - if yyl2 != 0 { - x.codecDecodeSelfFromArray(yyl2, d) - } - z.DecReadArrayEnd() - } else { - panic(errCodecSelferOnlyMapOrArrayEncodeToStruct2) - } - } -} - -func (x *Log) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer2 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if z.DecCheckBreak() { - break - } - } - z.DecReadMapElemKey() - yys3 := z.StringView(r.DecodeStringAsBytes()) - z.DecReadMapElemValue() - switch yys3 { - case "1": - if !z.DecBinary() && z.IsJSONHandle() { - z.DecJSONUnmarshal(&x.Address) - } else { - h.deccommon_Address((*libcommon.Address)(&x.Address), d) - } - case "2": - h.decSlicecommon_Hash((*[]libcommon.Hash)(&x.Topics), d) - case "3": - x.Data = r.DecodeBytes(([]byte)(x.Data), false) - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 -} - -func (x *Log) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer2 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = z.DecCheckBreak() - } - if yyb10 { - z.DecReadArrayEnd() - return - } - z.DecReadArrayElem() - if !z.DecBinary() && z.IsJSONHandle() { - z.DecJSONUnmarshal(&x.Address) - } else { - 
h.deccommon_Address((*libcommon.Address)(&x.Address), d) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = z.DecCheckBreak() - } - if yyb10 { - z.DecReadArrayEnd() - return - } - z.DecReadArrayElem() - h.decSlicecommon_Hash((*[]libcommon.Hash)(&x.Topics), d) - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = z.DecCheckBreak() - } - if yyb10 { - z.DecReadArrayEnd() - return - } - z.DecReadArrayElem() - x.Data = r.DecodeBytes(([]byte)(x.Data), false) - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = z.DecCheckBreak() - } - if yyb10 { - break - } - z.DecReadArrayElem() - z.DecStructFieldNotFound(yyj10-1, "") - } -} - -func (x *Log) IsCodecEmpty() bool { - return !(len(x.Address) != 0 && len(x.Topics) != 0 && len(x.Data) != 0 && true) -} - -func (x Logs) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer2 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - h.encLogs((Logs)(x), e) - } // end block: if x slice == nil -} - -func (x *Logs) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer2 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - h.decLogs((*Logs)(x), d) -} - -func (x codecSelfer2) encReceipts(v Receipts, e *codec1978.Encoder) { - var h codecSelfer2 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if v == nil { - r.EncodeNil() - return - } - z.EncWriteArrayStart(len(v)) - for _, yyv1 := range v { - z.EncWriteArrayElem() - if yyv1 == nil { - r.EncodeNil() - } else { - yyv1.CodecEncodeSelf(e) - } - } - z.EncWriteArrayEnd() -} - -func (x codecSelfer2) decReceipts(v *Receipts, d *codec1978.Decoder) { - var h codecSelfer2 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyh1.IsNil { - if yyv1 != nil { - yyv1 = nil - yyc1 = true - } - } else if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []*Receipt{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else { - yyhl1 := yyl1 > 0 - var yyrl1 int - _ = yyrl1 - if yyhl1 { - if yyl1 > cap(yyv1) { - yyrl1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 8) - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]*Receipt, yyrl1) - } - yyc1 = true - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - } - var yyj1 int - for yyj1 = 0; (yyhl1 && yyj1 < yyl1) || !(yyhl1 || z.DecCheckBreak()); yyj1++ { // bounds-check-elimination - if yyj1 == 0 && yyv1 == nil { - if yyhl1 { - yyrl1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 8) - } else { - yyrl1 = 8 - } - yyv1 = make([]*Receipt, yyrl1) - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - var yydb1 bool - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, nil) - yyc1 = true - } - if yydb1 { - z.DecSwallow() - } else { - if r.TryNil() { - yyv1[yyj1] = nil - } else { - if yyv1[yyj1] == nil { - yyv1[yyj1] = new(Receipt) - } - yyv1[yyj1].CodecDecodeSelf(d) - } - } - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = make([]*Receipt, 0) - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer2) enccommon_Address(v *libcommon.Address, e *codec1978.Encoder) { - var h codecSelfer2 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if v == nil { - r.EncodeNil() - return - } - r.EncodeStringBytesRaw(((*[20]byte)(v))[:]) -} - -func (x codecSelfer2) deccommon_Address(v *libcommon.Address, d 
*codec1978.Decoder) { - var h codecSelfer2 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - r.DecodeBytes(((*[20]byte)(v))[:], true) -} - -func (x codecSelfer2) encSlicecommon_Hash(v []libcommon.Hash, e *codec1978.Encoder) { - var h codecSelfer2 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if v == nil { - r.EncodeNil() - return - } - z.EncWriteArrayStart(len(v)) - for _, yyv1 := range v { - z.EncWriteArrayElem() - yy2 := &yyv1 - if !z.EncBinary() { - z.EncTextMarshal(*yy2) - } else { - h.enccommon_Hash((*libcommon.Hash)(yy2), e) - } - } - z.EncWriteArrayEnd() -} - -func (x codecSelfer2) decSlicecommon_Hash(v *[]libcommon.Hash, d *codec1978.Decoder) { - var h codecSelfer2 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyh1.IsNil { - if yyv1 != nil { - yyv1 = nil - yyc1 = true - } - } else if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []libcommon.Hash{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else { - yyhl1 := yyl1 > 0 - var yyrl1 int - _ = yyrl1 - if yyhl1 { - if yyl1 > cap(yyv1) { - yyrl1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32) - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]libcommon.Hash, yyrl1) - } - yyc1 = true - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - } - var yyj1 int - for yyj1 = 0; (yyhl1 && yyj1 < yyl1) || !(yyhl1 || z.DecCheckBreak()); yyj1++ { // bounds-check-elimination - if yyj1 == 0 && yyv1 == nil { - if yyhl1 { - yyrl1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32) - } else { - yyrl1 = 8 - } - yyv1 = make([]libcommon.Hash, yyrl1) - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - var yydb1 bool - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, libcommon.Hash{}) - yyc1 = true - } - if yydb1 { - z.DecSwallow() - } else { - if !z.DecBinary() && z.IsJSONHandle() { - z.DecJSONUnmarshal(&yyv1[yyj1]) - } else { - h.deccommon_Hash((*libcommon.Hash)(&yyv1[yyj1]), d) - } - } - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = make([]libcommon.Hash, 0) - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer2) enccommon_Hash(v *libcommon.Hash, e *codec1978.Encoder) { - var h codecSelfer2 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if v == nil { - r.EncodeNil() - return - } - r.EncodeStringBytesRaw(((*[32]byte)(v))[:]) -} - -func (x codecSelfer2) deccommon_Hash(v *libcommon.Hash, d *codec1978.Decoder) { - var h codecSelfer2 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - r.DecodeBytes(((*[32]byte)(v))[:], true) -} - -func (x codecSelfer2) encLogs(v Logs, e *codec1978.Encoder) { - var h codecSelfer2 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if v == nil { - r.EncodeNil() - return - } - z.EncWriteArrayStart(len(v)) - for _, yyv1 := range v { - z.EncWriteArrayElem() - if yyv1 == nil { - r.EncodeNil() - } else { - yyv1.CodecEncodeSelf(e) - } - } - z.EncWriteArrayEnd() -} - -func (x codecSelfer2) decLogs(v *Logs, d *codec1978.Decoder) { - var h codecSelfer2 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyh1.IsNil { - if yyv1 != nil { - yyv1 = nil - yyc1 = true - } - } else if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []*Log{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } 
else { - yyhl1 := yyl1 > 0 - var yyrl1 int - _ = yyrl1 - if yyhl1 { - if yyl1 > cap(yyv1) { - yyrl1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 8) - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]*Log, yyrl1) - } - yyc1 = true - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - } - var yyj1 int - for yyj1 = 0; (yyhl1 && yyj1 < yyl1) || !(yyhl1 || z.DecCheckBreak()); yyj1++ { // bounds-check-elimination - if yyj1 == 0 && yyv1 == nil { - if yyhl1 { - yyrl1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 8) - } else { - yyrl1 = 8 - } - yyv1 = make([]*Log, yyrl1) - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - var yydb1 bool - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, nil) - yyc1 = true - } - if yydb1 { - z.DecSwallow() - } else { - if r.TryNil() { - yyv1[yyj1] = nil - } else { - if yyv1[yyj1] == nil { - yyv1[yyj1] = new(Log) - } - yyv1[yyj1].CodecDecodeSelf(d) - } - } - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = make([]*Log, 0) - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} diff --git a/core/types/receipt_test.go b/core/types/receipt_test.go index 4eb2f1a9d67..27d78251c7f 100644 --- a/core/types/receipt_test.go +++ b/core/types/receipt_test.go @@ -21,11 +21,11 @@ import ( "errors" "math" "math/big" - "reflect" "testing" "github.com/holiman/uint256" libcommon "github.com/ledgerwatch/erigon-lib/common" + "github.com/stretchr/testify/assert" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/u256" @@ -66,11 +66,13 @@ func TestLegacyReceiptDecoding(t *testing.T) { Address: libcommon.BytesToAddress([]byte{0x11}), Topics: []libcommon.Hash{libcommon.HexToHash("dead"), libcommon.HexToHash("beef")}, Data: []byte{0x01, 0x00, 0xff}, + Index: 999, }, { Address: libcommon.BytesToAddress([]byte{0x01, 0x11}), Topics: []libcommon.Hash{libcommon.HexToHash("dead"), libcommon.HexToHash("beef")}, Data: []byte{0x01, 0x00, 0xff}, + Index: 1000, }, }, TxHash: tx.Hash(), @@ -98,34 +100,33 @@ func TestLegacyReceiptDecoding(t *testing.T) { if dec.CumulativeGasUsed != receipt.CumulativeGasUsed { t.Fatalf("Receipt CumulativeGasUsed mismatch, want %v, have %v", receipt.CumulativeGasUsed, dec.CumulativeGasUsed) } - if len(dec.Logs) != len(receipt.Logs) { - t.Fatalf("Receipt log number mismatch, want %v, have %v", len(receipt.Logs), len(dec.Logs)) - } - for i := 0; i < len(dec.Logs); i++ { - if dec.Logs[i].Address != receipt.Logs[i].Address { - t.Fatalf("Receipt log %d address mismatch, want %v, have %v", i, receipt.Logs[i].Address, dec.Logs[i].Address) - } - if !reflect.DeepEqual(dec.Logs[i].Topics, receipt.Logs[i].Topics) { - t.Fatalf("Receipt log %d topics mismatch, want %v, have %v", i, receipt.Logs[i].Topics, dec.Logs[i].Topics) - } - if !bytes.Equal(dec.Logs[i].Data, receipt.Logs[i].Data) { - t.Fatalf("Receipt log %d data mismatch, want %v, have %v", i, receipt.Logs[i].Data, dec.Logs[i].Data) - } - } + assert.Equal(t, uint32(receipt.Logs[0].Index), dec.firstLogIndex) + //if len(dec.Logs) != len(receipt.Logs) { + // t.Fatalf("Receipt log number mismatch, want %v, have %v", len(receipt.Logs), len(dec.Logs)) + //} + //for i := 0; i < len(dec.Logs); i++ { + // if dec.Logs[i].Address != receipt.Logs[i].Address { + // t.Fatalf("Receipt log %d address mismatch, want %v, have %v", i, receipt.Logs[i].Address, dec.Logs[i].Address) + // } + // if !reflect.DeepEqual(dec.Logs[i].Topics, receipt.Logs[i].Topics) { + // t.Fatalf("Receipt log %d topics 
mismatch, want %v, have %v", i, receipt.Logs[i].Topics, dec.Logs[i].Topics) + // } + // if !bytes.Equal(dec.Logs[i].Data, receipt.Logs[i].Data) { + // t.Fatalf("Receipt log %d data mismatch, want %v, have %v", i, receipt.Logs[i].Data, dec.Logs[i].Data) + // } + //} }) } } func encodeAsStoredReceiptRLP(want *Receipt) ([]byte, error) { - stored := &storedReceiptRLP{ - PostStateOrStatus: want.statusEncoding(), - CumulativeGasUsed: want.CumulativeGasUsed, - Logs: make([]*LogForStorage, len(want.Logs)), - } - for i, log := range want.Logs { - stored.Logs[i] = (*LogForStorage)(log) + w := bytes.NewBuffer(nil) + casted := ReceiptForStorage(*want) + err := casted.EncodeRLP(w) + if err != nil { + return nil, err } - return rlp.EncodeToBytes(stored) + return w.Bytes(), nil } // Tests that receipt data can be correctly derived from the contextual infos @@ -176,6 +177,7 @@ func TestDeriveFields(t *testing.T) { TxHash: txs[0].Hash(), ContractAddress: libcommon.BytesToAddress([]byte{0x01, 0x11, 0x11}), GasUsed: 1, + firstLogIndex: 0, }, &Receipt{ PostState: libcommon.Hash{2}.Bytes(), @@ -187,6 +189,7 @@ func TestDeriveFields(t *testing.T) { TxHash: txs[1].Hash(), ContractAddress: libcommon.BytesToAddress([]byte{0x02, 0x22, 0x22}), GasUsed: 2, + firstLogIndex: 2, }, &Receipt{ Type: AccessListTxType, @@ -199,69 +202,136 @@ func TestDeriveFields(t *testing.T) { TxHash: txs[2].Hash(), ContractAddress: libcommon.BytesToAddress([]byte{0x03, 0x33, 0x33}), GasUsed: 3, + firstLogIndex: 4, }, } // Clear all the computed fields and re-derive them number := big.NewInt(1) hash := libcommon.BytesToHash([]byte{0x03, 0x14}) - clearComputedFieldsOnReceipts(t, receipts) - if err := receipts.DeriveFields(hash, number.Uint64(), txs, []libcommon.Address{libcommon.BytesToAddress([]byte{0x0}), libcommon.BytesToAddress([]byte{0x0}), libcommon.BytesToAddress([]byte{0x0})}); err != nil { - t.Fatalf("DeriveFields(...) = %v, want ", err) - } - // Iterate over all the computed fields and check that they're correct - signer := MakeSigner(params.TestChainConfig, number.Uint64(), 0) - - logIndex := uint(0) - for i := range receipts { - if receipts[i].Type != txs[i].Type() { - t.Errorf("receipts[%d].Type = %d, want %d", i, receipts[i].Type, txs[i].Type()) - } - if receipts[i].TxHash != txs[i].Hash() { - t.Errorf("receipts[%d].TxHash = %s, want %s", i, receipts[i].TxHash.String(), txs[i].Hash().String()) - } - if receipts[i].BlockHash != hash { - t.Errorf("receipts[%d].BlockHash = %s, want %s", i, receipts[i].BlockHash.String(), hash.String()) - } - if receipts[i].BlockNumber.Cmp(number) != 0 { - t.Errorf("receipts[%c].BlockNumber = %s, want %s", i, receipts[i].BlockNumber.String(), number.String()) - } - if receipts[i].TransactionIndex != uint(i) { - t.Errorf("receipts[%d].TransactionIndex = %d, want %d", i, receipts[i].TransactionIndex, i) + t.Run("DeriveV1", func(t *testing.T) { + clearComputedFieldsOnReceipts(t, receipts) + if err := receipts.DeriveFields(hash, number.Uint64(), txs, []libcommon.Address{libcommon.BytesToAddress([]byte{0x0}), libcommon.BytesToAddress([]byte{0x0}), libcommon.BytesToAddress([]byte{0x0})}); err != nil { + t.Fatalf("DeriveFields(...) 
= %v, want ", err) } - if receipts[i].GasUsed != txs[i].GetGas() { - t.Errorf("receipts[%d].GasUsed = %d, want %d", i, receipts[i].GasUsed, txs[i].GetGas()) - } - if txs[i].GetTo() != nil && receipts[i].ContractAddress != (libcommon.Address{}) { - t.Errorf("receipts[%d].ContractAddress = %s, want %s", i, receipts[i].ContractAddress.String(), (libcommon.Address{}).String()) - } - from, _ := txs[i].Sender(*signer) - contractAddress := crypto.CreateAddress(from, txs[i].GetNonce()) - if txs[i].GetTo() == nil && receipts[i].ContractAddress != contractAddress { - t.Errorf("receipts[%d].ContractAddress = %s, want %s", i, receipts[i].ContractAddress.String(), contractAddress.String()) + // Iterate over all the computed fields and check that they're correct + signer := MakeSigner(params.TestChainConfig, number.Uint64(), 0) + + logIndex := uint(0) + for i, r := range receipts { + if r.Type != txs[i].Type() { + t.Errorf("receipts[%d].Type = %d, want %d", i, r.Type, txs[i].Type()) + } + if r.TxHash != txs[i].Hash() { + t.Errorf("receipts[%d].TxHash = %s, want %s", i, r.TxHash.String(), txs[i].Hash().String()) + } + if r.BlockHash != hash { + t.Errorf("receipts[%d].BlockHash = %s, want %s", i, r.BlockHash.String(), hash.String()) + } + if r.BlockNumber.Cmp(number) != 0 { + t.Errorf("receipts[%c].BlockNumber = %s, want %s", i, r.BlockNumber.String(), number.String()) + } + if r.TransactionIndex != uint(i) { + t.Errorf("receipts[%d].TransactionIndex = %d, want %d", i, r.TransactionIndex, i) + } + if r.GasUsed != txs[i].GetGas() { + t.Errorf("receipts[%d].GasUsed = %d, want %d", i, r.GasUsed, txs[i].GetGas()) + } + if txs[i].GetTo() != nil && r.ContractAddress != (libcommon.Address{}) { + t.Errorf("receipts[%d].ContractAddress = %s, want %s", i, r.ContractAddress.String(), (libcommon.Address{}).String()) + } + from, _ := txs[i].Sender(*signer) + contractAddress := crypto.CreateAddress(from, txs[i].GetNonce()) + if txs[i].GetTo() == nil && r.ContractAddress != contractAddress { + t.Errorf("receipts[%d].ContractAddress = %s, want %s", i, r.ContractAddress.String(), contractAddress.String()) + } + for j := range r.Logs { + if r.Logs[j].BlockNumber != number.Uint64() { + t.Errorf("receipts[%d].Logs[%d].BlockNumber = %d, want %d", i, j, r.Logs[j].BlockNumber, number.Uint64()) + } + if r.Logs[j].BlockHash != hash { + t.Errorf("receipts[%d].Logs[%d].BlockHash = %s, want %s", i, j, r.Logs[j].BlockHash.String(), hash.String()) + } + if r.Logs[j].TxHash != txs[i].Hash() { + t.Errorf("receipts[%d].Logs[%d].TxHash = %s, want %s", i, j, r.Logs[j].TxHash.String(), txs[i].Hash().String()) + } + if r.Logs[j].TxHash != txs[i].Hash() { + t.Errorf("receipts[%d].Logs[%d].TxHash = %s, want %s", i, j, r.Logs[j].TxHash.String(), txs[i].Hash().String()) + } + if r.Logs[j].TxIndex != uint(i) { + t.Errorf("receipts[%d].Logs[%d].TransactionIndex = %d, want %d", i, j, r.Logs[j].TxIndex, i) + } + if r.Logs[j].Index != logIndex { + t.Errorf("receipts[%d].Logs[%d].Index = %d, want %d", i, j, r.Logs[j].Index, logIndex) + } + logIndex++ + } } - for j := range receipts[i].Logs { - if receipts[i].Logs[j].BlockNumber != number.Uint64() { - t.Errorf("receipts[%d].Logs[%d].BlockNumber = %d, want %d", i, j, receipts[i].Logs[j].BlockNumber, number.Uint64()) + }) + + t.Run("DeriveV3", func(t *testing.T) { + clearComputedFieldsOnReceipts(t, receipts) + // Iterate over all the computed fields and check that they're correct + signer := MakeSigner(params.TestChainConfig, number.Uint64(), 0) + + logIndex := uint(0) + for i := range receipts { + 
+			txs[i].SetSender(libcommon.BytesToAddress([]byte{0x0}))
+			r, err := receipts.DeriveFieldsV3ForSingleReceipt(i, hash, number.Uint64(), txs[i])
+			if err != nil {
+				t.Fatal(err)
+			}
+
+			if r.Type != txs[i].Type() {
+				t.Errorf("receipts[%d].Type = %d, want %d", i, r.Type, txs[i].Type())
 			}
-			if receipts[i].Logs[j].BlockHash != hash {
-				t.Errorf("receipts[%d].Logs[%d].BlockHash = %s, want %s", i, j, receipts[i].Logs[j].BlockHash.String(), hash.String())
+			if r.TxHash != txs[i].Hash() {
+				t.Errorf("receipts[%d].TxHash = %s, want %s", i, r.TxHash.String(), txs[i].Hash().String())
 			}
-			if receipts[i].Logs[j].TxHash != txs[i].Hash() {
-				t.Errorf("receipts[%d].Logs[%d].TxHash = %s, want %s", i, j, receipts[i].Logs[j].TxHash.String(), txs[i].Hash().String())
+			if r.BlockHash != hash {
+				t.Errorf("receipts[%d].BlockHash = %s, want %s", i, r.BlockHash.String(), hash.String())
 			}
-			if receipts[i].Logs[j].TxHash != txs[i].Hash() {
-				t.Errorf("receipts[%d].Logs[%d].TxHash = %s, want %s", i, j, receipts[i].Logs[j].TxHash.String(), txs[i].Hash().String())
+			if r.BlockNumber.Cmp(number) != 0 {
+				t.Errorf("receipts[%d].BlockNumber = %s, want %s", i, r.BlockNumber.String(), number.String())
 			}
-			if receipts[i].Logs[j].TxIndex != uint(i) {
-				t.Errorf("receipts[%d].Logs[%d].TransactionIndex = %d, want %d", i, j, receipts[i].Logs[j].TxIndex, i)
+			if r.TransactionIndex != uint(i) {
+				t.Errorf("receipts[%d].TransactionIndex = %d, want %d", i, r.TransactionIndex, i)
 			}
-			if receipts[i].Logs[j].Index != logIndex {
-				t.Errorf("receipts[%d].Logs[%d].Index = %d, want %d", i, j, receipts[i].Logs[j].Index, logIndex)
+			if r.GasUsed != txs[i].GetGas() {
+				t.Errorf("receipts[%d].GasUsed = %d, want %d", i, r.GasUsed, txs[i].GetGas())
+			}
+			if txs[i].GetTo() != nil && r.ContractAddress != (libcommon.Address{}) {
+				t.Errorf("receipts[%d].ContractAddress = %s, want %s", i, r.ContractAddress.String(), (libcommon.Address{}).String())
+			}
+			from, _ := txs[i].Sender(*signer)
+			contractAddress := crypto.CreateAddress(from, txs[i].GetNonce())
+			if txs[i].GetTo() == nil && r.ContractAddress != contractAddress {
+				t.Errorf("receipts[%d].ContractAddress = %s, want %s", i, r.ContractAddress.String(), contractAddress.String())
+			}
+			for j := range r.Logs {
+				if r.Logs[j].BlockNumber != number.Uint64() {
+					t.Errorf("receipts[%d].Logs[%d].BlockNumber = %d, want %d", i, j, r.Logs[j].BlockNumber, number.Uint64())
+				}
+				if r.Logs[j].BlockHash != hash {
+					t.Errorf("receipts[%d].Logs[%d].BlockHash = %s, want %s", i, j, r.Logs[j].BlockHash.String(), hash.String())
+				}
+				if r.Logs[j].TxHash != txs[i].Hash() {
+					t.Errorf("receipts[%d].Logs[%d].TxHash = %s, want %s", i, j, r.Logs[j].TxHash.String(), txs[i].Hash().String())
+				}
+				if r.Logs[j].TxIndex != uint(i) {
+					t.Errorf("receipts[%d].Logs[%d].TransactionIndex = %d, want %d", i, j, r.Logs[j].TxIndex, i)
+				}
+				if r.Logs[j].Index != logIndex {
+					t.Errorf("receipts[%d].Logs[%d].Index = %d, want %d", i, j, r.Logs[j].Index, logIndex)
+				}
+				logIndex++
 			}
-			logIndex++
 		}
-	}
+	})
+
 }
 
 // TestTypedReceiptEncodingDecoding reproduces a flaw that existed in the receipt
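A minimal sketch of the recovery rule exercised above, assuming a receipts slice whose CumulativeGasUsed and firstLogIndex fields are populated (it mirrors DeriveFieldsV3ForSingleReceipt; the variable names are illustrative):

	// Per-transaction gas is the delta between consecutive cumulative counters.
	gasUsed := receipts[i].CumulativeGasUsed
	if i > 0 {
		gasUsed -= receipts[i-1].CumulativeGasUsed
	}
	// Log indices are block-wide and continue from the first stored index.
	logIdx := receipts[i].firstLogIndex
	for _, l := range receipts[i].Logs {
		l.Index = uint(logIdx)
		logIdx++
	}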
diff --git a/core/types/transaction.go b/core/types/transaction.go
index fb781275283..3dabeabbb8d 100644
--- a/core/types/transaction.go
+++ b/core/types/transaction.go
@@ -87,6 +87,7 @@ type Transaction interface {
 	// signing method. The cache is invalidated if the cached signer does
 	// not match the signer used in the current call.
 	Sender(Signer) (libcommon.Address, error)
+	cachedSender() (libcommon.Address, bool)
 	GetSender() (libcommon.Address, bool)
 	SetSender(libcommon.Address)
 	IsContractDeploy() bool

From afd5c36417fe6a3e50f893b5b61711bce9e75a91 Mon Sep 17 00:00:00 2001
From: battlmonstr
Date: Mon, 13 May 2024 10:56:38 +0200
Subject: [PATCH 31/48] polygon/heimdall: scrape Heimdall data into a local DB
 (#10197)

Scrape checkpoints, milestones and spans from Heimdall into a local DB
through a generic entityFetcher/entityStore pair, and drop the
checkpoint/span event APIs that the Heimdall interface no longer needs.
---
 cmd/devnet/services/polygon/heimdall.go |   2 +-
 polygon/bor/bor_test.go                 |   2 +-
 polygon/heimdall/checkpoint.go          |   7 +
 polygon/heimdall/client.go              |   4 +-
 polygon/heimdall/client_mock.go         |  10 +-
 polygon/heimdall/closed_range.go        |  29 +++
 polygon/heimdall/entity.go              |   5 +
 polygon/heimdall/entity_fetcher.go      | 122 +++++++++
 polygon/heimdall/entity_store.go        |  70 ++++++
 polygon/heimdall/heimdall.go            | 129 +---------
 polygon/heimdall/heimdall_mock.go       | 314 ------------------------
 polygon/heimdall/heimdall_test.go       |   2 +-
 polygon/heimdall/milestone.go           |   7 +
 polygon/heimdall/scraper.go             | 198 +++++++++++++++
 polygon/heimdall/span.go                |   7 +
 polygon/heimdall/store.go               |   2 +-
 polygon/sync/service.go                 |  18 ++
 17 files changed, 478 insertions(+), 450 deletions(-)
 create mode 100644 polygon/heimdall/closed_range.go
 create mode 100644 polygon/heimdall/entity.go
 create mode 100644 polygon/heimdall/entity_fetcher.go
 create mode 100644 polygon/heimdall/entity_store.go
 create mode 100644 polygon/heimdall/scraper.go

diff --git a/cmd/devnet/services/polygon/heimdall.go b/cmd/devnet/services/polygon/heimdall.go
index 7a2e21c0d7d..3f77b835086 100644
--- a/cmd/devnet/services/polygon/heimdall.go
+++ b/cmd/devnet/services/polygon/heimdall.go
@@ -210,7 +210,7 @@ func (h *Heimdall) FetchCheckpointCount(ctx context.Context) (int64, error) {
 	return 0, fmt.Errorf("TODO")
 }
 
-func (h *Heimdall) FetchCheckpoints(ctx context.Context, page uint64, limit uint64) (heimdall.Checkpoints, error) {
+func (h *Heimdall) FetchCheckpoints(ctx context.Context, page uint64, limit uint64) ([]*heimdall.Checkpoint, error) {
 	return nil, fmt.Errorf("TODO")
 }
 
diff --git a/polygon/bor/bor_test.go b/polygon/bor/bor_test.go
index 194d77d4f79..3e7dfcb8b5d 100644
--- a/polygon/bor/bor_test.go
+++ b/polygon/bor/bor_test.go
@@ -116,7 +116,7 @@ func (h test_heimdall) FetchCheckpointCount(ctx context.Context) (int64, error)
 	return 0, fmt.Errorf("TODO")
 }
 
-func (h *test_heimdall) FetchCheckpoints(ctx context.Context, page uint64, limit uint64) (heimdall.Checkpoints, error) {
+func (h *test_heimdall) FetchCheckpoints(ctx context.Context, page uint64, limit uint64) ([]*heimdall.Checkpoint, error) {
 	return nil, fmt.Errorf("TODO")
 }
 
diff --git a/polygon/heimdall/checkpoint.go b/polygon/heimdall/checkpoint.go
index a1d7a763c54..88e595a5e5e 100644
--- a/polygon/heimdall/checkpoint.go
+++ b/polygon/heimdall/checkpoint.go
@@ -28,6 +28,13 @@ func (c Checkpoint) EndBlock() *big.Int {
 	return c.Fields.EndBlock
 }
 
+func (c Checkpoint) BlockNumRange() ClosedRange {
+	return ClosedRange{
+		Start: c.StartBlock().Uint64(),
+		End:   c.EndBlock().Uint64(),
+	}
+}
+
 func (c Checkpoint) RootHash() libcommon.Hash {
 	return c.Fields.RootHash
 }
 
diff --git a/polygon/heimdall/client.go b/polygon/heimdall/client.go
index df33539aa60..4eee9e64d19 100644
--- a/polygon/heimdall/client.go
+++ b/polygon/heimdall/client.go
@@ -47,7 +47,7 @@ type HeimdallClient interface {
 	FetchCheckpoint(ctx context.Context, number int64) (*Checkpoint, error)
 	FetchCheckpointCount(ctx context.Context) (int64, error)
-
FetchCheckpoints(ctx context.Context, page uint64, limit uint64) (Checkpoints, error) + FetchCheckpoints(ctx context.Context, page uint64, limit uint64) ([]*Checkpoint, error) FetchMilestone(ctx context.Context, number int64) (*Milestone, error) FetchMilestoneCount(ctx context.Context) (int64, error) @@ -250,7 +250,7 @@ func (c *Client) FetchCheckpoint(ctx context.Context, number int64) (*Checkpoint return &response.Result, nil } -func (c *Client) FetchCheckpoints(ctx context.Context, page uint64, limit uint64) (Checkpoints, error) { +func (c *Client) FetchCheckpoints(ctx context.Context, page uint64, limit uint64) ([]*Checkpoint, error) { url, err := checkpointListURL(c.urlString, page, limit) if err != nil { return nil, err diff --git a/polygon/heimdall/client_mock.go b/polygon/heimdall/client_mock.go index 7d3d81c2b06..1b1718b47f7 100644 --- a/polygon/heimdall/client_mock.go +++ b/polygon/heimdall/client_mock.go @@ -155,10 +155,10 @@ func (c *MockHeimdallClientFetchCheckpointCountCall) DoAndReturn(f func(context. } // FetchCheckpoints mocks base method. -func (m *MockHeimdallClient) FetchCheckpoints(arg0 context.Context, arg1, arg2 uint64) (Checkpoints, error) { +func (m *MockHeimdallClient) FetchCheckpoints(arg0 context.Context, arg1, arg2 uint64) ([]*Checkpoint, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "FetchCheckpoints", arg0, arg1, arg2) - ret0, _ := ret[0].(Checkpoints) + ret0, _ := ret[0].([]*Checkpoint) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -176,19 +176,19 @@ type MockHeimdallClientFetchCheckpointsCall struct { } // Return rewrite *gomock.Call.Return -func (c *MockHeimdallClientFetchCheckpointsCall) Return(arg0 Checkpoints, arg1 error) *MockHeimdallClientFetchCheckpointsCall { +func (c *MockHeimdallClientFetchCheckpointsCall) Return(arg0 []*Checkpoint, arg1 error) *MockHeimdallClientFetchCheckpointsCall { c.Call = c.Call.Return(arg0, arg1) return c } // Do rewrite *gomock.Call.Do -func (c *MockHeimdallClientFetchCheckpointsCall) Do(f func(context.Context, uint64, uint64) (Checkpoints, error)) *MockHeimdallClientFetchCheckpointsCall { +func (c *MockHeimdallClientFetchCheckpointsCall) Do(f func(context.Context, uint64, uint64) ([]*Checkpoint, error)) *MockHeimdallClientFetchCheckpointsCall { c.Call = c.Call.Do(f) return c } // DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockHeimdallClientFetchCheckpointsCall) DoAndReturn(f func(context.Context, uint64, uint64) (Checkpoints, error)) *MockHeimdallClientFetchCheckpointsCall { +func (c *MockHeimdallClientFetchCheckpointsCall) DoAndReturn(f func(context.Context, uint64, uint64) ([]*Checkpoint, error)) *MockHeimdallClientFetchCheckpointsCall { c.Call = c.Call.DoAndReturn(f) return c } diff --git a/polygon/heimdall/closed_range.go b/polygon/heimdall/closed_range.go new file mode 100644 index 00000000000..1c14986df26 --- /dev/null +++ b/polygon/heimdall/closed_range.go @@ -0,0 +1,29 @@ +package heimdall + +type ClosedRange struct { + Start uint64 + End uint64 +} + +func (r ClosedRange) Len() uint64 { + return r.End + 1 - r.Start +} + +func ClosedRangeMap[TResult any](r ClosedRange, projection func(i uint64) (TResult, error)) ([]TResult, error) { + results := make([]TResult, 0, r.Len()) + + for i := r.Start; i <= r.End; i++ { + entity, err := projection(i) + if err != nil { + return nil, err + } + + results = append(results, entity) + } + + return results, nil +} + +func (r ClosedRange) Map(projection func(i uint64) (any, error)) ([]any, error) { + return ClosedRangeMap(r, projection) +} diff --git 
a/polygon/heimdall/entity.go b/polygon/heimdall/entity.go
new file mode 100644
index 00000000000..316ea09189a
--- /dev/null
+++ b/polygon/heimdall/entity.go
@@ -0,0 +1,5 @@
+package heimdall
+
+type Entity interface {
+	BlockNumRange() ClosedRange
+}
diff --git a/polygon/heimdall/entity_fetcher.go b/polygon/heimdall/entity_fetcher.go
new file mode 100644
index 00000000000..bb5bad50ab3
--- /dev/null
+++ b/polygon/heimdall/entity_fetcher.go
@@ -0,0 +1,122 @@
+package heimdall
+
+import (
+	"cmp"
+	"context"
+	"fmt"
+	"slices"
+	"time"
+
+	"github.com/ledgerwatch/log/v3"
+)
+
+type entityFetcher interface {
+	FetchLastEntityId(ctx context.Context) (uint64, error)
+	FetchEntitiesRange(ctx context.Context, idRange ClosedRange) ([]Entity, error)
+}
+
+type entityFetcherImpl struct {
+	name string
+
+	fetchLastEntityId func(ctx context.Context) (int64, error)
+	fetchEntity       func(ctx context.Context, id int64) (Entity, error)
+	fetchEntitiesPage func(ctx context.Context, page uint64, limit uint64) ([]Entity, error)
+
+	logger log.Logger
+}
+
+func newEntityFetcher(
+	name string,
+	fetchLastEntityId func(ctx context.Context) (int64, error),
+	fetchEntity func(ctx context.Context, id int64) (Entity, error),
+	fetchEntitiesPage func(ctx context.Context, page uint64, limit uint64) ([]Entity, error),
+	logger log.Logger,
+) entityFetcher {
+	return &entityFetcherImpl{
+		name:              name,
+		fetchLastEntityId: fetchLastEntityId,
+		fetchEntity:       fetchEntity,
+		fetchEntitiesPage: fetchEntitiesPage,
+		logger:            logger,
+	}
+}
+
+func (f *entityFetcherImpl) FetchLastEntityId(ctx context.Context) (uint64, error) {
+	id, err := f.fetchLastEntityId(ctx)
+	return uint64(id), err
+}
+
+func (f *entityFetcherImpl) FetchEntitiesRange(ctx context.Context, idRange ClosedRange) ([]Entity, error) {
+	count := idRange.Len()
+
+	const batchFetchThreshold = 100
+	if (count > batchFetchThreshold) && (f.fetchEntitiesPage != nil) {
+		allEntities, err := f.FetchAllEntities(ctx)
+		if err != nil {
+			return nil, err
+		}
+		startIndex := idRange.Start - 1
+		return allEntities[startIndex : startIndex+count], nil
+	}
+
+	return f.FetchEntitiesRangeSequentially(ctx, idRange)
+}
+
+func (f *entityFetcherImpl) FetchEntitiesRangeSequentially(ctx context.Context, idRange ClosedRange) ([]Entity, error) {
+	return ClosedRangeMap(idRange, func(id uint64) (Entity, error) {
+		return f.fetchEntity(ctx, int64(id))
+	})
+}
+
+func (f *entityFetcherImpl) FetchAllEntities(ctx context.Context) ([]Entity, error) {
+	// TODO: once the Heimdall API is fixed to return sorted items in pages,
+	// we can fetch only the new pages after lastStoredCheckpointId using the
+	// checkpoints/list paging API (for now we have to fetch all of them),
+	// and also remove the sorting we do after fetching.
+
+	var entities []Entity
+
+	fetchStartTime := time.Now()
+	progressLogTicker := time.NewTicker(30 * time.Second)
+	defer progressLogTicker.Stop()
+
+	for page := uint64(1); ; page++ {
+		entitiesPage, err := f.fetchEntitiesPage(ctx, page, 10_000)
+		if err != nil {
+			return nil, err
+		}
+		if len(entitiesPage) == 0 {
+			break
+		}
+
+		entities = append(entities, entitiesPage...)
+
+		select {
+		case <-progressLogTicker.C:
+			f.logger.Debug(
+				heimdallLogPrefix(fmt.Sprintf("%s progress", f.name)),
+				"page", page,
+				"len", len(entities),
+			)
+		default:
+			// carry on
+		}
+	}
+
+	slices.SortFunc(entities, func(e1, e2 Entity) int {
+		n1 := e1.BlockNumRange().Start
+		n2 := e2.BlockNumRange().Start
+		return cmp.Compare(n1, n2)
+	})
+
+	f.logger.Debug(
heimdallLogPrefix(fmt.Sprintf("%s done", f.name)), + "len", len(entities), + "duration", time.Since(fetchStartTime), + ) + + return entities, nil +} diff --git a/polygon/heimdall/entity_store.go b/polygon/heimdall/entity_store.go new file mode 100644 index 00000000000..5024b49e8dd --- /dev/null +++ b/polygon/heimdall/entity_store.go @@ -0,0 +1,70 @@ +package heimdall + +import ( + "context" + "encoding/binary" + "encoding/json" + + "github.com/ledgerwatch/erigon-lib/kv" +) + +type entityStore interface { + GetLastEntityId(ctx context.Context) (uint64, bool, error) + PutEntity(ctx context.Context, id uint64, entity Entity) error +} + +type entityStoreImpl struct { + tx kv.RwTx + table string + + makeEntity func() Entity + getLastEntityId func(ctx context.Context, tx kv.Tx) (uint64, bool, error) + loadEntityBytes func(ctx context.Context, tx kv.Getter, id uint64) ([]byte, error) +} + +func newEntityStore( + tx kv.RwTx, + table string, + makeEntity func() Entity, + getLastEntityId func(ctx context.Context, tx kv.Tx) (uint64, bool, error), + loadEntityBytes func(ctx context.Context, tx kv.Getter, id uint64) ([]byte, error), +) entityStore { + return &entityStoreImpl{ + tx: tx, + table: table, + + makeEntity: makeEntity, + getLastEntityId: getLastEntityId, + loadEntityBytes: loadEntityBytes, + } +} + +func (s *entityStoreImpl) GetLastEntityId(ctx context.Context) (uint64, bool, error) { + return s.getLastEntityId(ctx, s.tx) +} + +func (s *entityStoreImpl) GetEntity(ctx context.Context, id uint64) (Entity, error) { + jsonBytes, err := s.loadEntityBytes(ctx, s.tx, id) + if err != nil { + return nil, err + } + + entity := s.makeEntity() + if err := json.Unmarshal(jsonBytes, entity); err != nil { + return nil, err + } + + return entity, nil +} + +func (s *entityStoreImpl) PutEntity(_ context.Context, id uint64, entity Entity) error { + jsonBytes, err := json.Marshal(entity) + if err != nil { + return err + } + + var idBytes [8]byte + binary.BigEndian.PutUint64(idBytes[:], id) + + return s.tx.Put(s.table, idBytes[:], jsonBytes) +} diff --git a/polygon/heimdall/heimdall.go b/polygon/heimdall/heimdall.go index 4d2b12c0554..d560994c5cc 100644 --- a/polygon/heimdall/heimdall.go +++ b/polygon/heimdall/heimdall.go @@ -15,20 +15,11 @@ import ( // //go:generate mockgen -typed=true -destination=./heimdall_mock.go -package=heimdall . 
Heimdall type Heimdall interface { - LastCheckpointId(ctx context.Context) (CheckpointId, bool, error) - LastMilestoneId(ctx context.Context) (MilestoneId, bool, error) - LastSpanId(ctx context.Context) (SpanId, bool, error) FetchLatestSpan(ctx context.Context) (*Span, error) - FetchCheckpoints(ctx context.Context, start CheckpointId, end CheckpointId) ([]*Checkpoint, error) - FetchMilestones(ctx context.Context, start MilestoneId, end MilestoneId) ([]*Milestone, error) - FetchSpans(ctx context.Context, start SpanId, end SpanId) ([]*Span, error) - FetchCheckpointsFromBlock(ctx context.Context, startBlock uint64) (Waypoints, error) FetchMilestonesFromBlock(ctx context.Context, startBlock uint64) (Waypoints, error) - FetchSpansFromBlock(ctx context.Context, startBlock uint64) ([]*Span, error) - OnCheckpointEvent(ctx context.Context, callback func(*Checkpoint)) error OnMilestoneEvent(ctx context.Context, callback func(*Milestone)) error OnSpanEvent(ctx context.Context, callback func(*Span)) error } @@ -36,7 +27,6 @@ type Heimdall interface { // ErrIncompleteMilestoneRange happens when FetchMilestones is called with an old start block because old milestones are evicted var ErrIncompleteMilestoneRange = errors.New("milestone range doesn't contain the start block") var ErrIncompleteCheckpointRange = errors.New("checkpoint range doesn't contain the start block") -var ErrIncompleteSpanRange = errors.New("span range doesn't contain the start block") const checkpointsBatchFetchThreshold = 100 @@ -70,18 +60,6 @@ type heimdall struct { store Store } -func (h *heimdall) LastCheckpointId(ctx context.Context) (CheckpointId, bool, error) { - // todo get this from store if its likely not changed (need timeout) - - count, err := h.client.FetchCheckpointCount(ctx) - - if err != nil { - return 0, false, err - } - - return CheckpointId(count), true, nil -} - func (h *heimdall) FetchCheckpointsFromBlock(ctx context.Context, startBlock uint64) (Waypoints, error) { h.logger.Debug(heimdallLogPrefix("fetching checkpoints from block"), "start", startBlock) startFetchTime := time.Now() @@ -361,43 +339,6 @@ func (h *heimdall) FetchLatestSpan(ctx context.Context) (*Span, error) { return h.client.FetchLatestSpan(ctx) } -func (h *heimdall) FetchSpansFromBlock(ctx context.Context, startBlock uint64) ([]*Span, error) { - last, _, err := h.LastSpanId(ctx) - - if err != nil { - return nil, err - } - - var spans []*Span - - for i := last; i >= 1; i-- { - m, err := h.FetchSpans(ctx, i, i) - if err != nil { - if errors.Is(err, ErrNotInSpanList) { - common.SliceReverse(spans) - return spans, ErrIncompleteSpanRange - } - return nil, err - } - - cmpResult := m[0].CmpRange(startBlock) - // the start block is past the last span - if cmpResult > 0 { - return nil, nil - } - - spans = append(spans, m...) 
- - // the checkpoint contains the start block - if cmpResult == 0 { - break - } - } - - common.SliceReverse(spans) - return spans, nil -} - func (h *heimdall) FetchSpans(ctx context.Context, start SpanId, end SpanId) ([]*Span, error) { var spans []*Span @@ -467,7 +408,7 @@ func (h *heimdall) pollSpans(ctx context.Context, tip SpanId, cb func(*Span)) { latestSpan, err := h.client.FetchLatestSpan(ctx) if err != nil { h.logger.Warn( - heimdallLogPrefix("heimdall.OnSpanEvent FetchSpanCount failed"), + heimdallLogPrefix("heimdall.OnSpanEvent FetchLatestSpan failed"), "err", err, ) @@ -484,7 +425,7 @@ func (h *heimdall) pollSpans(ctx context.Context, tip SpanId, cb func(*Span)) { m, err := h.FetchSpans(ctx, tip+1, latestSpan.Id) if err != nil { h.logger.Warn( - heimdallLogPrefix("heimdall.OnSpanEvent FetchSpan failed"), + heimdallLogPrefix("heimdall.OnSpanEvent FetchSpans failed"), "err", err, ) @@ -498,60 +439,6 @@ func (h *heimdall) pollSpans(ctx context.Context, tip SpanId, cb func(*Span)) { } } -func (h *heimdall) OnCheckpointEvent(ctx context.Context, cb func(*Checkpoint)) error { - tip, ok, err := h.store.LastCheckpointId(ctx) - if err != nil { - return err - } - - if !ok { - tip, _, err = h.LastCheckpointId(ctx) - if err != nil { - return err - } - } - - go h.pollCheckpoints(ctx, tip, cb) - - return nil -} - -func (h *heimdall) pollCheckpoints(ctx context.Context, tip CheckpointId, cb func(*Checkpoint)) { - for ctx.Err() == nil { - count, err := h.client.FetchCheckpointCount(ctx) - if err != nil { - h.logger.Warn( - heimdallLogPrefix("OnCheckpointEvent.OnCheckpointEvent FetchCheckpointCount failed"), - "err", err, - ) - - h.waitPollingDelay(ctx) - // keep background goroutine alive in case of heimdall errors - continue - } - - if count <= int64(tip) { - h.waitPollingDelay(ctx) - continue - } - - m, err := h.FetchCheckpoints(ctx, tip+1, CheckpointId(count)) - if err != nil { - h.logger.Warn( - heimdallLogPrefix("heimdall.OnCheckpointEvent FetchCheckpoints failed"), - "err", err, - ) - - h.waitPollingDelay(ctx) - // keep background goroutine alive in case of heimdall errors - continue - } - - tip = CheckpointId(count) - go cb(m[len(m)-1]) - } -} - func (h *heimdall) OnMilestoneEvent(ctx context.Context, cb func(*Milestone)) error { tip, ok, err := h.store.LastMilestoneId(ctx) if err != nil { @@ -592,7 +479,7 @@ func (h *heimdall) pollMilestones(ctx context.Context, tip MilestoneId, cb func( m, err := h.FetchMilestones(ctx, tip+1, MilestoneId(count)) if err != nil { h.logger.Warn( - heimdallLogPrefix("heimdall.OnMilestoneEvent FetchMilestone failed"), + heimdallLogPrefix("heimdall.OnMilestoneEvent FetchMilestones failed"), "err", err, ) @@ -667,13 +554,5 @@ func (h *heimdall) batchFetchCheckpoints( } func (h *heimdall) waitPollingDelay(ctx context.Context) { - pollDelayTimer := time.NewTimer(h.pollDelay) - defer pollDelayTimer.Stop() - - select { - case <-ctx.Done(): - return - case <-pollDelayTimer.C: - return - } + common.Sleep(ctx, h.pollDelay) } diff --git a/polygon/heimdall/heimdall_mock.go b/polygon/heimdall/heimdall_mock.go index 646cd97debb..1b037cd8aa2 100644 --- a/polygon/heimdall/heimdall_mock.go +++ b/polygon/heimdall/heimdall_mock.go @@ -39,45 +39,6 @@ func (m *MockHeimdall) EXPECT() *MockHeimdallMockRecorder { return m.recorder } -// FetchCheckpoints mocks base method. 
-func (m *MockHeimdall) FetchCheckpoints(arg0 context.Context, arg1, arg2 CheckpointId) ([]*Checkpoint, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "FetchCheckpoints", arg0, arg1, arg2) - ret0, _ := ret[0].([]*Checkpoint) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// FetchCheckpoints indicates an expected call of FetchCheckpoints. -func (mr *MockHeimdallMockRecorder) FetchCheckpoints(arg0, arg1, arg2 any) *MockHeimdallFetchCheckpointsCall { - mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchCheckpoints", reflect.TypeOf((*MockHeimdall)(nil).FetchCheckpoints), arg0, arg1, arg2) - return &MockHeimdallFetchCheckpointsCall{Call: call} -} - -// MockHeimdallFetchCheckpointsCall wrap *gomock.Call -type MockHeimdallFetchCheckpointsCall struct { - *gomock.Call -} - -// Return rewrite *gomock.Call.Return -func (c *MockHeimdallFetchCheckpointsCall) Return(arg0 []*Checkpoint, arg1 error) *MockHeimdallFetchCheckpointsCall { - c.Call = c.Call.Return(arg0, arg1) - return c -} - -// Do rewrite *gomock.Call.Do -func (c *MockHeimdallFetchCheckpointsCall) Do(f func(context.Context, CheckpointId, CheckpointId) ([]*Checkpoint, error)) *MockHeimdallFetchCheckpointsCall { - c.Call = c.Call.Do(f) - return c -} - -// DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockHeimdallFetchCheckpointsCall) DoAndReturn(f func(context.Context, CheckpointId, CheckpointId) ([]*Checkpoint, error)) *MockHeimdallFetchCheckpointsCall { - c.Call = c.Call.DoAndReturn(f) - return c -} - // FetchCheckpointsFromBlock mocks base method. func (m *MockHeimdall) FetchCheckpointsFromBlock(arg0 context.Context, arg1 uint64) (Waypoints, error) { m.ctrl.T.Helper() @@ -156,45 +117,6 @@ func (c *MockHeimdallFetchLatestSpanCall) DoAndReturn(f func(context.Context) (* return c } -// FetchMilestones mocks base method. -func (m *MockHeimdall) FetchMilestones(arg0 context.Context, arg1, arg2 MilestoneId) ([]*Milestone, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "FetchMilestones", arg0, arg1, arg2) - ret0, _ := ret[0].([]*Milestone) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// FetchMilestones indicates an expected call of FetchMilestones. -func (mr *MockHeimdallMockRecorder) FetchMilestones(arg0, arg1, arg2 any) *MockHeimdallFetchMilestonesCall { - mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchMilestones", reflect.TypeOf((*MockHeimdall)(nil).FetchMilestones), arg0, arg1, arg2) - return &MockHeimdallFetchMilestonesCall{Call: call} -} - -// MockHeimdallFetchMilestonesCall wrap *gomock.Call -type MockHeimdallFetchMilestonesCall struct { - *gomock.Call -} - -// Return rewrite *gomock.Call.Return -func (c *MockHeimdallFetchMilestonesCall) Return(arg0 []*Milestone, arg1 error) *MockHeimdallFetchMilestonesCall { - c.Call = c.Call.Return(arg0, arg1) - return c -} - -// Do rewrite *gomock.Call.Do -func (c *MockHeimdallFetchMilestonesCall) Do(f func(context.Context, MilestoneId, MilestoneId) ([]*Milestone, error)) *MockHeimdallFetchMilestonesCall { - c.Call = c.Call.Do(f) - return c -} - -// DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockHeimdallFetchMilestonesCall) DoAndReturn(f func(context.Context, MilestoneId, MilestoneId) ([]*Milestone, error)) *MockHeimdallFetchMilestonesCall { - c.Call = c.Call.DoAndReturn(f) - return c -} - // FetchMilestonesFromBlock mocks base method. 
func (m *MockHeimdall) FetchMilestonesFromBlock(arg0 context.Context, arg1 uint64) (Waypoints, error) { m.ctrl.T.Helper() @@ -234,242 +156,6 @@ func (c *MockHeimdallFetchMilestonesFromBlockCall) DoAndReturn(f func(context.Co return c } -// FetchSpans mocks base method. -func (m *MockHeimdall) FetchSpans(arg0 context.Context, arg1, arg2 SpanId) ([]*Span, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "FetchSpans", arg0, arg1, arg2) - ret0, _ := ret[0].([]*Span) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// FetchSpans indicates an expected call of FetchSpans. -func (mr *MockHeimdallMockRecorder) FetchSpans(arg0, arg1, arg2 any) *MockHeimdallFetchSpansCall { - mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchSpans", reflect.TypeOf((*MockHeimdall)(nil).FetchSpans), arg0, arg1, arg2) - return &MockHeimdallFetchSpansCall{Call: call} -} - -// MockHeimdallFetchSpansCall wrap *gomock.Call -type MockHeimdallFetchSpansCall struct { - *gomock.Call -} - -// Return rewrite *gomock.Call.Return -func (c *MockHeimdallFetchSpansCall) Return(arg0 []*Span, arg1 error) *MockHeimdallFetchSpansCall { - c.Call = c.Call.Return(arg0, arg1) - return c -} - -// Do rewrite *gomock.Call.Do -func (c *MockHeimdallFetchSpansCall) Do(f func(context.Context, SpanId, SpanId) ([]*Span, error)) *MockHeimdallFetchSpansCall { - c.Call = c.Call.Do(f) - return c -} - -// DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockHeimdallFetchSpansCall) DoAndReturn(f func(context.Context, SpanId, SpanId) ([]*Span, error)) *MockHeimdallFetchSpansCall { - c.Call = c.Call.DoAndReturn(f) - return c -} - -// FetchSpansFromBlock mocks base method. -func (m *MockHeimdall) FetchSpansFromBlock(arg0 context.Context, arg1 uint64) ([]*Span, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "FetchSpansFromBlock", arg0, arg1) - ret0, _ := ret[0].([]*Span) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// FetchSpansFromBlock indicates an expected call of FetchSpansFromBlock. -func (mr *MockHeimdallMockRecorder) FetchSpansFromBlock(arg0, arg1 any) *MockHeimdallFetchSpansFromBlockCall { - mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchSpansFromBlock", reflect.TypeOf((*MockHeimdall)(nil).FetchSpansFromBlock), arg0, arg1) - return &MockHeimdallFetchSpansFromBlockCall{Call: call} -} - -// MockHeimdallFetchSpansFromBlockCall wrap *gomock.Call -type MockHeimdallFetchSpansFromBlockCall struct { - *gomock.Call -} - -// Return rewrite *gomock.Call.Return -func (c *MockHeimdallFetchSpansFromBlockCall) Return(arg0 []*Span, arg1 error) *MockHeimdallFetchSpansFromBlockCall { - c.Call = c.Call.Return(arg0, arg1) - return c -} - -// Do rewrite *gomock.Call.Do -func (c *MockHeimdallFetchSpansFromBlockCall) Do(f func(context.Context, uint64) ([]*Span, error)) *MockHeimdallFetchSpansFromBlockCall { - c.Call = c.Call.Do(f) - return c -} - -// DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockHeimdallFetchSpansFromBlockCall) DoAndReturn(f func(context.Context, uint64) ([]*Span, error)) *MockHeimdallFetchSpansFromBlockCall { - c.Call = c.Call.DoAndReturn(f) - return c -} - -// LastCheckpointId mocks base method. 
-func (m *MockHeimdall) LastCheckpointId(arg0 context.Context) (CheckpointId, bool, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "LastCheckpointId", arg0) - ret0, _ := ret[0].(CheckpointId) - ret1, _ := ret[1].(bool) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 -} - -// LastCheckpointId indicates an expected call of LastCheckpointId. -func (mr *MockHeimdallMockRecorder) LastCheckpointId(arg0 any) *MockHeimdallLastCheckpointIdCall { - mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LastCheckpointId", reflect.TypeOf((*MockHeimdall)(nil).LastCheckpointId), arg0) - return &MockHeimdallLastCheckpointIdCall{Call: call} -} - -// MockHeimdallLastCheckpointIdCall wrap *gomock.Call -type MockHeimdallLastCheckpointIdCall struct { - *gomock.Call -} - -// Return rewrite *gomock.Call.Return -func (c *MockHeimdallLastCheckpointIdCall) Return(arg0 CheckpointId, arg1 bool, arg2 error) *MockHeimdallLastCheckpointIdCall { - c.Call = c.Call.Return(arg0, arg1, arg2) - return c -} - -// Do rewrite *gomock.Call.Do -func (c *MockHeimdallLastCheckpointIdCall) Do(f func(context.Context) (CheckpointId, bool, error)) *MockHeimdallLastCheckpointIdCall { - c.Call = c.Call.Do(f) - return c -} - -// DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockHeimdallLastCheckpointIdCall) DoAndReturn(f func(context.Context) (CheckpointId, bool, error)) *MockHeimdallLastCheckpointIdCall { - c.Call = c.Call.DoAndReturn(f) - return c -} - -// LastMilestoneId mocks base method. -func (m *MockHeimdall) LastMilestoneId(arg0 context.Context) (MilestoneId, bool, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "LastMilestoneId", arg0) - ret0, _ := ret[0].(MilestoneId) - ret1, _ := ret[1].(bool) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 -} - -// LastMilestoneId indicates an expected call of LastMilestoneId. -func (mr *MockHeimdallMockRecorder) LastMilestoneId(arg0 any) *MockHeimdallLastMilestoneIdCall { - mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LastMilestoneId", reflect.TypeOf((*MockHeimdall)(nil).LastMilestoneId), arg0) - return &MockHeimdallLastMilestoneIdCall{Call: call} -} - -// MockHeimdallLastMilestoneIdCall wrap *gomock.Call -type MockHeimdallLastMilestoneIdCall struct { - *gomock.Call -} - -// Return rewrite *gomock.Call.Return -func (c *MockHeimdallLastMilestoneIdCall) Return(arg0 MilestoneId, arg1 bool, arg2 error) *MockHeimdallLastMilestoneIdCall { - c.Call = c.Call.Return(arg0, arg1, arg2) - return c -} - -// Do rewrite *gomock.Call.Do -func (c *MockHeimdallLastMilestoneIdCall) Do(f func(context.Context) (MilestoneId, bool, error)) *MockHeimdallLastMilestoneIdCall { - c.Call = c.Call.Do(f) - return c -} - -// DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockHeimdallLastMilestoneIdCall) DoAndReturn(f func(context.Context) (MilestoneId, bool, error)) *MockHeimdallLastMilestoneIdCall { - c.Call = c.Call.DoAndReturn(f) - return c -} - -// LastSpanId mocks base method. -func (m *MockHeimdall) LastSpanId(arg0 context.Context) (SpanId, bool, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "LastSpanId", arg0) - ret0, _ := ret[0].(SpanId) - ret1, _ := ret[1].(bool) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 -} - -// LastSpanId indicates an expected call of LastSpanId. 
-func (mr *MockHeimdallMockRecorder) LastSpanId(arg0 any) *MockHeimdallLastSpanIdCall { - mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LastSpanId", reflect.TypeOf((*MockHeimdall)(nil).LastSpanId), arg0) - return &MockHeimdallLastSpanIdCall{Call: call} -} - -// MockHeimdallLastSpanIdCall wrap *gomock.Call -type MockHeimdallLastSpanIdCall struct { - *gomock.Call -} - -// Return rewrite *gomock.Call.Return -func (c *MockHeimdallLastSpanIdCall) Return(arg0 SpanId, arg1 bool, arg2 error) *MockHeimdallLastSpanIdCall { - c.Call = c.Call.Return(arg0, arg1, arg2) - return c -} - -// Do rewrite *gomock.Call.Do -func (c *MockHeimdallLastSpanIdCall) Do(f func(context.Context) (SpanId, bool, error)) *MockHeimdallLastSpanIdCall { - c.Call = c.Call.Do(f) - return c -} - -// DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockHeimdallLastSpanIdCall) DoAndReturn(f func(context.Context) (SpanId, bool, error)) *MockHeimdallLastSpanIdCall { - c.Call = c.Call.DoAndReturn(f) - return c -} - -// OnCheckpointEvent mocks base method. -func (m *MockHeimdall) OnCheckpointEvent(arg0 context.Context, arg1 func(*Checkpoint)) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "OnCheckpointEvent", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// OnCheckpointEvent indicates an expected call of OnCheckpointEvent. -func (mr *MockHeimdallMockRecorder) OnCheckpointEvent(arg0, arg1 any) *MockHeimdallOnCheckpointEventCall { - mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OnCheckpointEvent", reflect.TypeOf((*MockHeimdall)(nil).OnCheckpointEvent), arg0, arg1) - return &MockHeimdallOnCheckpointEventCall{Call: call} -} - -// MockHeimdallOnCheckpointEventCall wrap *gomock.Call -type MockHeimdallOnCheckpointEventCall struct { - *gomock.Call -} - -// Return rewrite *gomock.Call.Return -func (c *MockHeimdallOnCheckpointEventCall) Return(arg0 error) *MockHeimdallOnCheckpointEventCall { - c.Call = c.Call.Return(arg0) - return c -} - -// Do rewrite *gomock.Call.Do -func (c *MockHeimdallOnCheckpointEventCall) Do(f func(context.Context, func(*Checkpoint)) error) *MockHeimdallOnCheckpointEventCall { - c.Call = c.Call.Do(f) - return c -} - -// DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockHeimdallOnCheckpointEventCall) DoAndReturn(f func(context.Context, func(*Checkpoint)) error) *MockHeimdallOnCheckpointEventCall { - c.Call = c.Call.DoAndReturn(f) - return c -} - // OnMilestoneEvent mocks base method. func (m *MockHeimdall) OnMilestoneEvent(arg0 context.Context, arg1 func(*Milestone)) error { m.ctrl.T.Helper() diff --git a/polygon/heimdall/heimdall_test.go b/polygon/heimdall/heimdall_test.go index 43734e264bd..74192bcd1df 100644 --- a/polygon/heimdall/heimdall_test.go +++ b/polygon/heimdall/heimdall_test.go @@ -89,7 +89,7 @@ func (test heimdallTest) setupCheckpoints(count int) []*Checkpoint { } else { client.EXPECT(). FetchCheckpoints(gomock.Any(), gomock.Any(), gomock.Any()). 
- DoAndReturn(func(ctx context.Context, page uint64, limit uint64) (Checkpoints, error) { + DoAndReturn(func(ctx context.Context, page uint64, limit uint64) ([]*Checkpoint, error) { if page == 0 { return nil, nil } diff --git a/polygon/heimdall/milestone.go b/polygon/heimdall/milestone.go index 12e64c00243..7ffc246d33b 100644 --- a/polygon/heimdall/milestone.go +++ b/polygon/heimdall/milestone.go @@ -28,6 +28,13 @@ func (m Milestone) EndBlock() *big.Int { return m.Fields.EndBlock } +func (m Milestone) BlockNumRange() ClosedRange { + return ClosedRange{ + Start: m.StartBlock().Uint64(), + End: m.EndBlock().Uint64(), + } +} + func (m Milestone) RootHash() libcommon.Hash { return m.Fields.RootHash } diff --git a/polygon/heimdall/scraper.go b/polygon/heimdall/scraper.go new file mode 100644 index 00000000000..58d47ce3fb3 --- /dev/null +++ b/polygon/heimdall/scraper.go @@ -0,0 +1,198 @@ +package heimdall + +import ( + "context" + "time" + + "github.com/ledgerwatch/log/v3" + "golang.org/x/sync/errgroup" + + libcommon "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon/turbo/services" +) + +type Scraper struct { + txProvider func() kv.RwTx + readerProvider func() reader + + client HeimdallClient + pollDelay time.Duration + logger log.Logger +} + +func NewScraperTODO( + client HeimdallClient, + pollDelay time.Duration, + logger log.Logger, +) *Scraper { + return NewScraper( + func() kv.RwTx { /* TODO */ return nil }, + func() reader { /* TODO */ return nil }, + client, + pollDelay, + logger, + ) +} + +func NewScraper( + txProvider func() kv.RwTx, + readerProvider func() reader, + + client HeimdallClient, + pollDelay time.Duration, + logger log.Logger, +) *Scraper { + return &Scraper{ + txProvider: txProvider, + readerProvider: readerProvider, + + client: client, + pollDelay: pollDelay, + logger: logger, + } +} + +func (s *Scraper) syncEntity( + ctx context.Context, + store entityStore, + fetcher entityFetcher, + callback func(ClosedRange), +) error { + for ctx.Err() == nil { + lastKnownId, hasLastKnownId, err := store.GetLastEntityId(ctx) + if err != nil { + return err + } + + var idRange ClosedRange + if hasLastKnownId { + idRange.Start = lastKnownId + 1 + } else { + idRange.Start = 1 + } + + idRange.End, err = fetcher.FetchLastEntityId(ctx) + if err != nil { + return err + } + + if idRange.Start > idRange.End { + libcommon.Sleep(ctx, s.pollDelay) + } else { + entities, err := fetcher.FetchEntitiesRange(ctx, idRange) + if err != nil { + return err + } + + for i, entity := range entities { + if err = store.PutEntity(ctx, idRange.Start+uint64(i), entity); err != nil { + return err + } + } + + if callback != nil { + go callback(idRange) + } + } + } + return ctx.Err() +} + +func newCheckpointStore(tx kv.RwTx, reader services.BorCheckpointReader) entityStore { + makeEntity := func() Entity { return new(Checkpoint) } + return newEntityStore(tx, kv.BorCheckpoints, makeEntity, reader.LastCheckpointId, reader.Checkpoint) +} + +func newMilestoneStore(tx kv.RwTx, reader services.BorMilestoneReader) entityStore { + makeEntity := func() Entity { return new(Milestone) } + return newEntityStore(tx, kv.BorMilestones, makeEntity, reader.LastMilestoneId, reader.Milestone) +} + +func newSpanStore(tx kv.RwTx, reader services.BorSpanReader) entityStore { + makeEntity := func() Entity { return new(Span) } + return newEntityStore(tx, kv.BorSpans, makeEntity, reader.LastSpanId, reader.Span) +} + +func newCheckpointFetcher(client HeimdallClient, logger 
log.Logger) entityFetcher {
+	fetchEntity := func(ctx context.Context, id int64) (Entity, error) { return client.FetchCheckpoint(ctx, id) }
+
+	fetchEntitiesPage := func(ctx context.Context, page uint64, limit uint64) ([]Entity, error) {
+		entities, err := client.FetchCheckpoints(ctx, page, limit)
+		return libcommon.SliceMap(entities, func(c *Checkpoint) Entity { return c }), err
+	}
+
+	return newEntityFetcher(
+		"CheckpointFetcher",
+		client.FetchCheckpointCount,
+		fetchEntity,
+		fetchEntitiesPage,
+		logger,
+	)
+}
+
+func newMilestoneFetcher(client HeimdallClient, logger log.Logger) entityFetcher {
+	fetchEntity := func(ctx context.Context, id int64) (Entity, error) { return client.FetchMilestone(ctx, id) }
+
+	return newEntityFetcher(
+		"MilestoneFetcher",
+		client.FetchMilestoneCount,
+		fetchEntity,
+		nil,
+		logger,
+	)
+}
+
+func newSpanFetcher(client HeimdallClient, logger log.Logger) entityFetcher {
+	fetchLastEntityId := func(ctx context.Context) (int64, error) {
+		span, err := client.FetchLatestSpan(ctx)
+		if err != nil {
+			return 0, err
+		}
+		return int64(span.Id), nil
+	}
+
+	fetchEntity := func(ctx context.Context, id int64) (Entity, error) {
+		return client.FetchSpan(ctx, uint64(id))
+	}
+
+	return newEntityFetcher(
+		"SpanFetcher",
+		fetchLastEntityId,
+		fetchEntity,
+		nil,
+		logger,
+	)
+}
+
+func (s *Scraper) Run(parentCtx context.Context) error {
+	tx := s.txProvider()
+	if tx == nil {
+		// TODO: implement and remove
+		s.logger.Warn("heimdall.Scraper txProvider is not implemented yet")
+		return nil
+	}
+	reader := s.readerProvider()
+	if reader == nil {
+		// TODO: implement and remove
+		s.logger.Warn("heimdall.Scraper readerProvider is not implemented yet")
+		return nil
+	}
+
+	group, ctx := errgroup.WithContext(parentCtx)
+
+	// sync checkpoints
+	group.Go(func() error {
+		return s.syncEntity(ctx, newCheckpointStore(tx, reader), newCheckpointFetcher(s.client, s.logger), nil /* TODO */)
+	})
+	// sync milestones
+	group.Go(func() error {
+		return s.syncEntity(ctx, newMilestoneStore(tx, reader), newMilestoneFetcher(s.client, s.logger), nil /* TODO */)
+	})
+	// sync spans
+	group.Go(func() error {
+		return s.syncEntity(ctx, newSpanStore(tx, reader), newSpanFetcher(s.client, s.logger), nil /* TODO */)
+	})
+
+	return group.Wait()
+}
diff --git a/polygon/heimdall/span.go b/polygon/heimdall/span.go
index 10c36998c1f..297e8aa75c9 100644
--- a/polygon/heimdall/span.go
+++ b/polygon/heimdall/span.go
@@ -15,6 +15,13 @@ type Span struct {
 	ChainID string `json:"bor_chain_id,omitempty" yaml:"bor_chain_id"`
 }
 
+func (s *Span) BlockNumRange() ClosedRange {
+	return ClosedRange{
+		Start: s.StartBlock,
+		End:   s.EndBlock,
+	}
+}
+
 func (hs *Span) Less(other btree.Item) bool {
 	otherHs := other.(*Span)
 	if hs.EndBlock == 0 || otherHs.EndBlock == 0 {
diff --git a/polygon/heimdall/store.go b/polygon/heimdall/store.go
index f354fc7298f..0177a1ba93c 100644
--- a/polygon/heimdall/store.go
+++ b/polygon/heimdall/store.go
@@ -129,7 +129,7 @@ func (s txReadStore) LastCheckpointId(ctx context.Context) (CheckpointId, bool,
 }
 
 func (s txReadStore) GetCheckpoint(ctx context.Context, checkpointId CheckpointId) (*Checkpoint, error) {
-	checkpointBytes, err := s.reader.Milestone(ctx, s.tx, uint64(checkpointId))
+	checkpointBytes, err := s.reader.Checkpoint(ctx, s.tx, uint64(checkpointId))
 	if err != nil {
 		return nil, err
 	}
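For readers skimming the diff, the catch-up loop in `syncEntity` above reduces to a small pattern: resume from the last persisted id, fetch the gap `[lastKnownId+1, lastRemoteId]` in one range call, persist it, then poll once caught up. Below is a self-contained toy model of that loop; `toyStore` and `toyFetcher` are hypothetical stand-ins, not the real `entityStore`/`entityFetcher` interfaces:

```
package main

import (
	"context"
	"fmt"
	"time"
)

// toyStore stands in for entityStore: it only remembers entities by id.
type toyStore struct{ entities map[uint64]string }

func (s *toyStore) lastEntityId() (uint64, bool) {
	var last uint64
	for id := range s.entities {
		if id > last {
			last = id
		}
	}
	return last, len(s.entities) > 0
}

// toyFetcher stands in for entityFetcher: remote[i] is the entity with id i+1.
type toyFetcher struct{ remote []string }

func (f *toyFetcher) lastEntityId() uint64 { return uint64(len(f.remote)) }

func (f *toyFetcher) fetchRange(start, end uint64) []string { return f.remote[start-1 : end] }

// syncEntityToy mirrors the shape of Scraper.syncEntity above.
func syncEntityToy(ctx context.Context, store *toyStore, fetcher *toyFetcher, pollDelay time.Duration) {
	for ctx.Err() == nil {
		start := uint64(1)
		if last, ok := store.lastEntityId(); ok {
			start = last + 1 // resume right after the last persisted entity
		}
		end := fetcher.lastEntityId()
		if start > end {
			time.Sleep(pollDelay) // caught up: wait for new entities to appear
			continue
		}
		for i, e := range fetcher.fetchRange(start, end) {
			store.entities[start+uint64(i)] = e
		}
	}
}

func main() {
	store := &toyStore{entities: map[uint64]string{}}
	fetcher := &toyFetcher{remote: []string{"checkpoint-1", "checkpoint-2", "checkpoint-3"}}

	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()

	syncEntityToy(ctx, store, fetcher, 10*time.Millisecond)
	fmt.Println(len(store.entities), "entities persisted") // 3 entities persisted
}
```

The real scraper runs one such loop per entity type (checkpoints, milestones, spans) in an errgroup, and uses the cancellable `libcommon.Sleep(ctx, pollDelay)` rather than a bare `time.Sleep`.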
diff --git a/polygon/sync/service.go b/polygon/sync/service.go
index eb7bcce6826..531a390ac14 100644
--- a/polygon/sync/service.go
+++ b/polygon/sync/service.go
@@ -2,6 +2,7 @@ package sync
 
 import (
 	"context"
+	"time"
 
 	lru "github.com/hashicorp/golang-lru/arc/v2"
 	"github.com/ledgerwatch/log/v3"
@@ -28,6 +29,8 @@ type service struct {
 	p2pService p2p.Service
 	store      Store
 	events     *TipEvents
+
+	heimdallScraper *heimdall.Scraper
 }
 
 func NewService(
@@ -47,6 +50,11 @@ func NewService(
 	p2pService := p2p.NewService(maxPeers, logger, sentryClient, statusDataProvider.GetStatusData)
 	heimdallClient := heimdall.NewHeimdallClient(heimdallUrl, logger)
 	heimdallService := heimdall.NewHeimdall(heimdallClient, logger)
+	heimdallScraper := heimdall.NewScraperTODO(
+		heimdallClient,
+		1*time.Second,
+		logger,
+	)
 	blockDownloader := NewBlockDownloader(
 		logger,
 		p2pService,
@@ -95,6 +103,8 @@ func NewService(
 		p2pService: p2pService,
 		store:      store,
 		events:     events,
+
+		heimdallScraper: heimdallScraper,
 	}
 }
 
@@ -124,6 +134,14 @@ func (s *service) Run(ctx context.Context) error {
 		}
 	}()
 
+	go func() {
+		err := s.heimdallScraper.Run(ctx)
+		if (err != nil) && (ctx.Err() == nil) {
+			serviceErr = err
+			cancel()
+		}
+	}()
+
 	go func() {
 		err := s.sync.Run(ctx)
 		if (err != nil) && (ctx.Err() == nil) {

From 1a6bb6eae62489dc9c56c797144d0649ef531897 Mon Sep 17 00:00:00 2001
From: Alex Sharov
Date: Mon, 13 May 2024 17:37:42 +0700
Subject: [PATCH 32/48] buildIndex: reset buf on recsplit collision (#10292)

---
 erigon-lib/state/domain.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/erigon-lib/state/domain.go b/erigon-lib/state/domain.go
index 59e5ee2d4ff..d0934f4a372 100644
--- a/erigon-lib/state/domain.go
+++ b/erigon-lib/state/domain.go
@@ -1134,9 +1134,9 @@ func buildIndex(ctx context.Context, d *seg.Decompressor, compressed FileCompres
 		rs.DisableFsync()
 	}
 
-	word := make([]byte, 0, 256)
 	var keyPos, valPos uint64
 	for {
+		word := make([]byte, 0, 256)
 		if err := ctx.Err(); err != nil {
 			return err
 		}
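The one-line move above means every recsplit build attempt now starts with a freshly allocated `word` buffer instead of reusing one across collision retries. A minimal, standalone sketch (not the recsplit code itself) of why reusing a scratch buffer is hazardous once slices of it have been handed out:

```
package main

import "fmt"

func main() {
	word := make([]byte, 0, 16)
	var kept [][]byte

	for _, s := range []string{"first", "second"} {
		word = append(word[:0], s...) // reuse one backing array across iterations
		kept = append(kept, word)     // retain a slice that aliases that array
	}

	// The second append overwrote the bytes the first retained slice points at:
	fmt.Printf("%s | %s\n", kept[0], kept[1]) // prints "secon | second"
}
```

Whether or not this exact aliasing was the failure mode here, allocating inside the loop makes each retry independent, at the cost of one small allocation per build attempt.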
From ed7209735d839c964d58506a3e59329501667712 Mon Sep 17 00:00:00 2001
From: Alex Sharov
Date: Mon, 13 May 2024 17:38:20 +0700
Subject: [PATCH 33/48] Update README.md (#10282)

---
 README.md | 39 +++++++++++++++++++++++++++++++--------
 1 file changed, 31 insertions(+), 8 deletions(-)

diff --git a/README.md b/README.md
index d6a6efd23fd..54fca1c2cd4 100644
--- a/README.md
+++ b/README.md
@@ -329,7 +329,8 @@ Engine API.
 
 #### Caplin's Usage.
 
-Caplin is be enabled by default. to disable it and enable the Engine API, use the `--externalcl` flag. from that point on, an external Consensus Layer will not be need
+Caplin is enabled by default. To disable it and enable the Engine API, use the `--externalcl` flag. From that point
+on, an external Consensus Layer will not be needed
 anymore. Caplin also has an archivial mode for historical states and blocks. it can be enabled through the `--caplin.archive`
@@ -602,7 +603,6 @@ In order to configure the ports, use:
 
 |-----------|------|----------|---------|---------------|
 | REST      | 5555 | TCP      | REST    | Public        |
 
 #### `shared` ports
 
 | Component | Port | Protocol | Purpose | Should Expose |
@@ -786,12 +786,14 @@ Supported networks: all (except Mumbai).
   stage_trace_index
 - E3 can execute 1 historical transaction - without executing it's block - because history/indices have transaction-granularity, instead of block-granularity.
-- Doesn't store Receipts/Logs - it always re-executing historical transactions - but re-execution is cheaper (see point
-  above). We would like to see how it will impact users - welcome feedback. Likely we will try add some small LRU-cache
+- E3 doesn't store Logs (aka Receipts) - it always re-executes historical txn (but that's cheaper than in E2 - see point
+  above). Also a Logs LRU was added in E2 (release/2.60) and E3: https://github.com/ledgerwatch/erigon/pull/10112
   here. Likely later we will add optional flag "to persist receipts".
-- More cold-start-friendly and os-pre-fetch-friendly.
-- datadir/chaindata is small now - to prevent it's grow: we recommend set --batchSize <= 1G. Probably 512mb is
-  enough.
+- `--sync.loop.block.limit` is enabled by default. (Default: `2_000`.
+  Set `--sync.loop.block.limit=10_000_000 --batchSize=1g` to increase sync speed on good hardware).
+- datadir/chaindata is small now - to prevent its growth we recommend setting `--batchSize <= 1G`. And it's fine
+  to `rm -rf chaindata`
+- you can symlink/mount the latest state to a fast drive and history to a cheap drive
 
 ### E3 datadir structure
 
@@ -808,7 +810,7 @@ datadir
 # There is 4 domains: account, storage, code, commitment
 ```
 
-### E3 can store state on fast disk and history on slow disk
+### E3 can store state on fast disk and history on cheap disk
 
 If you can afford store datadir on 1 nvme-raid - great. If can't - it's possible to store history on cheap drive.
@@ -865,3 +867,24 @@ du -hsc /erigon/snapshots/*
 1.3T /erigon/snapshots/idx
 3.7T total
 ```
+
+### E3 other perf tricks
+
+- `--sync.loop.block.limit=10_000_000 --batchSize=1g` - likely will help for sync speed.
+- on cloud-drives (good throughput, bad latency) you can let the OS pre-fetch some data (`madv_normal` instead
+  of `madv_random`). For the `snapshots/domain` folder (latest
+  state) set `KV_MADV_NORMAL_NO_LAST_LVL=accounts,storage,commitment` (or, if you have enough
+  RAM: `KV_MADV_NORMAL=accounts,storage,commitment`). For the `chaindata` folder (latest updates) set `MDBX_READAHEAD=true`.
+  For all files - `SNAPSHOT_MADV_RND=false`
+
+- you can lock the latest state in RAM - to prevent it from eviction (the node can then serve high historical RPC
+  traffic without impacting Chain-Tip perf):
+
+```
+vmtouch -vdlw /mnt/erigon/snapshots/domain/*bt
+ls /mnt/erigon/snapshots/domain/*.kv | parallel vmtouch -vdlw
+
+# if it fails with "can't allocate memory", try:
+sync && sudo sysctl vm.drop_caches=3
+echo 1 > /proc/sys/vm/compact_memory
+```
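The madvise knobs in the new perf section are plain environment variables, so they compose into an ordinary launch script. A sketch under stated assumptions - the variable names and values are taken verbatim from the README above, while the flag combination and paths are illustrative only:

```
# Hypothetical launch wrapper; adjust paths and flags per machine.
export KV_MADV_NORMAL_NO_LAST_LVL=accounts,storage,commitment  # latest state under snapshots/domain
export MDBX_READAHEAD=true                                     # chaindata (latest updates)
export SNAPSHOT_MADV_RND=false                                 # all snapshot files
./build/bin/erigon --datadir=/mnt/erigon --sync.loop.block.limit=10_000_000 --batchSize=1g
```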
From 4c18769695a8901b975cd37eaee865a27e77aab8 Mon Sep 17 00:00:00 2001
From: Alex Sharov
Date: Mon, 13 May 2024 17:45:40 +0700
Subject: [PATCH 34/48] e3: general HistoryRange method in aggTx (#10284)

also:
- rename `max` to `_max`
- remove invertedIndex existence index support
---
 erigon-lib/kv/temporal/kv_temporal.go   |  38 ++-----
 erigon-lib/state/aggregator.go          |  50 +++++----
 erigon-lib/state/domain.go              |  11 +-
 erigon-lib/state/domain_shared.go       |  11 +-
 erigon-lib/state/domain_test.go         |   1 -
 erigon-lib/state/history.go             |  14 +--
 erigon-lib/state/inverted_index.go      | 131 +++---------------------
 erigon-lib/state/inverted_index_test.go |   4 +-
 erigon-lib/state/merge.go               |  47 ++++-----
 erigon-lib/state/merge_test.go          |   1 -
 10 files changed, 80 insertions(+), 228 deletions(-)

diff --git a/erigon-lib/kv/temporal/kv_temporal.go b/erigon-lib/kv/temporal/kv_temporal.go
index f9acad0e70e..2b7d3f4ba16 100644
--- a/erigon-lib/kv/temporal/kv_temporal.go
+++ b/erigon-lib/kv/temporal/kv_temporal.go
@@ -2,7 +2,6 @@ package temporal
 
 import (
 	"context"
-	"fmt"
 
 	"github.com/ledgerwatch/erigon-lib/common"
 	"github.com/ledgerwatch/erigon-lib/kv"
@@ -183,14 +182,12 @@ func (tx *Tx) Commit() error {
 	return mdbxTx.Commit()
 }
 
-func (tx *Tx) DomainRange(name kv.Domain, fromKey, toKey []byte, asOfTs uint64, asc order.By, limit int) (it iter.KV, err error) {
-	it, err = tx.aggCtx.DomainRange(tx.MdbxTx, name, fromKey, toKey, asOfTs, asc, limit)
+func (tx *Tx) DomainRange(name kv.Domain, fromKey, toKey []byte, asOfTs uint64, asc order.By, limit int) (iter.KV, error) {
+	it, err := tx.aggCtx.DomainRange(tx.MdbxTx, name, fromKey, toKey, asOfTs, asc, limit)
 	if err != nil {
 		return nil, err
 	}
-	if closer, ok := it.(kv.Closer); ok {
-		tx.resourcesToClose = append(tx.resourcesToClose, closer)
-	}
+	tx.resourcesToClose = append(tx.resourcesToClose, it)
 	return it, nil
 }
 
@@ -220,34 +217,15 @@ func (tx *Tx) IndexRange(name kv.InvertedIdx, k []byte, fromTs, toTs int, asc or
 	if err != nil {
 		return nil, err
 	}
-	if closer, ok := timestamps.(kv.Closer); ok {
-		tx.resourcesToClose = append(tx.resourcesToClose, closer)
-	}
+	tx.resourcesToClose = append(tx.resourcesToClose, timestamps)
 	return timestamps, nil
 }
 
-func (tx *Tx) HistoryRange(name kv.History, fromTs, toTs int, asc order.By, limit int) (it iter.KV, err error) {
-	if asc == order.Desc {
-		panic("not implemented yet")
-	}
-	if limit >= 0 {
-		panic("not implemented yet")
-	}
-	switch name {
-	case kv.AccountsHistory:
-		it, err = tx.aggCtx.AccountHistoryRange(fromTs, toTs, asc, limit, tx)
-	case kv.StorageHistory:
-		it, err = tx.aggCtx.StorageHistoryRange(fromTs, toTs, asc, limit, tx)
-	case kv.CodeHistory:
-		it, err = tx.aggCtx.CodeHistoryRange(fromTs, toTs, asc, limit, tx)
-	default:
-		return nil, fmt.Errorf("unexpected history name: %s", name)
-	}
+func (tx *Tx) HistoryRange(name kv.History, fromTs, toTs int, asc order.By, limit int) (iter.KV, error) {
+	it, err := tx.aggCtx.HistoryRange(name, fromTs, toTs, asc, limit, tx.MdbxTx)
 	if err != nil {
 		return nil, err
 	}
-	if closer, ok := it.(kv.Closer); ok {
-		tx.resourcesToClose = append(tx.resourcesToClose,
closer) - } - return it, err + tx.resourcesToClose = append(tx.resourcesToClose, it) + return it, nil } diff --git a/erigon-lib/state/aggregator.go b/erigon-lib/state/aggregator.go index 5c4f758b52c..b5041647000 100644 --- a/erigon-lib/state/aggregator.go +++ b/erigon-lib/state/aggregator.go @@ -186,19 +186,19 @@ func NewAggregator(ctx context.Context, dirs datadir.Dirs, aggregationStep uint6 // return nil, err //} idxCfg := iiCfg{salt: salt, dirs: dirs, db: db} - if a.logAddrs, err = NewInvertedIndex(idxCfg, aggregationStep, "logaddrs", kv.TblLogAddressKeys, kv.TblLogAddressIdx, false, nil, logger); err != nil { + if a.logAddrs, err = NewInvertedIndex(idxCfg, aggregationStep, "logaddrs", kv.TblLogAddressKeys, kv.TblLogAddressIdx, nil, logger); err != nil { return nil, err } idxCfg = iiCfg{salt: salt, dirs: dirs, db: db} - if a.logTopics, err = NewInvertedIndex(idxCfg, aggregationStep, "logtopics", kv.TblLogTopicsKeys, kv.TblLogTopicsIdx, false, nil, logger); err != nil { + if a.logTopics, err = NewInvertedIndex(idxCfg, aggregationStep, "logtopics", kv.TblLogTopicsKeys, kv.TblLogTopicsIdx, nil, logger); err != nil { return nil, err } idxCfg = iiCfg{salt: salt, dirs: dirs, db: db} - if a.tracesFrom, err = NewInvertedIndex(idxCfg, aggregationStep, "tracesfrom", kv.TblTracesFromKeys, kv.TblTracesFromIdx, false, nil, logger); err != nil { + if a.tracesFrom, err = NewInvertedIndex(idxCfg, aggregationStep, "tracesfrom", kv.TblTracesFromKeys, kv.TblTracesFromIdx, nil, logger); err != nil { return nil, err } idxCfg = iiCfg{salt: salt, dirs: dirs, db: db} - if a.tracesTo, err = NewInvertedIndex(idxCfg, aggregationStep, "tracesto", kv.TblTracesToKeys, kv.TblTracesToIdx, false, nil, logger); err != nil { + if a.tracesTo, err = NewInvertedIndex(idxCfg, aggregationStep, "tracesto", kv.TblTracesToKeys, kv.TblTracesToIdx, nil, logger); err != nil { return nil, err } a.KeepStepsInDB(1) @@ -1720,24 +1720,22 @@ func (ac *AggregatorRoTx) HistorySeek(name kv.History, key []byte, ts uint64, tx } } -func (ac *AggregatorRoTx) AccountHistoryRange(startTxNum, endTxNum int, asc order.By, limit int, tx kv.Tx) (iter.KV, error) { - hr, err := ac.d[kv.AccountsDomain].ht.HistoryRange(startTxNum, endTxNum, asc, limit, tx) - if err != nil { - return nil, err - } - return iter.WrapKV(hr), nil -} +func (ac *AggregatorRoTx) HistoryRange(name kv.History, fromTs, toTs int, asc order.By, limit int, tx kv.Tx) (it iter.KV, err error) { + //TODO: aggTx to store array of histories + var domainName kv.Domain -func (ac *AggregatorRoTx) StorageHistoryRange(startTxNum, endTxNum int, asc order.By, limit int, tx kv.Tx) (iter.KV, error) { - hr, err := ac.d[kv.StorageDomain].ht.HistoryRange(startTxNum, endTxNum, asc, limit, tx) - if err != nil { - return nil, err + switch name { + case kv.AccountsHistory: + domainName = kv.AccountsDomain + case kv.StorageHistory: + domainName = kv.StorageDomain + case kv.CodeHistory: + domainName = kv.CodeDomain + default: + return nil, fmt.Errorf("unexpected history name: %s", name) } - return iter.WrapKV(hr), nil -} -func (ac *AggregatorRoTx) CodeHistoryRange(startTxNum, endTxNum int, asc order.By, limit int, tx kv.Tx) (iter.KV, error) { - hr, err := ac.d[kv.CodeDomain].ht.HistoryRange(startTxNum, endTxNum, asc, limit, tx) + hr, err := ac.d[domainName].ht.HistoryRange(fromTs, toTs, asc, limit, tx) if err != nil { return nil, err } @@ -1770,17 +1768,17 @@ type AggregatorRoTx struct { } func (a *Aggregator) BeginFilesRo() *AggregatorRoTx { - a.visibleFilesLock.RLock() ac := &AggregatorRoTx{ - a: a, - 
logAddrs: a.logAddrs.BeginFilesRo(), - logTopics: a.logTopics.BeginFilesRo(), - tracesFrom: a.tracesFrom.BeginFilesRo(), - tracesTo: a.tracesTo.BeginFilesRo(), - + a: a, id: a.ctxAutoIncrement.Add(1), _leakID: a.leakDetector.Add(), } + + a.visibleFilesLock.RLock() + ac.logAddrs = a.logAddrs.BeginFilesRo() + ac.logTopics = a.logTopics.BeginFilesRo() + ac.tracesFrom = a.tracesFrom.BeginFilesRo() + ac.tracesTo = a.tracesTo.BeginFilesRo() for id, d := range a.d { ac.d[id] = d.BeginFilesRo() } diff --git a/erigon-lib/state/domain.go b/erigon-lib/state/domain.go index d0934f4a372..cc31fcc5edc 100644 --- a/erigon-lib/state/domain.go +++ b/erigon-lib/state/domain.go @@ -819,7 +819,7 @@ func (d *Domain) collate(ctx context.Context, step, txFrom, txTo uint64, roTx kv }() coll.valuesPath = d.kvFilePath(step, step+1) - if coll.valuesComp, err = seg.NewCompressor(ctx, "collate values", coll.valuesPath, d.dirs.Tmp, seg.MinPatternScore, d.compressWorkers, log.LvlTrace, d.logger); err != nil { + if coll.valuesComp, err = seg.NewCompressor(ctx, "collate domain "+d.filenameBase, coll.valuesPath, d.dirs.Tmp, seg.MinPatternScore, d.compressWorkers, log.LvlTrace, d.logger); err != nil { return Collation{}, fmt.Errorf("create %s values compressor: %w", d.filenameBase, err) } comp := NewArchiveWriter(coll.valuesComp, d.compression) @@ -1101,15 +1101,6 @@ func (d *Domain) BuildMissedIndices(ctx context.Context, g *errgroup.Group, ps * } } -func buildIndexFilterThenOpen(ctx context.Context, d *seg.Decompressor, compressed FileCompression, idxPath, tmpdir string, salt *uint32, ps *background.ProgressSet, logger log.Logger, noFsync bool) (*ExistenceFilter, error) { - if err := buildIdxFilter(ctx, d, compressed, idxPath, salt, ps, logger, noFsync); err != nil { - return nil, err - } - if !dir.FileExist(idxPath) { - return nil, nil - } - return OpenExistenceFilter(idxPath) -} func buildIndex(ctx context.Context, d *seg.Decompressor, compressed FileCompression, idxPath string, values bool, cfg recsplit.RecSplitArgs, ps *background.ProgressSet, logger log.Logger, noFsync bool) error { _, fileName := filepath.Split(idxPath) count := d.Count() diff --git a/erigon-lib/state/domain_shared.go b/erigon-lib/state/domain_shared.go index 311a3198db2..ca0e4e8e77a 100644 --- a/erigon-lib/state/domain_shared.go +++ b/erigon-lib/state/domain_shared.go @@ -6,8 +6,6 @@ import ( "context" "encoding/binary" "fmt" - "github.com/ledgerwatch/erigon-lib/common/cryptozerocopy" - "golang.org/x/crypto/sha3" "math" "path/filepath" "runtime" @@ -15,6 +13,9 @@ import ( "time" "unsafe" + "github.com/ledgerwatch/erigon-lib/common/cryptozerocopy" + "golang.org/x/crypto/sha3" + btree2 "github.com/tidwall/btree" "github.com/ledgerwatch/erigon-lib/commitment" @@ -150,10 +151,11 @@ func (sd *SharedDomains) Unwind(ctx context.Context, rwTx kv.RwTx, blockUnwindTo } func (sd *SharedDomains) rebuildCommitment(ctx context.Context, roTx kv.Tx, blockNum uint64) ([]byte, error) { - it, err := sd.aggTx.AccountHistoryRange(int(sd.TxNum()), math.MaxInt64, order.Asc, -1, roTx) + it, err := sd.aggTx.HistoryRange(kv.AccountsHistory, int(sd.TxNum()), math.MaxInt64, order.Asc, -1, roTx) if err != nil { return nil, err } + defer it.Close() for it.HasNext() { k, _, err := it.Next() if err != nil { @@ -162,10 +164,11 @@ func (sd *SharedDomains) rebuildCommitment(ctx context.Context, roTx kv.Tx, bloc sd.sdCtx.TouchKey(kv.AccountsDomain, string(k), nil) } - it, err = sd.aggTx.StorageHistoryRange(int(sd.TxNum()), math.MaxInt64, order.Asc, -1, roTx) + it, err = 
sd.aggTx.HistoryRange(kv.StorageHistory, int(sd.TxNum()), math.MaxInt64, order.Asc, -1, roTx) if err != nil { return nil, err } + defer it.Close() for it.HasNext() { k, _, err := it.Next() diff --git a/erigon-lib/state/domain_test.go b/erigon-lib/state/domain_test.go index 40716a58441..a9d9393b7bf 100644 --- a/erigon-lib/state/domain_test.go +++ b/erigon-lib/state/domain_test.go @@ -1621,7 +1621,6 @@ func TestDomain_CanPruneAfterAggregation(t *testing.T) { d.historyLargeValues = false d.History.compression = CompressKeys | CompressVals d.compression = CompressKeys | CompressVals - d.withExistenceIndex = true dc := d.BeginFilesRo() defer dc.Close() diff --git a/erigon-lib/state/history.go b/erigon-lib/state/history.go index 5bfddadb2ec..9c9fe9a863c 100644 --- a/erigon-lib/state/history.go +++ b/erigon-lib/state/history.go @@ -123,7 +123,7 @@ func NewHistory(cfg histCfg, aggregationStep uint64, filenameBase, indexKeysTabl } h._visibleFiles = []ctxItem{} var err error - h.InvertedIndex, err = NewInvertedIndex(cfg.iiCfg, aggregationStep, filenameBase, indexKeysTable, indexTable, cfg.withExistenceIndex, func(fromStep, toStep uint64) bool { return dir.FileExist(h.vFilePath(fromStep, toStep)) }, logger) + h.InvertedIndex, err = NewInvertedIndex(cfg.iiCfg, aggregationStep, filenameBase, indexKeysTable, indexTable, func(fromStep, toStep uint64) bool { return dir.FileExist(h.vFilePath(fromStep, toStep)) }, logger) if err != nil { return nil, fmt.Errorf("NewHistory: %s, %w", filenameBase, err) } @@ -585,7 +585,7 @@ func (h *History) collate(ctx context.Context, step, txFrom, txTo uint64, roTx k } }() - comp, err := seg.NewCompressor(ctx, "collate history", historyPath, h.dirs.Tmp, seg.MinPatternScore, h.compressWorkers, log.LvlTrace, h.logger) + comp, err := seg.NewCompressor(ctx, "collate hist "+h.filenameBase, historyPath, h.dirs.Tmp, seg.MinPatternScore, h.compressWorkers, log.LvlTrace, h.logger) if err != nil { return HistoryCollation{}, fmt.Errorf("create %s history compressor: %w", h.filenameBase, err) } @@ -598,7 +598,7 @@ func (h *History) collate(ctx context.Context, step, txFrom, txTo uint64, roTx k defer keysCursor.Close() binary.BigEndian.PutUint64(txKey[:], txFrom) - collector := etl.NewCollector("collate "+h.historyValsTable, h.iiCfg.dirs.Tmp, etl.NewSortableBuffer(CollateETLRAM), h.logger) + collector := etl.NewCollector("collate hist "+h.filenameBase, h.iiCfg.dirs.Tmp, etl.NewSortableBuffer(CollateETLRAM), h.logger) defer collector.Close() for txnmb, k, err := keysCursor.Seek(txKey[:]); err == nil && txnmb != nil; txnmb, k, err = keysCursor.Next() { @@ -636,7 +636,7 @@ func (h *History) collate(ctx context.Context, step, txFrom, txTo uint64, roTx k defer cd.Close() } - efComp, err := seg.NewCompressor(ctx, "collate ef history", efHistoryPath, h.dirs.Tmp, seg.MinPatternScore, h.compressWorkers, log.LvlTrace, h.logger) + efComp, err := seg.NewCompressor(ctx, "collate idx "+h.filenameBase, efHistoryPath, h.dirs.Tmp, seg.MinPatternScore, h.compressWorkers, log.LvlTrace, h.logger) if err != nil { return HistoryCollation{}, fmt.Errorf("create %s ef history compressor: %w", h.filenameBase, err) } @@ -851,12 +851,6 @@ func (h *History) buildFiles(ctx context.Context, step uint64, collation History return HistoryFiles{}, err } } - if h.InvertedIndex.withExistenceIndex { - existenceIdxPath := h.efExistenceIdxFilePath(step, step+1) - if efExistence, err = buildIndexFilterThenOpen(ctx, efHistoryDecomp, h.compression, existenceIdxPath, h.dirs.Tmp, h.salt, ps, h.logger, h.noFsync); err != nil { - 
return HistoryFiles{}, fmt.Errorf("build %s .ef history idx: %w", h.filenameBase, err) - } - } historyDecomp, err = seg.NewDecompressor(collation.historyPath) if err != nil { diff --git a/erigon-lib/state/inverted_index.go b/erigon-lib/state/inverted_index.go index 216f316bf3c..c331526d52e 100644 --- a/erigon-lib/state/inverted_index.go +++ b/erigon-lib/state/inverted_index.go @@ -83,8 +83,6 @@ type InvertedIndex struct { //TODO: re-visit this check - maybe we don't need it. It's abot kill in the middle of merge integrityCheck func(fromStep, toStep uint64) bool - withExistenceIndex bool - // fields for history write logger log.Logger @@ -101,36 +99,29 @@ type iiCfg struct { db kv.RoDB // global db pointer. mostly for background warmup. } -func NewInvertedIndex(cfg iiCfg, aggregationStep uint64, filenameBase, indexKeysTable, indexTable string, withExistenceIndex bool, integrityCheck func(fromStep uint64, toStep uint64) bool, logger log.Logger) (*InvertedIndex, error) { +func NewInvertedIndex(cfg iiCfg, aggregationStep uint64, filenameBase, indexKeysTable, indexTable string, integrityCheck func(fromStep uint64, toStep uint64) bool, logger log.Logger) (*InvertedIndex, error) { if cfg.dirs.SnapDomain == "" { panic("empty `dirs` varialbe") } ii := InvertedIndex{ - iiCfg: cfg, - dirtyFiles: btree2.NewBTreeGOptions[*filesItem](filesItemLess, btree2.Options{Degree: 128, NoLocks: false}), - aggregationStep: aggregationStep, - filenameBase: filenameBase, - indexKeysTable: indexKeysTable, - indexTable: indexTable, - compressWorkers: 1, - integrityCheck: integrityCheck, - withExistenceIndex: withExistenceIndex, - logger: logger, - compression: CompressNone, + iiCfg: cfg, + dirtyFiles: btree2.NewBTreeGOptions[*filesItem](filesItemLess, btree2.Options{Degree: 128, NoLocks: false}), + aggregationStep: aggregationStep, + filenameBase: filenameBase, + indexKeysTable: indexKeysTable, + indexTable: indexTable, + compressWorkers: 1, + integrityCheck: integrityCheck, + logger: logger, + compression: CompressNone, } ii.indexList = withHashMap - if ii.withExistenceIndex { - ii.indexList |= withExistence - } ii._visibleFiles = []ctxItem{} return &ii, nil } -func (ii *InvertedIndex) efExistenceIdxFilePath(fromStep, toStep uint64) string { - return filepath.Join(ii.dirs.SnapAccessors, fmt.Sprintf("v1-%s.%d-%d.efei", ii.filenameBase, fromStep, toStep)) -} func (ii *InvertedIndex) efAccessorFilePath(fromStep, toStep uint64) string { return filepath.Join(ii.dirs.SnapAccessors, fmt.Sprintf("v1-%s.%d-%d.efi", ii.filenameBase, fromStep, toStep)) } @@ -251,18 +242,6 @@ func (ii *InvertedIndex) missedIdxFiles() (l []*filesItem) { }) return l } -func (ii *InvertedIndex) missedExistenceFilterFiles() (l []*filesItem) { - ii.dirtyFiles.Walk(func(items []*filesItem) bool { - for _, item := range items { - fromStep, toStep := item.startTxNum/ii.aggregationStep, item.endTxNum/ii.aggregationStep - if !dir.FileExist(ii.efExistenceIdxFilePath(fromStep, toStep)) { - l = append(l, item) - } - } - return true - }) - return l -} func (ii *InvertedIndex) buildEfi(ctx context.Context, item *filesItem, ps *background.ProgressSet) (err error) { if item.decompressor == nil { @@ -271,56 +250,6 @@ func (ii *InvertedIndex) buildEfi(ctx context.Context, item *filesItem, ps *back fromStep, toStep := item.startTxNum/ii.aggregationStep, item.endTxNum/ii.aggregationStep return ii.buildMapIdx(ctx, fromStep, toStep, item.decompressor, ps) } -func (ii *InvertedIndex) buildExistenceFilter(ctx context.Context, item *filesItem, ps *background.ProgressSet) 
(err error) { - if item.decompressor == nil { - return fmt.Errorf("buildExistenceFilter: passed item with nil decompressor %s %d-%d", ii.filenameBase, item.startTxNum/ii.aggregationStep, item.endTxNum/ii.aggregationStep) - } - if !ii.withExistenceIndex { - return nil - } - fromStep, toStep := item.startTxNum/ii.aggregationStep, item.endTxNum/ii.aggregationStep - idxPath := ii.efExistenceIdxFilePath(fromStep, toStep) - return buildIdxFilter(ctx, item.decompressor, ii.compression, idxPath, ii.salt, ps, ii.logger, ii.noFsync) -} - -func buildIdxFilter(ctx context.Context, d *seg.Decompressor, compressed FileCompression, idxPath string, salt *uint32, ps *background.ProgressSet, logger log.Logger, noFsync bool) error { - g := NewArchiveGetter(d.MakeGetter(), compressed) - _, fileName := filepath.Split(idxPath) - count := d.Count() / 2 - - p := ps.AddNew(fileName, uint64(count)) - defer ps.Delete(p) - defer d.EnableReadAhead().DisableReadAhead() - - idxFilter, err := NewExistenceFilter(uint64(count), idxPath) - if err != nil { - return err - } - if noFsync { - idxFilter.DisableFsync() - } - hasher := murmur3.New128WithSeed(*salt) - - key := make([]byte, 0, 256) - g.Reset(0) - for g.HasNext() { - key, _ = g.Next(key[:0]) - hasher.Reset() - hasher.Write(key) //nolint:errcheck - hi, _ := hasher.Sum128() - idxFilter.AddHash(hi) - - // Skip value - g.Skip() - - p.Processed.Add(1) - } - if err := idxFilter.Build(); err != nil { - return err - } - - return nil -} // BuildMissedIndices - produce .efi/.vi/.kvi from .ef/.v/.kv func (ii *InvertedIndex) BuildMissedIndices(ctx context.Context, g *errgroup.Group, ps *background.ProgressSet) { @@ -331,12 +260,6 @@ func (ii *InvertedIndex) BuildMissedIndices(ctx context.Context, g *errgroup.Gro }) } - for _, item := range ii.missedExistenceFilterFiles() { - item := item - g.Go(func() error { - return ii.buildExistenceFilter(ctx, item, ps) - }) - } } func (ii *InvertedIndex) openFiles() error { @@ -383,16 +306,6 @@ func (ii *InvertedIndex) openFiles() error { } } } - if item.existence == nil && ii.withExistenceIndex { - fPath := ii.efExistenceIdxFilePath(fromStep, toStep) - if dir.FileExist(fPath) { - if item.existence, err = OpenExistenceFilter(fPath); err != nil { - _, fName := filepath.Split(fPath) - ii.logger.Warn("[agg] InvertedIndex.openFiles", "err", err, "f", fName) - // don't interrupt on error. 
other files may be good - } - } - } } return true @@ -627,11 +540,6 @@ func (iit *InvertedIndexRoTx) seekInFiles(key []byte, txNum uint64) (found bool, if iit.files[i].endTxNum <= txNum { continue } - if iit.ii.withExistenceIndex && iit.files[i].src.existence != nil { - if !iit.files[i].src.existence.ContainsHash(hi) { - continue - } - } offset, ok := iit.statelessIdxReader(i).TwoLayerLookupByHash(hi, lo) if !ok { continue @@ -886,7 +794,7 @@ func (iit *InvertedIndexRoTx) Prune(ctx context.Context, rwTx kv.RwTx, txFrom, t } indexWithValues := idxValuesCount != 0 || fn != nil - collector := etl.NewCollector("snapshots", ii.dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize/8), ii.logger) + collector := etl.NewCollector("prune idx "+ii.filenameBase, ii.dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize/8), ii.logger) defer collector.Close() collector.LogLvl(log.LvlDebug) collector.SortAndFlushInBackground(true) @@ -1462,7 +1370,7 @@ func (ii *InvertedIndex) collate(ctx context.Context, step uint64, roTx kv.Tx) ( } defer keysCursor.Close() - collector := etl.NewCollector("collate "+ii.indexKeysTable, ii.iiCfg.dirs.Tmp, etl.NewSortableBuffer(CollateETLRAM), ii.logger) + collector := etl.NewCollector("collate idx "+ii.filenameBase, ii.iiCfg.dirs.Tmp, etl.NewSortableBuffer(CollateETLRAM), ii.logger) defer collector.Close() collector.LogLvl(log.LvlTrace) @@ -1499,7 +1407,7 @@ func (ii *InvertedIndex) collate(ctx context.Context, step uint64, roTx kv.Tx) ( } }() - comp, err := seg.NewCompressor(ctx, "snapshots", coll.iiPath, ii.dirs.Tmp, seg.MinPatternScore, ii.compressWorkers, log.LvlTrace, ii.logger) + comp, err := seg.NewCompressor(ctx, "collate idx "+ii.filenameBase, coll.iiPath, ii.dirs.Tmp, seg.MinPatternScore, ii.compressWorkers, log.LvlTrace, ii.logger) if err != nil { return InvertedIndexCollation{}, fmt.Errorf("create %s compressor: %w", ii.filenameBase, err) } @@ -1643,13 +1551,6 @@ func (ii *InvertedIndex) buildFiles(ctx context.Context, step uint64, coll Inver return InvertedFiles{}, err } - if ii.withExistenceIndex { - idxPath2 := ii.efExistenceIdxFilePath(step, step+1) - if existence, err = buildIndexFilterThenOpen(ctx, decomp, ii.compression, idxPath2, ii.dirs.Tmp, ii.salt, ps, ii.logger, ii.noFsync); err != nil { - return InvertedFiles{}, fmt.Errorf("build %s efei: %w", ii.filenameBase, err) - } - } - closeComp = false return InvertedFiles{decomp: decomp, index: index, existence: existence}, nil } @@ -1670,10 +1571,6 @@ func (ii *InvertedIndex) buildMapIdx(ctx context.Context, fromStep, toStep uint6 } func (ii *InvertedIndex) integrateDirtyFiles(sf InvertedFiles, txNumFrom, txNumTo uint64) { - if asserts && ii.withExistenceIndex && sf.existence == nil { - panic(fmt.Errorf("assert: no existence index: %s", sf.decomp.FileName())) - } - fi := newFilesItem(txNumFrom, txNumTo, ii.aggregationStep) fi.decompressor = sf.decomp fi.index = sf.index diff --git a/erigon-lib/state/inverted_index_test.go b/erigon-lib/state/inverted_index_test.go index 3e710e17a29..4a4382ef44f 100644 --- a/erigon-lib/state/inverted_index_test.go +++ b/erigon-lib/state/inverted_index_test.go @@ -54,7 +54,7 @@ func testDbAndInvertedIndex(tb testing.TB, aggStep uint64, logger log.Logger) (k tb.Cleanup(db.Close) salt := uint32(1) cfg := iiCfg{salt: &salt, dirs: dirs, db: db} - ii, err := NewInvertedIndex(cfg, aggStep, "inv", keysTable, indexTable, true, nil, logger) + ii, err := NewInvertedIndex(cfg, aggStep, "inv", keysTable, indexTable, nil, logger) require.NoError(tb, err) ii.DisableFsync() 
tb.Cleanup(ii.Close) @@ -452,7 +452,7 @@ func TestInvIndexScanFiles(t *testing.T) { var err error salt := uint32(1) cfg := iiCfg{salt: &salt, dirs: ii.dirs, db: db} - ii, err = NewInvertedIndex(cfg, ii.aggregationStep, ii.filenameBase, ii.indexKeysTable, ii.indexTable, true, nil, logger) + ii, err = NewInvertedIndex(cfg, ii.aggregationStep, ii.filenameBase, ii.indexKeysTable, ii.indexTable, nil, logger) require.NoError(t, err) defer ii.Close() diff --git a/erigon-lib/state/merge.go b/erigon-lib/state/merge.go index 25355398844..4f9491e6e15 100644 --- a/erigon-lib/state/merge.go +++ b/erigon-lib/state/merge.go @@ -42,8 +42,8 @@ import ( func (d *Domain) dirtyFilesEndTxNumMinimax() uint64 { minimax := d.History.endTxNumMinimax() - if max, ok := d.dirtyFiles.Max(); ok { - endTxNum := max.endTxNum + if _max, ok := d.dirtyFiles.Max(); ok { + endTxNum := _max.endTxNum if minimax == 0 || endTxNum < minimax { minimax = endTxNum } @@ -53,8 +53,8 @@ func (d *Domain) dirtyFilesEndTxNumMinimax() uint64 { func (ii *InvertedIndex) endTxNumMinimax() uint64 { var minimax uint64 - if max, ok := ii.dirtyFiles.Max(); ok { - endTxNum := max.endTxNum + if _max, ok := ii.dirtyFiles.Max(); ok { + endTxNum := _max.endTxNum if minimax == 0 || endTxNum < minimax { minimax = endTxNum } @@ -62,17 +62,17 @@ func (ii *InvertedIndex) endTxNumMinimax() uint64 { return minimax } func (ii *InvertedIndex) endIndexedTxNumMinimax(needFrozen bool) uint64 { - var max uint64 + var _max uint64 ii.dirtyFiles.Walk(func(items []*filesItem) bool { for _, item := range items { if item.index == nil || (needFrozen && !item.frozen) { continue } - max = cmp.Max(max, item.endTxNum) + _max = cmp.Max(_max, item.endTxNum) } return true }) - return max + return _max } func (h *History) endTxNumMinimax() uint64 { @@ -80,8 +80,8 @@ func (h *History) endTxNumMinimax() uint64 { return math.MaxUint64 } minimax := h.InvertedIndex.endTxNumMinimax() - if max, ok := h.dirtyFiles.Max(); ok { - endTxNum := max.endTxNum + if _max, ok := h.dirtyFiles.Max(); ok { + endTxNum := _max.endTxNum if minimax == 0 || endTxNum < minimax { minimax = endTxNum } @@ -89,20 +89,20 @@ func (h *History) endTxNumMinimax() uint64 { return minimax } func (h *History) endIndexedTxNumMinimax(needFrozen bool) uint64 { - var max uint64 + var _max uint64 if h.dontProduceHistoryFiles && h.dirtyFiles.Len() == 0 { - max = math.MaxUint64 + _max = math.MaxUint64 } h.dirtyFiles.Walk(func(items []*filesItem) bool { for _, item := range items { if item.index == nil || (needFrozen && !item.frozen) { continue } - max = cmp.Max(max, item.endTxNum) + _max = cmp.Max(_max, item.endTxNum) } return true }) - return cmp.Min(max, h.InvertedIndex.endIndexedTxNumMinimax(needFrozen)) + return cmp.Min(_max, h.InvertedIndex.endIndexedTxNumMinimax(needFrozen)) } type DomainRanges struct { @@ -315,19 +315,19 @@ func (ht *HistoryRoTx) maxTxNumInFiles(onlyFrozen bool) uint64 { if len(ht.files) == 0 { return 0 } - var max uint64 + var _max uint64 if onlyFrozen { for i := len(ht.files) - 1; i >= 0; i-- { if !ht.files[i].src.frozen { continue } - max = ht.files[i].endTxNum + _max = ht.files[i].endTxNum break } } else { - max = ht.files[len(ht.files)-1].endTxNum + _max = ht.files[len(ht.files)-1].endTxNum } - return cmp.Min(max, ht.iit.maxTxNumInFiles(onlyFrozen)) + return cmp.Min(_max, ht.iit.maxTxNumInFiles(onlyFrozen)) } func (iit *InvertedIndexRoTx) maxTxNumInFiles(onlyFrozen bool) uint64 { @@ -543,7 +543,7 @@ func (dt *DomainRoTx) mergeFiles(ctx context.Context, domainFiles, indexFiles, h fromStep, toStep 
:= r.valuesStartTxNum/dt.d.aggregationStep, r.valuesEndTxNum/dt.d.aggregationStep kvFilePath := dt.d.kvFilePath(fromStep, toStep) - kvFile, err := seg.NewCompressor(ctx, "merge", kvFilePath, dt.d.dirs.Tmp, seg.MinPatternScore, dt.d.compressWorkers, log.LvlTrace, dt.d.logger) + kvFile, err := seg.NewCompressor(ctx, "merge domain "+dt.d.filenameBase, kvFilePath, dt.d.dirs.Tmp, seg.MinPatternScore, dt.d.compressWorkers, log.LvlTrace, dt.d.logger) if err != nil { return nil, nil, nil, fmt.Errorf("merge %s compressor: %w", dt.d.filenameBase, err) } @@ -707,7 +707,7 @@ func (iit *InvertedIndexRoTx) mergeFiles(ctx context.Context, files []*filesItem fromStep, toStep := startTxNum/iit.ii.aggregationStep, endTxNum/iit.ii.aggregationStep datPath := iit.ii.efFilePath(fromStep, toStep) - if comp, err = seg.NewCompressor(ctx, "Snapshots merge", datPath, iit.ii.dirs.Tmp, seg.MinPatternScore, iit.ii.compressWorkers, log.LvlTrace, iit.ii.logger); err != nil { + if comp, err = seg.NewCompressor(ctx, "merge idx "+iit.ii.filenameBase, datPath, iit.ii.dirs.Tmp, seg.MinPatternScore, iit.ii.compressWorkers, log.LvlTrace, iit.ii.logger); err != nil { return nil, fmt.Errorf("merge %s inverted index compressor: %w", iit.ii.filenameBase, err) } if iit.ii.noFsync { @@ -810,13 +810,6 @@ func (iit *InvertedIndexRoTx) mergeFiles(ctx context.Context, files []*filesItem return nil, err } - if iit.ii.withExistenceIndex { - idxPath := iit.ii.efExistenceIdxFilePath(fromStep, toStep) - if outItem.existence, err = buildIndexFilterThenOpen(ctx, outItem.decompressor, iit.ii.compression, idxPath, iit.ii.dirs.Tmp, iit.ii.salt, ps, iit.ii.logger, iit.ii.noFsync); err != nil { - return nil, err - } - } - closeItem = false return outItem, nil } @@ -871,7 +864,7 @@ func (ht *HistoryRoTx) mergeFiles(ctx context.Context, indexFiles, historyFiles fromStep, toStep := r.historyStartTxNum/ht.h.aggregationStep, r.historyEndTxNum/ht.h.aggregationStep datPath := ht.h.vFilePath(fromStep, toStep) idxPath := ht.h.vAccessorFilePath(fromStep, toStep) - if comp, err = seg.NewCompressor(ctx, "merge", datPath, ht.h.dirs.Tmp, seg.MinPatternScore, ht.h.compressWorkers, log.LvlTrace, ht.h.logger); err != nil { + if comp, err = seg.NewCompressor(ctx, "merge hist "+ht.h.filenameBase, datPath, ht.h.dirs.Tmp, seg.MinPatternScore, ht.h.compressWorkers, log.LvlTrace, ht.h.logger); err != nil { return nil, nil, fmt.Errorf("merge %s history compressor: %w", ht.h.filenameBase, err) } compr := NewArchiveWriter(comp, ht.h.compression) diff --git a/erigon-lib/state/merge_test.go b/erigon-lib/state/merge_test.go index 52ab0b24410..a75c8852636 100644 --- a/erigon-lib/state/merge_test.go +++ b/erigon-lib/state/merge_test.go @@ -24,7 +24,6 @@ func emptyTestInvertedIndex(aggStep uint64) *InvertedIndex { func TestFindMergeRangeCornerCases(t *testing.T) { t.Run("> 2 unmerged files", func(t *testing.T) { ii := emptyTestInvertedIndex(1) - ii.withExistenceIndex = false ii.scanStateFiles([]string{ "v1-test.0-2.ef", "v1-test.2-3.ef", From a6d3dc6cf2bbd2391be21b69afa83267007bd6b4 Mon Sep 17 00:00:00 2001 From: battlmonstr Date: Mon, 13 May 2024 13:43:48 +0200 Subject: [PATCH 35/48] polygon/heimdall: scraper observer registrations (#10247) --- polygon/heimdall/scraper.go | 100 +++++++++++++++++-- polygon/p2p/message_listener.go | 127 +++++++----------------- polygon/p2p/peer_tracker.go | 3 +- polygon/p2p/service_mock.go | 31 +++--- polygon/polygoncommon/event_notifier.go | 66 ++++++++++++ polygon/polygoncommon/observers.go | 79 +++++++++++++++ 6 files changed, 293 insertions(+), 
113 deletions(-) create mode 100644 polygon/polygoncommon/event_notifier.go create mode 100644 polygon/polygoncommon/observers.go diff --git a/polygon/heimdall/scraper.go b/polygon/heimdall/scraper.go index 58d47ce3fb3..14c40c9acba 100644 --- a/polygon/heimdall/scraper.go +++ b/polygon/heimdall/scraper.go @@ -9,6 +9,7 @@ import ( libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon/polygon/polygoncommon" "github.com/ledgerwatch/erigon/turbo/services" ) @@ -18,7 +19,16 @@ type Scraper struct { client HeimdallClient pollDelay time.Duration - logger log.Logger + + checkpointObservers *polygoncommon.Observers[[]*Checkpoint] + milestoneObservers *polygoncommon.Observers[[]*Milestone] + spanObservers *polygoncommon.Observers[[]*Span] + + checkpointSyncEvent *polygoncommon.EventNotifier + milestoneSyncEvent *polygoncommon.EventNotifier + spanSyncEvent *polygoncommon.EventNotifier + + logger log.Logger } func NewScraperTODO( @@ -49,7 +59,16 @@ func NewScraper( client: client, pollDelay: pollDelay, - logger: logger, + + checkpointObservers: polygoncommon.NewObservers[[]*Checkpoint](), + milestoneObservers: polygoncommon.NewObservers[[]*Milestone](), + spanObservers: polygoncommon.NewObservers[[]*Span](), + + checkpointSyncEvent: polygoncommon.NewEventNotifier(), + milestoneSyncEvent: polygoncommon.NewEventNotifier(), + spanSyncEvent: polygoncommon.NewEventNotifier(), + + logger: logger, } } @@ -57,7 +76,8 @@ func (s *Scraper) syncEntity( ctx context.Context, store entityStore, fetcher entityFetcher, - callback func(ClosedRange), + callback func([]Entity), + syncEvent *polygoncommon.EventNotifier, ) error { for ctx.Err() == nil { lastKnownId, hasLastKnownId, err := store.GetLastEntityId(ctx) @@ -78,7 +98,11 @@ func (s *Scraper) syncEntity( } if idRange.Start > idRange.End { + syncEvent.SetAndBroadcast() libcommon.Sleep(ctx, s.pollDelay) + if ctx.Err() != nil { + syncEvent.Reset() + } } else { entities, err := fetcher.FetchEntitiesRange(ctx, idRange) if err != nil { @@ -92,7 +116,7 @@ func (s *Scraper) syncEntity( } if callback != nil { - go callback(idRange) + go callback(entities) } } } @@ -165,6 +189,36 @@ func newSpanFetcher(client HeimdallClient, logger log.Logger) entityFetcher { ) } +func downcastCheckpointEntity(e Entity) *Checkpoint { + return e.(*Checkpoint) +} + +func downcastMilestoneEntity(e Entity) *Milestone { + return e.(*Milestone) +} + +func downcastSpanEntity(e Entity) *Span { + return e.(*Span) +} + +func (s *Scraper) RegisterCheckpointObserver(observer func([]*Checkpoint)) polygoncommon.UnregisterFunc { + return s.checkpointObservers.Register(observer) +} + +func (s *Scraper) RegisterMilestoneObserver(observer func([]*Milestone)) polygoncommon.UnregisterFunc { + return s.milestoneObservers.Register(observer) +} + +func (s *Scraper) RegisterSpanObserver(observer func([]*Span)) polygoncommon.UnregisterFunc { + return s.spanObservers.Register(observer) +} + +func (s *Scraper) Synchronize(ctx context.Context) { + s.checkpointSyncEvent.Wait(ctx) + s.milestoneSyncEvent.Wait(ctx) + s.spanSyncEvent.Wait(ctx) +} + func (s *Scraper) Run(parentCtx context.Context) error { tx := s.txProvider() if tx == nil { @@ -183,16 +237,48 @@ func (s *Scraper) Run(parentCtx context.Context) error { // sync checkpoints group.Go(func() error { - return s.syncEntity(ctx, newCheckpointStore(tx, reader), newCheckpointFetcher(s.client, s.logger), nil /* TODO */) + return s.syncEntity( + ctx, + newCheckpointStore(tx, reader), + 
newCheckpointFetcher(s.client, s.logger), + func(entities []Entity) { + s.checkpointObservers.Notify(libcommon.SliceMap(entities, downcastCheckpointEntity)) + }, + s.checkpointSyncEvent, + ) }) + // sync milestones group.Go(func() error { - return s.syncEntity(ctx, newMilestoneStore(tx, reader), newMilestoneFetcher(s.client, s.logger), nil /* TODO */) + return s.syncEntity( + ctx, + newMilestoneStore(tx, reader), + newMilestoneFetcher(s.client, s.logger), + func(entities []Entity) { + s.milestoneObservers.Notify(libcommon.SliceMap(entities, downcastMilestoneEntity)) + }, + s.milestoneSyncEvent, + ) }) + // sync spans group.Go(func() error { - return s.syncEntity(ctx, newSpanStore(tx, reader), newSpanFetcher(s.client, s.logger), nil /* TODO */) + return s.syncEntity( + ctx, + newSpanStore(tx, reader), + newSpanFetcher(s.client, s.logger), + func(entities []Entity) { + s.spanObservers.Notify(libcommon.SliceMap(entities, downcastSpanEntity)) + }, + s.spanSyncEvent, + ) }) + defer func() { + s.checkpointObservers.Close() + s.milestoneObservers.Close() + s.spanObservers.Close() + }() + return group.Wait() } diff --git a/polygon/p2p/message_listener.go b/polygon/p2p/message_listener.go index 9da0171cb04..dd87ec9841d 100644 --- a/polygon/p2p/message_listener.go +++ b/polygon/p2p/message_listener.go @@ -12,6 +12,7 @@ import ( sentry "github.com/ledgerwatch/erigon-lib/gointerfaces/sentryproto" "github.com/ledgerwatch/erigon/eth/protocols/eth" sentrymulticlient "github.com/ledgerwatch/erigon/p2p/sentry/sentry_multi_client" + "github.com/ledgerwatch/erigon/polygon/polygoncommon" "github.com/ledgerwatch/erigon/rlp" ) @@ -21,17 +22,15 @@ type DecodedInboundMessage[TPacket any] struct { PeerId *PeerId } -type MessageObserver[TMessage any] func(message TMessage) - -type UnregisterFunc func() +type UnregisterFunc = polygoncommon.UnregisterFunc type MessageListener interface { Run(ctx context.Context) - RegisterNewBlockObserver(observer MessageObserver[*DecodedInboundMessage[*eth.NewBlockPacket]]) UnregisterFunc - RegisterNewBlockHashesObserver(observer MessageObserver[*DecodedInboundMessage[*eth.NewBlockHashesPacket]]) UnregisterFunc - RegisterBlockHeadersObserver(observer MessageObserver[*DecodedInboundMessage[*eth.BlockHeadersPacket66]]) UnregisterFunc - RegisterBlockBodiesObserver(observer MessageObserver[*DecodedInboundMessage[*eth.BlockBodiesPacket66]]) UnregisterFunc - RegisterPeerEventObserver(observer MessageObserver[*sentry.PeerEvent]) UnregisterFunc + RegisterNewBlockObserver(observer polygoncommon.Observer[*DecodedInboundMessage[*eth.NewBlockPacket]]) UnregisterFunc + RegisterNewBlockHashesObserver(observer polygoncommon.Observer[*DecodedInboundMessage[*eth.NewBlockHashesPacket]]) UnregisterFunc + RegisterBlockHeadersObserver(observer polygoncommon.Observer[*DecodedInboundMessage[*eth.BlockHeadersPacket66]]) UnregisterFunc + RegisterBlockBodiesObserver(observer polygoncommon.Observer[*DecodedInboundMessage[*eth.BlockBodiesPacket66]]) UnregisterFunc + RegisterPeerEventObserver(observer polygoncommon.Observer[*sentry.PeerEvent]) UnregisterFunc } func NewMessageListener( @@ -54,27 +53,25 @@ func newMessageListener( sentryClient: sentryClient, statusDataFactory: statusDataFactory, peerPenalizer: peerPenalizer, - newBlockObservers: map[uint64]MessageObserver[*DecodedInboundMessage[*eth.NewBlockPacket]]{}, - newBlockHashesObservers: map[uint64]MessageObserver[*DecodedInboundMessage[*eth.NewBlockHashesPacket]]{}, - blockHeadersObservers: 
map[uint64]MessageObserver[*DecodedInboundMessage[*eth.BlockHeadersPacket66]]{}, - blockBodiesObservers: map[uint64]MessageObserver[*DecodedInboundMessage[*eth.BlockBodiesPacket66]]{}, - peerEventObservers: map[uint64]MessageObserver[*sentry.PeerEvent]{}, + newBlockObservers: polygoncommon.NewObservers[*DecodedInboundMessage[*eth.NewBlockPacket]](), + newBlockHashesObservers: polygoncommon.NewObservers[*DecodedInboundMessage[*eth.NewBlockHashesPacket]](), + blockHeadersObservers: polygoncommon.NewObservers[*DecodedInboundMessage[*eth.BlockHeadersPacket66]](), + blockBodiesObservers: polygoncommon.NewObservers[*DecodedInboundMessage[*eth.BlockBodiesPacket66]](), + peerEventObservers: polygoncommon.NewObservers[*sentry.PeerEvent](), } } type messageListener struct { once sync.Once - observerIdSequence uint64 logger log.Logger sentryClient direct.SentryClient statusDataFactory sentrymulticlient.StatusDataFactory peerPenalizer PeerPenalizer - observersMu sync.Mutex - newBlockObservers map[uint64]MessageObserver[*DecodedInboundMessage[*eth.NewBlockPacket]] - newBlockHashesObservers map[uint64]MessageObserver[*DecodedInboundMessage[*eth.NewBlockHashesPacket]] - blockHeadersObservers map[uint64]MessageObserver[*DecodedInboundMessage[*eth.BlockHeadersPacket66]] - blockBodiesObservers map[uint64]MessageObserver[*DecodedInboundMessage[*eth.BlockBodiesPacket66]] - peerEventObservers map[uint64]MessageObserver[*sentry.PeerEvent] + newBlockObservers *polygoncommon.Observers[*DecodedInboundMessage[*eth.NewBlockPacket]] + newBlockHashesObservers *polygoncommon.Observers[*DecodedInboundMessage[*eth.NewBlockHashesPacket]] + blockHeadersObservers *polygoncommon.Observers[*DecodedInboundMessage[*eth.BlockHeadersPacket66]] + blockBodiesObservers *polygoncommon.Observers[*DecodedInboundMessage[*eth.BlockBodiesPacket66]] + peerEventObservers *polygoncommon.Observers[*sentry.PeerEvent] stopWg sync.WaitGroup } @@ -96,33 +93,31 @@ func (ml *messageListener) Run(ctx context.Context) { ml.stopWg.Wait() // unregister all observers - ml.observersMu.Lock() - defer ml.observersMu.Unlock() - ml.newBlockObservers = map[uint64]MessageObserver[*DecodedInboundMessage[*eth.NewBlockPacket]]{} - ml.newBlockHashesObservers = map[uint64]MessageObserver[*DecodedInboundMessage[*eth.NewBlockHashesPacket]]{} - ml.blockHeadersObservers = map[uint64]MessageObserver[*DecodedInboundMessage[*eth.BlockHeadersPacket66]]{} - ml.blockBodiesObservers = map[uint64]MessageObserver[*DecodedInboundMessage[*eth.BlockBodiesPacket66]]{} - ml.peerEventObservers = map[uint64]MessageObserver[*sentry.PeerEvent]{} + ml.newBlockObservers.Close() + ml.newBlockHashesObservers.Close() + ml.blockHeadersObservers.Close() + ml.blockBodiesObservers.Close() + ml.peerEventObservers.Close() } -func (ml *messageListener) RegisterNewBlockObserver(observer MessageObserver[*DecodedInboundMessage[*eth.NewBlockPacket]]) UnregisterFunc { - return registerObserver(ml, ml.newBlockObservers, observer) +func (ml *messageListener) RegisterNewBlockObserver(observer polygoncommon.Observer[*DecodedInboundMessage[*eth.NewBlockPacket]]) UnregisterFunc { + return ml.newBlockObservers.Register(observer) } -func (ml *messageListener) RegisterNewBlockHashesObserver(observer MessageObserver[*DecodedInboundMessage[*eth.NewBlockHashesPacket]]) UnregisterFunc { - return registerObserver(ml, ml.newBlockHashesObservers, observer) +func (ml *messageListener) RegisterNewBlockHashesObserver(observer polygoncommon.Observer[*DecodedInboundMessage[*eth.NewBlockHashesPacket]]) UnregisterFunc { + 
return ml.newBlockHashesObservers.Register(observer) } -func (ml *messageListener) RegisterBlockHeadersObserver(observer MessageObserver[*DecodedInboundMessage[*eth.BlockHeadersPacket66]]) UnregisterFunc { - return registerObserver(ml, ml.blockHeadersObservers, observer) +func (ml *messageListener) RegisterBlockHeadersObserver(observer polygoncommon.Observer[*DecodedInboundMessage[*eth.BlockHeadersPacket66]]) UnregisterFunc { + return ml.blockHeadersObservers.Register(observer) } -func (ml *messageListener) RegisterBlockBodiesObserver(observer MessageObserver[*DecodedInboundMessage[*eth.BlockBodiesPacket66]]) UnregisterFunc { - return registerObserver(ml, ml.blockBodiesObservers, observer) +func (ml *messageListener) RegisterBlockBodiesObserver(observer polygoncommon.Observer[*DecodedInboundMessage[*eth.BlockBodiesPacket66]]) UnregisterFunc { + return ml.blockBodiesObservers.Register(observer) } -func (ml *messageListener) RegisterPeerEventObserver(observer MessageObserver[*sentry.PeerEvent]) UnregisterFunc { - return registerObserver(ml, ml.peerEventObservers, observer) +func (ml *messageListener) RegisterPeerEventObserver(observer polygoncommon.Observer[*sentry.PeerEvent]) UnregisterFunc { + return ml.peerEventObservers.Register(observer) } func (ml *messageListener) listenInboundMessages(ctx context.Context) { @@ -140,9 +135,6 @@ func (ml *messageListener) listenInboundMessages(ctx context.Context) { } streamMessages(ctx, ml, "InboundMessages", streamFactory, func(message *sentry.InboundMessage) error { - ml.observersMu.Lock() - defer ml.observersMu.Unlock() - switch message.Id { case sentry.MessageId_NEW_BLOCK_66: return notifyInboundMessageObservers(ctx, ml.logger, ml.peerPenalizer, ml.newBlockObservers, message) @@ -167,52 +159,12 @@ func (ml *messageListener) listenPeerEvents(ctx context.Context) { } func (ml *messageListener) notifyPeerEventObservers(peerEvent *sentry.PeerEvent) error { - ml.observersMu.Lock() - defer ml.observersMu.Unlock() - // wait on all observers to finish processing the peer event before notifying them // with subsequent events in order to preserve the ordering of the sentry messages - var wg sync.WaitGroup - for _, observer := range ml.peerEventObservers { - wg.Add(1) - go func(observer MessageObserver[*sentry.PeerEvent]) { - defer wg.Done() - observer(peerEvent) - }(observer) - } - - wg.Wait() + ml.peerEventObservers.NotifySync(peerEvent) return nil } -func (ml *messageListener) nextObserverId() uint64 { - id := ml.observerIdSequence - ml.observerIdSequence++ - return id -} - -func registerObserver[TMessage any]( - ml *messageListener, - observers map[uint64]MessageObserver[*TMessage], - observer MessageObserver[*TMessage], -) UnregisterFunc { - ml.observersMu.Lock() - defer ml.observersMu.Unlock() - - observerId := ml.nextObserverId() - observers[observerId] = observer - return unregisterFunc(&ml.observersMu, observers, observerId) -} - -func unregisterFunc[TMessage any](mu *sync.Mutex, observers map[uint64]MessageObserver[TMessage], observerId uint64) UnregisterFunc { - return func() { - mu.Lock() - defer mu.Unlock() - - delete(observers, observerId) - } -} - func streamMessages[TMessage any]( ctx context.Context, ml *messageListener, @@ -243,7 +195,7 @@ func notifyInboundMessageObservers[TPacket any]( ctx context.Context, logger log.Logger, peerPenalizer PeerPenalizer, - observers map[uint64]MessageObserver[*DecodedInboundMessage[TPacket]], + observers *polygoncommon.Observers[*DecodedInboundMessage[TPacket]], message *sentry.InboundMessage, ) error { 
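+	// Decode the packet payload for this message id, pair it with the sender's
+	// peer id, and fan the result out to the registered observers. Note that
+	// Observers.Notify is fire-and-forget: each observer runs on its own goroutine.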
peerId := PeerIdFromH512(message.PeerId) @@ -261,21 +213,16 @@ func notifyInboundMessageObservers[TPacket any]( return err } - notifyObservers(observers, &DecodedInboundMessage[TPacket]{ + decodedMessage := DecodedInboundMessage[TPacket]{ InboundMessage: message, Decoded: decodedData, PeerId: peerId, - }) + } + observers.Notify(&decodedMessage) return nil } -func notifyObservers[TMessage any](observers map[uint64]MessageObserver[TMessage], message TMessage) { - for _, observer := range observers { - go observer(message) - } -} - func messageListenerLogPrefix(message string) string { return fmt.Sprintf("[p2p.message.listener] %s", message) } diff --git a/polygon/p2p/peer_tracker.go b/polygon/p2p/peer_tracker.go index 5fa490d879c..536e5383e8b 100644 --- a/polygon/p2p/peer_tracker.go +++ b/polygon/p2p/peer_tracker.go @@ -6,6 +6,7 @@ import ( "github.com/ledgerwatch/log/v3" sentry "github.com/ledgerwatch/erigon-lib/gointerfaces/sentryproto" + "github.com/ledgerwatch/erigon/polygon/polygoncommon" ) type PeerTracker interface { @@ -92,7 +93,7 @@ func (pt *peerTracker) updatePeerSyncProgress(peerId *PeerId, update func(psp *p update(peerSyncProgress) } -func NewPeerEventObserver(logger log.Logger, peerTracker PeerTracker) MessageObserver[*sentry.PeerEvent] { +func NewPeerEventObserver(logger log.Logger, peerTracker PeerTracker) polygoncommon.Observer[*sentry.PeerEvent] { return func(message *sentry.PeerEvent) { peerId := PeerIdFromH512(message.PeerId) diff --git a/polygon/p2p/service_mock.go b/polygon/p2p/service_mock.go index 4414d4f171b..7cd943b3a33 100644 --- a/polygon/p2p/service_mock.go +++ b/polygon/p2p/service_mock.go @@ -16,6 +16,7 @@ import ( sentryproto "github.com/ledgerwatch/erigon-lib/gointerfaces/sentryproto" types "github.com/ledgerwatch/erigon/core/types" eth "github.com/ledgerwatch/erigon/eth/protocols/eth" + polygoncommon "github.com/ledgerwatch/erigon/polygon/polygoncommon" gomock "go.uber.org/mock/gomock" ) @@ -418,7 +419,7 @@ func (c *MockServicePenalizeCall) DoAndReturn(f func(context.Context, *PeerId) e } // RegisterBlockBodiesObserver mocks base method. 
-func (m *MockService) RegisterBlockBodiesObserver(observer MessageObserver[*DecodedInboundMessage[*eth.BlockBodiesPacket66]]) UnregisterFunc { +func (m *MockService) RegisterBlockBodiesObserver(observer polygoncommon.Observer[*DecodedInboundMessage[*eth.BlockBodiesPacket66]]) UnregisterFunc { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "RegisterBlockBodiesObserver", observer) ret0, _ := ret[0].(UnregisterFunc) @@ -444,19 +445,19 @@ func (c *MockServiceRegisterBlockBodiesObserverCall) Return(arg0 UnregisterFunc) } // Do rewrite *gomock.Call.Do -func (c *MockServiceRegisterBlockBodiesObserverCall) Do(f func(MessageObserver[*DecodedInboundMessage[*eth.BlockBodiesPacket66]]) UnregisterFunc) *MockServiceRegisterBlockBodiesObserverCall { +func (c *MockServiceRegisterBlockBodiesObserverCall) Do(f func(polygoncommon.Observer[*DecodedInboundMessage[*eth.BlockBodiesPacket66]]) UnregisterFunc) *MockServiceRegisterBlockBodiesObserverCall { c.Call = c.Call.Do(f) return c } // DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockServiceRegisterBlockBodiesObserverCall) DoAndReturn(f func(MessageObserver[*DecodedInboundMessage[*eth.BlockBodiesPacket66]]) UnregisterFunc) *MockServiceRegisterBlockBodiesObserverCall { +func (c *MockServiceRegisterBlockBodiesObserverCall) DoAndReturn(f func(polygoncommon.Observer[*DecodedInboundMessage[*eth.BlockBodiesPacket66]]) UnregisterFunc) *MockServiceRegisterBlockBodiesObserverCall { c.Call = c.Call.DoAndReturn(f) return c } // RegisterBlockHeadersObserver mocks base method. -func (m *MockService) RegisterBlockHeadersObserver(observer MessageObserver[*DecodedInboundMessage[*eth.BlockHeadersPacket66]]) UnregisterFunc { +func (m *MockService) RegisterBlockHeadersObserver(observer polygoncommon.Observer[*DecodedInboundMessage[*eth.BlockHeadersPacket66]]) UnregisterFunc { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "RegisterBlockHeadersObserver", observer) ret0, _ := ret[0].(UnregisterFunc) @@ -482,19 +483,19 @@ func (c *MockServiceRegisterBlockHeadersObserverCall) Return(arg0 UnregisterFunc } // Do rewrite *gomock.Call.Do -func (c *MockServiceRegisterBlockHeadersObserverCall) Do(f func(MessageObserver[*DecodedInboundMessage[*eth.BlockHeadersPacket66]]) UnregisterFunc) *MockServiceRegisterBlockHeadersObserverCall { +func (c *MockServiceRegisterBlockHeadersObserverCall) Do(f func(polygoncommon.Observer[*DecodedInboundMessage[*eth.BlockHeadersPacket66]]) UnregisterFunc) *MockServiceRegisterBlockHeadersObserverCall { c.Call = c.Call.Do(f) return c } // DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockServiceRegisterBlockHeadersObserverCall) DoAndReturn(f func(MessageObserver[*DecodedInboundMessage[*eth.BlockHeadersPacket66]]) UnregisterFunc) *MockServiceRegisterBlockHeadersObserverCall { +func (c *MockServiceRegisterBlockHeadersObserverCall) DoAndReturn(f func(polygoncommon.Observer[*DecodedInboundMessage[*eth.BlockHeadersPacket66]]) UnregisterFunc) *MockServiceRegisterBlockHeadersObserverCall { c.Call = c.Call.DoAndReturn(f) return c } // RegisterNewBlockHashesObserver mocks base method. 
-func (m *MockService) RegisterNewBlockHashesObserver(observer MessageObserver[*DecodedInboundMessage[*eth.NewBlockHashesPacket]]) UnregisterFunc { +func (m *MockService) RegisterNewBlockHashesObserver(observer polygoncommon.Observer[*DecodedInboundMessage[*eth.NewBlockHashesPacket]]) UnregisterFunc { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "RegisterNewBlockHashesObserver", observer) ret0, _ := ret[0].(UnregisterFunc) @@ -520,19 +521,19 @@ func (c *MockServiceRegisterNewBlockHashesObserverCall) Return(arg0 UnregisterFu } // Do rewrite *gomock.Call.Do -func (c *MockServiceRegisterNewBlockHashesObserverCall) Do(f func(MessageObserver[*DecodedInboundMessage[*eth.NewBlockHashesPacket]]) UnregisterFunc) *MockServiceRegisterNewBlockHashesObserverCall { +func (c *MockServiceRegisterNewBlockHashesObserverCall) Do(f func(polygoncommon.Observer[*DecodedInboundMessage[*eth.NewBlockHashesPacket]]) UnregisterFunc) *MockServiceRegisterNewBlockHashesObserverCall { c.Call = c.Call.Do(f) return c } // DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockServiceRegisterNewBlockHashesObserverCall) DoAndReturn(f func(MessageObserver[*DecodedInboundMessage[*eth.NewBlockHashesPacket]]) UnregisterFunc) *MockServiceRegisterNewBlockHashesObserverCall { +func (c *MockServiceRegisterNewBlockHashesObserverCall) DoAndReturn(f func(polygoncommon.Observer[*DecodedInboundMessage[*eth.NewBlockHashesPacket]]) UnregisterFunc) *MockServiceRegisterNewBlockHashesObserverCall { c.Call = c.Call.DoAndReturn(f) return c } // RegisterNewBlockObserver mocks base method. -func (m *MockService) RegisterNewBlockObserver(observer MessageObserver[*DecodedInboundMessage[*eth.NewBlockPacket]]) UnregisterFunc { +func (m *MockService) RegisterNewBlockObserver(observer polygoncommon.Observer[*DecodedInboundMessage[*eth.NewBlockPacket]]) UnregisterFunc { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "RegisterNewBlockObserver", observer) ret0, _ := ret[0].(UnregisterFunc) @@ -558,19 +559,19 @@ func (c *MockServiceRegisterNewBlockObserverCall) Return(arg0 UnregisterFunc) *M } // Do rewrite *gomock.Call.Do -func (c *MockServiceRegisterNewBlockObserverCall) Do(f func(MessageObserver[*DecodedInboundMessage[*eth.NewBlockPacket]]) UnregisterFunc) *MockServiceRegisterNewBlockObserverCall { +func (c *MockServiceRegisterNewBlockObserverCall) Do(f func(polygoncommon.Observer[*DecodedInboundMessage[*eth.NewBlockPacket]]) UnregisterFunc) *MockServiceRegisterNewBlockObserverCall { c.Call = c.Call.Do(f) return c } // DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockServiceRegisterNewBlockObserverCall) DoAndReturn(f func(MessageObserver[*DecodedInboundMessage[*eth.NewBlockPacket]]) UnregisterFunc) *MockServiceRegisterNewBlockObserverCall { +func (c *MockServiceRegisterNewBlockObserverCall) DoAndReturn(f func(polygoncommon.Observer[*DecodedInboundMessage[*eth.NewBlockPacket]]) UnregisterFunc) *MockServiceRegisterNewBlockObserverCall { c.Call = c.Call.DoAndReturn(f) return c } // RegisterPeerEventObserver mocks base method. 
-func (m *MockService) RegisterPeerEventObserver(observer MessageObserver[*sentryproto.PeerEvent]) UnregisterFunc { +func (m *MockService) RegisterPeerEventObserver(observer polygoncommon.Observer[*sentryproto.PeerEvent]) UnregisterFunc { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "RegisterPeerEventObserver", observer) ret0, _ := ret[0].(UnregisterFunc) @@ -596,13 +597,13 @@ func (c *MockServiceRegisterPeerEventObserverCall) Return(arg0 UnregisterFunc) * } // Do rewrite *gomock.Call.Do -func (c *MockServiceRegisterPeerEventObserverCall) Do(f func(MessageObserver[*sentryproto.PeerEvent]) UnregisterFunc) *MockServiceRegisterPeerEventObserverCall { +func (c *MockServiceRegisterPeerEventObserverCall) Do(f func(polygoncommon.Observer[*sentryproto.PeerEvent]) UnregisterFunc) *MockServiceRegisterPeerEventObserverCall { c.Call = c.Call.Do(f) return c } // DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockServiceRegisterPeerEventObserverCall) DoAndReturn(f func(MessageObserver[*sentryproto.PeerEvent]) UnregisterFunc) *MockServiceRegisterPeerEventObserverCall { +func (c *MockServiceRegisterPeerEventObserverCall) DoAndReturn(f func(polygoncommon.Observer[*sentryproto.PeerEvent]) UnregisterFunc) *MockServiceRegisterPeerEventObserverCall { c.Call = c.Call.DoAndReturn(f) return c } diff --git a/polygon/polygoncommon/event_notifier.go b/polygon/polygoncommon/event_notifier.go new file mode 100644 index 00000000000..d913b8247d3 --- /dev/null +++ b/polygon/polygoncommon/event_notifier.go @@ -0,0 +1,66 @@ +package polygoncommon + +import ( + "context" + "sync" + "sync/atomic" +) + +// EventNotifier notifies waiters about an event. +// It supports a single "producer" and multiple waiters. +// A producer can set the event state to "signaled" or "non-signaled". +// Waiters can wait for the "signaled" event state. +type EventNotifier struct { + mutex sync.Mutex + cond *sync.Cond + hasEvent atomic.Bool +} + +func NewEventNotifier() *EventNotifier { + instance := &EventNotifier{} + instance.cond = sync.NewCond(&instance.mutex) + return instance +} + +// Reset to the "non-signaled" state. +func (en *EventNotifier) Reset() { + en.hasEvent.Store(false) +} + +// SetAndBroadcast sets the "signaled" state and notifies all waiters. +func (en *EventNotifier) SetAndBroadcast() { + en.hasEvent.Store(true) + en.cond.Broadcast() +} + +// Wait for the "signaled" state. +// If the event is already "signaled" it returns immediately. 
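+//
+// A minimal usage sketch (illustrative only; the producer goroutine and the
+// ctx variable below are assumptions, not part of this patch):
+//
+//	en := polygoncommon.NewEventNotifier()
+//	go func() { en.SetAndBroadcast() }() // producer marks the event "signaled"
+//	en.Wait(ctx)                         // returns once signaled or ctx is done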
+func (en *EventNotifier) Wait(ctx context.Context) { + waitCtx, waitCancel := context.WithCancel(ctx) + defer waitCancel() + + var wg sync.WaitGroup + wg.Add(1) + + go func() { + defer wg.Done() + + en.mutex.Lock() + defer en.mutex.Unlock() + + for !en.hasEvent.Load() && (waitCtx.Err() == nil) { + en.cond.Wait() + } + waitCancel() + }() + + // wait for the waiting goroutine or the parent context to finish, whichever happens first + <-waitCtx.Done() + + // if the parent context is done, force the waiting goroutine to exit + // this might lead to spurious wake ups for other waiters, + // but it is ok due to the waiting loop conditions + en.cond.Broadcast() + + wg.Wait() +} diff --git a/polygon/polygoncommon/observers.go b/polygon/polygoncommon/observers.go new file mode 100644 index 00000000000..53276785b40 --- /dev/null +++ b/polygon/polygoncommon/observers.go @@ -0,0 +1,79 @@ +package polygoncommon + +import ( + "sync" +) + +type Observer[TEvent any] func(event TEvent) +type UnregisterFunc func() + +type Observers[TEvent any] struct { + observers map[uint64]Observer[TEvent] + observerIdSequence uint64 + observersMu sync.Mutex +} + +func NewObservers[TEvent any]() *Observers[TEvent] { + return &Observers[TEvent]{ + observers: map[uint64]Observer[TEvent]{}, + } +} + +func (o *Observers[TEvent]) nextObserverId() uint64 { + o.observerIdSequence++ + return o.observerIdSequence +} + +// Register an observer. Call the returned function to unregister it. +func (o *Observers[TEvent]) Register(observer Observer[TEvent]) UnregisterFunc { + o.observersMu.Lock() + defer o.observersMu.Unlock() + + observerId := o.nextObserverId() + o.observers[observerId] = observer + return o.unregisterFunc(observerId) +} + +func (o *Observers[TEvent]) unregisterFunc(observerId uint64) UnregisterFunc { + return func() { + o.observersMu.Lock() + defer o.observersMu.Unlock() + + delete(o.observers, observerId) + } +} + +// Close unregisters all observers. +func (o *Observers[TEvent]) Close() { + o.observersMu.Lock() + defer o.observersMu.Unlock() + + o.observers = map[uint64]Observer[TEvent]{} +} + +// Notify all observers in parallel without waiting for them to process the event. +func (o *Observers[TEvent]) Notify(event TEvent) { + o.observersMu.Lock() + defer o.observersMu.Unlock() + + for _, observer := range o.observers { + go observer(event) + } +} + +// NotifySync all observers in parallel and wait until all of them process the event. 
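+//
+// A minimal usage sketch (illustrative only, not part of this patch):
+//
+//	obs := polygoncommon.NewObservers[string]()
+//	unregister := obs.Register(func(ev string) { fmt.Println(ev) })
+//	defer unregister()
+//	obs.NotifySync("event") // blocks until every observer has processed "event"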
+func (o *Observers[TEvent]) NotifySync(event TEvent) { + o.observersMu.Lock() + defer o.observersMu.Unlock() + + var wg sync.WaitGroup + for _, observer := range o.observers { + wg.Add(1) + go func(observer Observer[TEvent]) { + defer wg.Done() + observer(event) + }(observer) + } + + wg.Wait() +} From 813e59eb16d95cdede5ea73021c10f5181feadef Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Mon, 13 May 2024 19:54:08 +0200 Subject: [PATCH 36/48] Better error message in blob decoding (#10301) --- core/types/blob_tx.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/types/blob_tx.go b/core/types/blob_tx.go index 997d06fc45b..7355c0acb72 100644 --- a/core/types/blob_tx.go +++ b/core/types/blob_tx.go @@ -377,7 +377,7 @@ func decodeBlobVersionedHashes(hashes *[]libcommon.Hash, s *rlp.Stream) error { copy((_hash)[:], b) *hashes = append(*hashes, _hash) } else { - return fmt.Errorf("wrong size for blobVersionedHashes: %d, %v", len(b), b[0]) + return fmt.Errorf("wrong size for blobVersionedHashes: %d", len(b)) } } From d97c3dae600afd400dbee85c77a4aaf2840ad950 Mon Sep 17 00:00:00 2001 From: Kewei Date: Tue, 14 May 2024 04:21:53 +0800 Subject: [PATCH 37/48] Unittest for cl network services (#10080) mainly add unittest for - bls execution change service - voluntary exit service - proposer slashing service Plus other minor bug fixes and adjustment --- cl/abstract/beacon_state.go | 17 +- .../beacon_state_mutator_mock.go | 2123 +++++++++++++++++ .../mock_services/beacon_state_reader_mock.go | 119 +- cl/beacon/synced_data/interface.go | 10 +- .../mock_services/synced_data_mock.go | 49 +- cl/beacon/synced_data/synced_data.go | 11 +- cl/phase1/core/state/interface.go | 12 - .../services/attestation_service_test.go | 24 +- .../services/blob_sidecar_service_test.go | 34 +- .../bls_to_execution_change_service.go | 33 +- .../bls_to_execution_change_service_test.go | 208 ++ .../network/services/global_mock_test.go | 26 + .../services/proposer_slashing_service.go | 14 +- .../proposer_slashing_service_test.go | 223 ++ .../services/voluntary_exit_service.go | 13 +- .../services/voluntary_exit_service_test.go | 224 ++ cl/phase1/network/subnets/subnets.go | 3 +- 17 files changed, 3063 insertions(+), 80 deletions(-) create mode 100644 cl/abstract/mock_services/beacon_state_mutator_mock.go rename cl/{phase1/core/state => abstract}/mock_services/beacon_state_reader_mock.go (54%) delete mode 100644 cl/phase1/core/state/interface.go create mode 100644 cl/phase1/network/services/bls_to_execution_change_service_test.go create mode 100644 cl/phase1/network/services/global_mock_test.go create mode 100644 cl/phase1/network/services/proposer_slashing_service_test.go create mode 100644 cl/phase1/network/services/voluntary_exit_service_test.go diff --git a/cl/abstract/beacon_state.go b/cl/abstract/beacon_state.go index cc77a206181..c10eacac9f7 100644 --- a/cl/abstract/beacon_state.go +++ b/cl/abstract/beacon_state.go @@ -61,6 +61,7 @@ type BeaconStateSSZ interface { HashSSZ() (out [32]byte, err error) } +//go:generate mockgen -typed=true -destination=./mock_services/beacon_state_mutator_mock.go -package=mock_services . 
BeaconStateMutator type BeaconStateMutator interface { SetVersion(version clparams.StateVersion) SetSlot(slot uint64) @@ -104,7 +105,7 @@ type BeaconStateMutator interface { SetValidatorInactivityScore(index int, score uint64) error SetCurrentEpochParticipationFlags(flags []cltypes.ParticipationFlags) SetPreviousEpochParticipationFlags(flags []cltypes.ParticipationFlags) - SetPreviousEpochAttestations(attestations *solid.ListSSZ[*solid.PendingAttestation]) + SetPreviousEpochAttestations(attestations *solid.ListSSZ[*solid.PendingAttestation]) // temporarily skip this mock AddEth1DataVote(vote *cltypes.Eth1Data) AddValidator(validator solid.Validator, balance uint64) @@ -192,8 +193,14 @@ type BeaconStateMinimal interface { PreviousEpochAttestationsLength() int } -// TODO figure this out -type BeaconStateCopying interface { - //CopyInto(dst *raw.BeaconState) error - //Copy() (*raw.BeaconState, error) +// BeaconStateReader is an interface for reading the beacon state. +// +//go:generate mockgen -typed=true -destination=./mock_services/beacon_state_reader_mock.go -package=mock_services . BeaconStateReader +type BeaconStateReader interface { + ValidatorPublicKey(index int) (common.Bytes48, error) + GetDomain(domainType [4]byte, epoch uint64) ([]byte, error) + CommitteeCount(epoch uint64) uint64 + ValidatorForValidatorIndex(index int) (solid.Validator, error) + Version() clparams.StateVersion + GenesisValidatorsRoot() common.Hash } diff --git a/cl/abstract/mock_services/beacon_state_mutator_mock.go b/cl/abstract/mock_services/beacon_state_mutator_mock.go new file mode 100644 index 00000000000..b265cebddab --- /dev/null +++ b/cl/abstract/mock_services/beacon_state_mutator_mock.go @@ -0,0 +1,2123 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/ledgerwatch/erigon/cl/abstract (interfaces: BeaconStateMutator) +// +// Generated by this command: +// +// mockgen -typed=true -destination=./mock_services/beacon_state_mutator_mock.go -package=mock_services . BeaconStateMutator +// + +// Package mock_services is a generated GoMock package. +package mock_services + +import ( + reflect "reflect" + + common "github.com/ledgerwatch/erigon-lib/common" + clparams "github.com/ledgerwatch/erigon/cl/clparams" + cltypes "github.com/ledgerwatch/erigon/cl/cltypes" + solid "github.com/ledgerwatch/erigon/cl/cltypes/solid" + gomock "go.uber.org/mock/gomock" +) + +// MockBeaconStateMutator is a mock of BeaconStateMutator interface. +type MockBeaconStateMutator struct { + ctrl *gomock.Controller + recorder *MockBeaconStateMutatorMockRecorder +} + +// MockBeaconStateMutatorMockRecorder is the mock recorder for MockBeaconStateMutator. +type MockBeaconStateMutatorMockRecorder struct { + mock *MockBeaconStateMutator +} + +// NewMockBeaconStateMutator creates a new mock instance. +func NewMockBeaconStateMutator(ctrl *gomock.Controller) *MockBeaconStateMutator { + mock := &MockBeaconStateMutator{ctrl: ctrl} + mock.recorder = &MockBeaconStateMutatorMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockBeaconStateMutator) EXPECT() *MockBeaconStateMutatorMockRecorder { + return m.recorder +} + +// AddCurrentEpochAtteastation mocks base method. +func (m *MockBeaconStateMutator) AddCurrentEpochAtteastation(arg0 *solid.PendingAttestation) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "AddCurrentEpochAtteastation", arg0) +} + +// AddCurrentEpochAtteastation indicates an expected call of AddCurrentEpochAtteastation. 
+func (mr *MockBeaconStateMutatorMockRecorder) AddCurrentEpochAtteastation(arg0 any) *MockBeaconStateMutatorAddCurrentEpochAtteastationCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddCurrentEpochAtteastation", reflect.TypeOf((*MockBeaconStateMutator)(nil).AddCurrentEpochAtteastation), arg0) + return &MockBeaconStateMutatorAddCurrentEpochAtteastationCall{Call: call} +} + +// MockBeaconStateMutatorAddCurrentEpochAtteastationCall wrap *gomock.Call +type MockBeaconStateMutatorAddCurrentEpochAtteastationCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockBeaconStateMutatorAddCurrentEpochAtteastationCall) Return() *MockBeaconStateMutatorAddCurrentEpochAtteastationCall { + c.Call = c.Call.Return() + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockBeaconStateMutatorAddCurrentEpochAtteastationCall) Do(f func(*solid.PendingAttestation)) *MockBeaconStateMutatorAddCurrentEpochAtteastationCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockBeaconStateMutatorAddCurrentEpochAtteastationCall) DoAndReturn(f func(*solid.PendingAttestation)) *MockBeaconStateMutatorAddCurrentEpochAtteastationCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// AddCurrentEpochParticipationFlags mocks base method. +func (m *MockBeaconStateMutator) AddCurrentEpochParticipationFlags(arg0 cltypes.ParticipationFlags) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "AddCurrentEpochParticipationFlags", arg0) +} + +// AddCurrentEpochParticipationFlags indicates an expected call of AddCurrentEpochParticipationFlags. +func (mr *MockBeaconStateMutatorMockRecorder) AddCurrentEpochParticipationFlags(arg0 any) *MockBeaconStateMutatorAddCurrentEpochParticipationFlagsCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddCurrentEpochParticipationFlags", reflect.TypeOf((*MockBeaconStateMutator)(nil).AddCurrentEpochParticipationFlags), arg0) + return &MockBeaconStateMutatorAddCurrentEpochParticipationFlagsCall{Call: call} +} + +// MockBeaconStateMutatorAddCurrentEpochParticipationFlagsCall wrap *gomock.Call +type MockBeaconStateMutatorAddCurrentEpochParticipationFlagsCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockBeaconStateMutatorAddCurrentEpochParticipationFlagsCall) Return() *MockBeaconStateMutatorAddCurrentEpochParticipationFlagsCall { + c.Call = c.Call.Return() + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockBeaconStateMutatorAddCurrentEpochParticipationFlagsCall) Do(f func(cltypes.ParticipationFlags)) *MockBeaconStateMutatorAddCurrentEpochParticipationFlagsCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockBeaconStateMutatorAddCurrentEpochParticipationFlagsCall) DoAndReturn(f func(cltypes.ParticipationFlags)) *MockBeaconStateMutatorAddCurrentEpochParticipationFlagsCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// AddEth1DataVote mocks base method. +func (m *MockBeaconStateMutator) AddEth1DataVote(arg0 *cltypes.Eth1Data) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "AddEth1DataVote", arg0) +} + +// AddEth1DataVote indicates an expected call of AddEth1DataVote. 
+func (mr *MockBeaconStateMutatorMockRecorder) AddEth1DataVote(arg0 any) *MockBeaconStateMutatorAddEth1DataVoteCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddEth1DataVote", reflect.TypeOf((*MockBeaconStateMutator)(nil).AddEth1DataVote), arg0) + return &MockBeaconStateMutatorAddEth1DataVoteCall{Call: call} +} + +// MockBeaconStateMutatorAddEth1DataVoteCall wrap *gomock.Call +type MockBeaconStateMutatorAddEth1DataVoteCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockBeaconStateMutatorAddEth1DataVoteCall) Return() *MockBeaconStateMutatorAddEth1DataVoteCall { + c.Call = c.Call.Return() + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockBeaconStateMutatorAddEth1DataVoteCall) Do(f func(*cltypes.Eth1Data)) *MockBeaconStateMutatorAddEth1DataVoteCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockBeaconStateMutatorAddEth1DataVoteCall) DoAndReturn(f func(*cltypes.Eth1Data)) *MockBeaconStateMutatorAddEth1DataVoteCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// AddHistoricalRoot mocks base method. +func (m *MockBeaconStateMutator) AddHistoricalRoot(arg0 common.Hash) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "AddHistoricalRoot", arg0) +} + +// AddHistoricalRoot indicates an expected call of AddHistoricalRoot. +func (mr *MockBeaconStateMutatorMockRecorder) AddHistoricalRoot(arg0 any) *MockBeaconStateMutatorAddHistoricalRootCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddHistoricalRoot", reflect.TypeOf((*MockBeaconStateMutator)(nil).AddHistoricalRoot), arg0) + return &MockBeaconStateMutatorAddHistoricalRootCall{Call: call} +} + +// MockBeaconStateMutatorAddHistoricalRootCall wrap *gomock.Call +type MockBeaconStateMutatorAddHistoricalRootCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockBeaconStateMutatorAddHistoricalRootCall) Return() *MockBeaconStateMutatorAddHistoricalRootCall { + c.Call = c.Call.Return() + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockBeaconStateMutatorAddHistoricalRootCall) Do(f func(common.Hash)) *MockBeaconStateMutatorAddHistoricalRootCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockBeaconStateMutatorAddHistoricalRootCall) DoAndReturn(f func(common.Hash)) *MockBeaconStateMutatorAddHistoricalRootCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// AddHistoricalSummary mocks base method. +func (m *MockBeaconStateMutator) AddHistoricalSummary(arg0 *cltypes.HistoricalSummary) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "AddHistoricalSummary", arg0) +} + +// AddHistoricalSummary indicates an expected call of AddHistoricalSummary. 
+func (mr *MockBeaconStateMutatorMockRecorder) AddHistoricalSummary(arg0 any) *MockBeaconStateMutatorAddHistoricalSummaryCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddHistoricalSummary", reflect.TypeOf((*MockBeaconStateMutator)(nil).AddHistoricalSummary), arg0) + return &MockBeaconStateMutatorAddHistoricalSummaryCall{Call: call} +} + +// MockBeaconStateMutatorAddHistoricalSummaryCall wrap *gomock.Call +type MockBeaconStateMutatorAddHistoricalSummaryCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockBeaconStateMutatorAddHistoricalSummaryCall) Return() *MockBeaconStateMutatorAddHistoricalSummaryCall { + c.Call = c.Call.Return() + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockBeaconStateMutatorAddHistoricalSummaryCall) Do(f func(*cltypes.HistoricalSummary)) *MockBeaconStateMutatorAddHistoricalSummaryCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockBeaconStateMutatorAddHistoricalSummaryCall) DoAndReturn(f func(*cltypes.HistoricalSummary)) *MockBeaconStateMutatorAddHistoricalSummaryCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// AddInactivityScore mocks base method. +func (m *MockBeaconStateMutator) AddInactivityScore(arg0 uint64) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "AddInactivityScore", arg0) +} + +// AddInactivityScore indicates an expected call of AddInactivityScore. +func (mr *MockBeaconStateMutatorMockRecorder) AddInactivityScore(arg0 any) *MockBeaconStateMutatorAddInactivityScoreCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddInactivityScore", reflect.TypeOf((*MockBeaconStateMutator)(nil).AddInactivityScore), arg0) + return &MockBeaconStateMutatorAddInactivityScoreCall{Call: call} +} + +// MockBeaconStateMutatorAddInactivityScoreCall wrap *gomock.Call +type MockBeaconStateMutatorAddInactivityScoreCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockBeaconStateMutatorAddInactivityScoreCall) Return() *MockBeaconStateMutatorAddInactivityScoreCall { + c.Call = c.Call.Return() + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockBeaconStateMutatorAddInactivityScoreCall) Do(f func(uint64)) *MockBeaconStateMutatorAddInactivityScoreCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockBeaconStateMutatorAddInactivityScoreCall) DoAndReturn(f func(uint64)) *MockBeaconStateMutatorAddInactivityScoreCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// AddPreviousEpochAttestation mocks base method. +func (m *MockBeaconStateMutator) AddPreviousEpochAttestation(arg0 *solid.PendingAttestation) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "AddPreviousEpochAttestation", arg0) +} + +// AddPreviousEpochAttestation indicates an expected call of AddPreviousEpochAttestation. 
+func (mr *MockBeaconStateMutatorMockRecorder) AddPreviousEpochAttestation(arg0 any) *MockBeaconStateMutatorAddPreviousEpochAttestationCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddPreviousEpochAttestation", reflect.TypeOf((*MockBeaconStateMutator)(nil).AddPreviousEpochAttestation), arg0) + return &MockBeaconStateMutatorAddPreviousEpochAttestationCall{Call: call} +} + +// MockBeaconStateMutatorAddPreviousEpochAttestationCall wrap *gomock.Call +type MockBeaconStateMutatorAddPreviousEpochAttestationCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockBeaconStateMutatorAddPreviousEpochAttestationCall) Return() *MockBeaconStateMutatorAddPreviousEpochAttestationCall { + c.Call = c.Call.Return() + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockBeaconStateMutatorAddPreviousEpochAttestationCall) Do(f func(*solid.PendingAttestation)) *MockBeaconStateMutatorAddPreviousEpochAttestationCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockBeaconStateMutatorAddPreviousEpochAttestationCall) DoAndReturn(f func(*solid.PendingAttestation)) *MockBeaconStateMutatorAddPreviousEpochAttestationCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// AddPreviousEpochParticipationAt mocks base method. +func (m *MockBeaconStateMutator) AddPreviousEpochParticipationAt(arg0 int, arg1 byte) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "AddPreviousEpochParticipationAt", arg0, arg1) +} + +// AddPreviousEpochParticipationAt indicates an expected call of AddPreviousEpochParticipationAt. +func (mr *MockBeaconStateMutatorMockRecorder) AddPreviousEpochParticipationAt(arg0, arg1 any) *MockBeaconStateMutatorAddPreviousEpochParticipationAtCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddPreviousEpochParticipationAt", reflect.TypeOf((*MockBeaconStateMutator)(nil).AddPreviousEpochParticipationAt), arg0, arg1) + return &MockBeaconStateMutatorAddPreviousEpochParticipationAtCall{Call: call} +} + +// MockBeaconStateMutatorAddPreviousEpochParticipationAtCall wrap *gomock.Call +type MockBeaconStateMutatorAddPreviousEpochParticipationAtCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockBeaconStateMutatorAddPreviousEpochParticipationAtCall) Return() *MockBeaconStateMutatorAddPreviousEpochParticipationAtCall { + c.Call = c.Call.Return() + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockBeaconStateMutatorAddPreviousEpochParticipationAtCall) Do(f func(int, byte)) *MockBeaconStateMutatorAddPreviousEpochParticipationAtCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockBeaconStateMutatorAddPreviousEpochParticipationAtCall) DoAndReturn(f func(int, byte)) *MockBeaconStateMutatorAddPreviousEpochParticipationAtCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// AddPreviousEpochParticipationFlags mocks base method. +func (m *MockBeaconStateMutator) AddPreviousEpochParticipationFlags(arg0 cltypes.ParticipationFlags) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "AddPreviousEpochParticipationFlags", arg0) +} + +// AddPreviousEpochParticipationFlags indicates an expected call of AddPreviousEpochParticipationFlags. 
+func (mr *MockBeaconStateMutatorMockRecorder) AddPreviousEpochParticipationFlags(arg0 any) *MockBeaconStateMutatorAddPreviousEpochParticipationFlagsCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddPreviousEpochParticipationFlags", reflect.TypeOf((*MockBeaconStateMutator)(nil).AddPreviousEpochParticipationFlags), arg0) + return &MockBeaconStateMutatorAddPreviousEpochParticipationFlagsCall{Call: call} +} + +// MockBeaconStateMutatorAddPreviousEpochParticipationFlagsCall wrap *gomock.Call +type MockBeaconStateMutatorAddPreviousEpochParticipationFlagsCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockBeaconStateMutatorAddPreviousEpochParticipationFlagsCall) Return() *MockBeaconStateMutatorAddPreviousEpochParticipationFlagsCall { + c.Call = c.Call.Return() + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockBeaconStateMutatorAddPreviousEpochParticipationFlagsCall) Do(f func(cltypes.ParticipationFlags)) *MockBeaconStateMutatorAddPreviousEpochParticipationFlagsCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockBeaconStateMutatorAddPreviousEpochParticipationFlagsCall) DoAndReturn(f func(cltypes.ParticipationFlags)) *MockBeaconStateMutatorAddPreviousEpochParticipationFlagsCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// AddValidator mocks base method. +func (m *MockBeaconStateMutator) AddValidator(arg0 solid.Validator, arg1 uint64) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "AddValidator", arg0, arg1) +} + +// AddValidator indicates an expected call of AddValidator. +func (mr *MockBeaconStateMutatorMockRecorder) AddValidator(arg0, arg1 any) *MockBeaconStateMutatorAddValidatorCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddValidator", reflect.TypeOf((*MockBeaconStateMutator)(nil).AddValidator), arg0, arg1) + return &MockBeaconStateMutatorAddValidatorCall{Call: call} +} + +// MockBeaconStateMutatorAddValidatorCall wrap *gomock.Call +type MockBeaconStateMutatorAddValidatorCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockBeaconStateMutatorAddValidatorCall) Return() *MockBeaconStateMutatorAddValidatorCall { + c.Call = c.Call.Return() + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockBeaconStateMutatorAddValidatorCall) Do(f func(solid.Validator, uint64)) *MockBeaconStateMutatorAddValidatorCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockBeaconStateMutatorAddValidatorCall) DoAndReturn(f func(solid.Validator, uint64)) *MockBeaconStateMutatorAddValidatorCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// AppendValidator mocks base method. +func (m *MockBeaconStateMutator) AppendValidator(arg0 solid.Validator) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "AppendValidator", arg0) +} + +// AppendValidator indicates an expected call of AppendValidator. 
+func (mr *MockBeaconStateMutatorMockRecorder) AppendValidator(arg0 any) *MockBeaconStateMutatorAppendValidatorCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AppendValidator", reflect.TypeOf((*MockBeaconStateMutator)(nil).AppendValidator), arg0) + return &MockBeaconStateMutatorAppendValidatorCall{Call: call} +} + +// MockBeaconStateMutatorAppendValidatorCall wrap *gomock.Call +type MockBeaconStateMutatorAppendValidatorCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockBeaconStateMutatorAppendValidatorCall) Return() *MockBeaconStateMutatorAppendValidatorCall { + c.Call = c.Call.Return() + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockBeaconStateMutatorAppendValidatorCall) Do(f func(solid.Validator)) *MockBeaconStateMutatorAppendValidatorCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockBeaconStateMutatorAppendValidatorCall) DoAndReturn(f func(solid.Validator)) *MockBeaconStateMutatorAppendValidatorCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// ResetCurrentEpochAttestations mocks base method. +func (m *MockBeaconStateMutator) ResetCurrentEpochAttestations() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "ResetCurrentEpochAttestations") +} + +// ResetCurrentEpochAttestations indicates an expected call of ResetCurrentEpochAttestations. +func (mr *MockBeaconStateMutatorMockRecorder) ResetCurrentEpochAttestations() *MockBeaconStateMutatorResetCurrentEpochAttestationsCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResetCurrentEpochAttestations", reflect.TypeOf((*MockBeaconStateMutator)(nil).ResetCurrentEpochAttestations)) + return &MockBeaconStateMutatorResetCurrentEpochAttestationsCall{Call: call} +} + +// MockBeaconStateMutatorResetCurrentEpochAttestationsCall wrap *gomock.Call +type MockBeaconStateMutatorResetCurrentEpochAttestationsCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockBeaconStateMutatorResetCurrentEpochAttestationsCall) Return() *MockBeaconStateMutatorResetCurrentEpochAttestationsCall { + c.Call = c.Call.Return() + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockBeaconStateMutatorResetCurrentEpochAttestationsCall) Do(f func()) *MockBeaconStateMutatorResetCurrentEpochAttestationsCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockBeaconStateMutatorResetCurrentEpochAttestationsCall) DoAndReturn(f func()) *MockBeaconStateMutatorResetCurrentEpochAttestationsCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// ResetEpochParticipation mocks base method. +func (m *MockBeaconStateMutator) ResetEpochParticipation() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "ResetEpochParticipation") +} + +// ResetEpochParticipation indicates an expected call of ResetEpochParticipation. 
+func (mr *MockBeaconStateMutatorMockRecorder) ResetEpochParticipation() *MockBeaconStateMutatorResetEpochParticipationCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResetEpochParticipation", reflect.TypeOf((*MockBeaconStateMutator)(nil).ResetEpochParticipation)) + return &MockBeaconStateMutatorResetEpochParticipationCall{Call: call} +} + +// MockBeaconStateMutatorResetEpochParticipationCall wrap *gomock.Call +type MockBeaconStateMutatorResetEpochParticipationCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockBeaconStateMutatorResetEpochParticipationCall) Return() *MockBeaconStateMutatorResetEpochParticipationCall { + c.Call = c.Call.Return() + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockBeaconStateMutatorResetEpochParticipationCall) Do(f func()) *MockBeaconStateMutatorResetEpochParticipationCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockBeaconStateMutatorResetEpochParticipationCall) DoAndReturn(f func()) *MockBeaconStateMutatorResetEpochParticipationCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// ResetEth1DataVotes mocks base method. +func (m *MockBeaconStateMutator) ResetEth1DataVotes() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "ResetEth1DataVotes") +} + +// ResetEth1DataVotes indicates an expected call of ResetEth1DataVotes. +func (mr *MockBeaconStateMutatorMockRecorder) ResetEth1DataVotes() *MockBeaconStateMutatorResetEth1DataVotesCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResetEth1DataVotes", reflect.TypeOf((*MockBeaconStateMutator)(nil).ResetEth1DataVotes)) + return &MockBeaconStateMutatorResetEth1DataVotesCall{Call: call} +} + +// MockBeaconStateMutatorResetEth1DataVotesCall wrap *gomock.Call +type MockBeaconStateMutatorResetEth1DataVotesCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockBeaconStateMutatorResetEth1DataVotesCall) Return() *MockBeaconStateMutatorResetEth1DataVotesCall { + c.Call = c.Call.Return() + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockBeaconStateMutatorResetEth1DataVotesCall) Do(f func()) *MockBeaconStateMutatorResetEth1DataVotesCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockBeaconStateMutatorResetEth1DataVotesCall) DoAndReturn(f func()) *MockBeaconStateMutatorResetEth1DataVotesCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// ResetHistoricalSummaries mocks base method. +func (m *MockBeaconStateMutator) ResetHistoricalSummaries() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "ResetHistoricalSummaries") +} + +// ResetHistoricalSummaries indicates an expected call of ResetHistoricalSummaries. 
+func (mr *MockBeaconStateMutatorMockRecorder) ResetHistoricalSummaries() *MockBeaconStateMutatorResetHistoricalSummariesCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResetHistoricalSummaries", reflect.TypeOf((*MockBeaconStateMutator)(nil).ResetHistoricalSummaries)) + return &MockBeaconStateMutatorResetHistoricalSummariesCall{Call: call} +} + +// MockBeaconStateMutatorResetHistoricalSummariesCall wrap *gomock.Call +type MockBeaconStateMutatorResetHistoricalSummariesCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockBeaconStateMutatorResetHistoricalSummariesCall) Return() *MockBeaconStateMutatorResetHistoricalSummariesCall { + c.Call = c.Call.Return() + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockBeaconStateMutatorResetHistoricalSummariesCall) Do(f func()) *MockBeaconStateMutatorResetHistoricalSummariesCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockBeaconStateMutatorResetHistoricalSummariesCall) DoAndReturn(f func()) *MockBeaconStateMutatorResetHistoricalSummariesCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// ResetPreviousEpochAttestations mocks base method. +func (m *MockBeaconStateMutator) ResetPreviousEpochAttestations() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "ResetPreviousEpochAttestations") +} + +// ResetPreviousEpochAttestations indicates an expected call of ResetPreviousEpochAttestations. +func (mr *MockBeaconStateMutatorMockRecorder) ResetPreviousEpochAttestations() *MockBeaconStateMutatorResetPreviousEpochAttestationsCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResetPreviousEpochAttestations", reflect.TypeOf((*MockBeaconStateMutator)(nil).ResetPreviousEpochAttestations)) + return &MockBeaconStateMutatorResetPreviousEpochAttestationsCall{Call: call} +} + +// MockBeaconStateMutatorResetPreviousEpochAttestationsCall wrap *gomock.Call +type MockBeaconStateMutatorResetPreviousEpochAttestationsCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockBeaconStateMutatorResetPreviousEpochAttestationsCall) Return() *MockBeaconStateMutatorResetPreviousEpochAttestationsCall { + c.Call = c.Call.Return() + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockBeaconStateMutatorResetPreviousEpochAttestationsCall) Do(f func()) *MockBeaconStateMutatorResetPreviousEpochAttestationsCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockBeaconStateMutatorResetPreviousEpochAttestationsCall) DoAndReturn(f func()) *MockBeaconStateMutatorResetPreviousEpochAttestationsCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// SetActivationEligibilityEpochForValidatorAtIndex mocks base method. +func (m *MockBeaconStateMutator) SetActivationEligibilityEpochForValidatorAtIndex(arg0 int, arg1 uint64) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetActivationEligibilityEpochForValidatorAtIndex", arg0, arg1) +} + +// SetActivationEligibilityEpochForValidatorAtIndex indicates an expected call of SetActivationEligibilityEpochForValidatorAtIndex. 
+func (mr *MockBeaconStateMutatorMockRecorder) SetActivationEligibilityEpochForValidatorAtIndex(arg0, arg1 any) *MockBeaconStateMutatorSetActivationEligibilityEpochForValidatorAtIndexCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetActivationEligibilityEpochForValidatorAtIndex", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetActivationEligibilityEpochForValidatorAtIndex), arg0, arg1) + return &MockBeaconStateMutatorSetActivationEligibilityEpochForValidatorAtIndexCall{Call: call} +} + +// MockBeaconStateMutatorSetActivationEligibilityEpochForValidatorAtIndexCall wrap *gomock.Call +type MockBeaconStateMutatorSetActivationEligibilityEpochForValidatorAtIndexCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockBeaconStateMutatorSetActivationEligibilityEpochForValidatorAtIndexCall) Return() *MockBeaconStateMutatorSetActivationEligibilityEpochForValidatorAtIndexCall { + c.Call = c.Call.Return() + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockBeaconStateMutatorSetActivationEligibilityEpochForValidatorAtIndexCall) Do(f func(int, uint64)) *MockBeaconStateMutatorSetActivationEligibilityEpochForValidatorAtIndexCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockBeaconStateMutatorSetActivationEligibilityEpochForValidatorAtIndexCall) DoAndReturn(f func(int, uint64)) *MockBeaconStateMutatorSetActivationEligibilityEpochForValidatorAtIndexCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// SetActivationEpochForValidatorAtIndex mocks base method. +func (m *MockBeaconStateMutator) SetActivationEpochForValidatorAtIndex(arg0 int, arg1 uint64) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetActivationEpochForValidatorAtIndex", arg0, arg1) +} + +// SetActivationEpochForValidatorAtIndex indicates an expected call of SetActivationEpochForValidatorAtIndex. +func (mr *MockBeaconStateMutatorMockRecorder) SetActivationEpochForValidatorAtIndex(arg0, arg1 any) *MockBeaconStateMutatorSetActivationEpochForValidatorAtIndexCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetActivationEpochForValidatorAtIndex", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetActivationEpochForValidatorAtIndex), arg0, arg1) + return &MockBeaconStateMutatorSetActivationEpochForValidatorAtIndexCall{Call: call} +} + +// MockBeaconStateMutatorSetActivationEpochForValidatorAtIndexCall wrap *gomock.Call +type MockBeaconStateMutatorSetActivationEpochForValidatorAtIndexCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockBeaconStateMutatorSetActivationEpochForValidatorAtIndexCall) Return() *MockBeaconStateMutatorSetActivationEpochForValidatorAtIndexCall { + c.Call = c.Call.Return() + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockBeaconStateMutatorSetActivationEpochForValidatorAtIndexCall) Do(f func(int, uint64)) *MockBeaconStateMutatorSetActivationEpochForValidatorAtIndexCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockBeaconStateMutatorSetActivationEpochForValidatorAtIndexCall) DoAndReturn(f func(int, uint64)) *MockBeaconStateMutatorSetActivationEpochForValidatorAtIndexCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// SetBlockRootAt mocks base method. 
+func (m *MockBeaconStateMutator) SetBlockRootAt(arg0 int, arg1 common.Hash) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetBlockRootAt", arg0, arg1) +} + +// SetBlockRootAt indicates an expected call of SetBlockRootAt. +func (mr *MockBeaconStateMutatorMockRecorder) SetBlockRootAt(arg0, arg1 any) *MockBeaconStateMutatorSetBlockRootAtCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetBlockRootAt", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetBlockRootAt), arg0, arg1) + return &MockBeaconStateMutatorSetBlockRootAtCall{Call: call} +} + +// MockBeaconStateMutatorSetBlockRootAtCall wrap *gomock.Call +type MockBeaconStateMutatorSetBlockRootAtCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockBeaconStateMutatorSetBlockRootAtCall) Return() *MockBeaconStateMutatorSetBlockRootAtCall { + c.Call = c.Call.Return() + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockBeaconStateMutatorSetBlockRootAtCall) Do(f func(int, common.Hash)) *MockBeaconStateMutatorSetBlockRootAtCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockBeaconStateMutatorSetBlockRootAtCall) DoAndReturn(f func(int, common.Hash)) *MockBeaconStateMutatorSetBlockRootAtCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// SetCurrentEpochParticipationFlags mocks base method. +func (m *MockBeaconStateMutator) SetCurrentEpochParticipationFlags(arg0 []cltypes.ParticipationFlags) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetCurrentEpochParticipationFlags", arg0) +} + +// SetCurrentEpochParticipationFlags indicates an expected call of SetCurrentEpochParticipationFlags. +func (mr *MockBeaconStateMutatorMockRecorder) SetCurrentEpochParticipationFlags(arg0 any) *MockBeaconStateMutatorSetCurrentEpochParticipationFlagsCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetCurrentEpochParticipationFlags", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetCurrentEpochParticipationFlags), arg0) + return &MockBeaconStateMutatorSetCurrentEpochParticipationFlagsCall{Call: call} +} + +// MockBeaconStateMutatorSetCurrentEpochParticipationFlagsCall wrap *gomock.Call +type MockBeaconStateMutatorSetCurrentEpochParticipationFlagsCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockBeaconStateMutatorSetCurrentEpochParticipationFlagsCall) Return() *MockBeaconStateMutatorSetCurrentEpochParticipationFlagsCall { + c.Call = c.Call.Return() + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockBeaconStateMutatorSetCurrentEpochParticipationFlagsCall) Do(f func([]cltypes.ParticipationFlags)) *MockBeaconStateMutatorSetCurrentEpochParticipationFlagsCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockBeaconStateMutatorSetCurrentEpochParticipationFlagsCall) DoAndReturn(f func([]cltypes.ParticipationFlags)) *MockBeaconStateMutatorSetCurrentEpochParticipationFlagsCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// SetCurrentJustifiedCheckpoint mocks base method. +func (m *MockBeaconStateMutator) SetCurrentJustifiedCheckpoint(arg0 solid.Checkpoint) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetCurrentJustifiedCheckpoint", arg0) +} + +// SetCurrentJustifiedCheckpoint indicates an expected call of SetCurrentJustifiedCheckpoint. 
+func (mr *MockBeaconStateMutatorMockRecorder) SetCurrentJustifiedCheckpoint(arg0 any) *MockBeaconStateMutatorSetCurrentJustifiedCheckpointCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetCurrentJustifiedCheckpoint", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetCurrentJustifiedCheckpoint), arg0) + return &MockBeaconStateMutatorSetCurrentJustifiedCheckpointCall{Call: call} +} + +// MockBeaconStateMutatorSetCurrentJustifiedCheckpointCall wrap *gomock.Call +type MockBeaconStateMutatorSetCurrentJustifiedCheckpointCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockBeaconStateMutatorSetCurrentJustifiedCheckpointCall) Return() *MockBeaconStateMutatorSetCurrentJustifiedCheckpointCall { + c.Call = c.Call.Return() + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockBeaconStateMutatorSetCurrentJustifiedCheckpointCall) Do(f func(solid.Checkpoint)) *MockBeaconStateMutatorSetCurrentJustifiedCheckpointCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockBeaconStateMutatorSetCurrentJustifiedCheckpointCall) DoAndReturn(f func(solid.Checkpoint)) *MockBeaconStateMutatorSetCurrentJustifiedCheckpointCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// SetCurrentSyncCommittee mocks base method. +func (m *MockBeaconStateMutator) SetCurrentSyncCommittee(arg0 *solid.SyncCommittee) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetCurrentSyncCommittee", arg0) +} + +// SetCurrentSyncCommittee indicates an expected call of SetCurrentSyncCommittee. +func (mr *MockBeaconStateMutatorMockRecorder) SetCurrentSyncCommittee(arg0 any) *MockBeaconStateMutatorSetCurrentSyncCommitteeCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetCurrentSyncCommittee", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetCurrentSyncCommittee), arg0) + return &MockBeaconStateMutatorSetCurrentSyncCommitteeCall{Call: call} +} + +// MockBeaconStateMutatorSetCurrentSyncCommitteeCall wrap *gomock.Call +type MockBeaconStateMutatorSetCurrentSyncCommitteeCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockBeaconStateMutatorSetCurrentSyncCommitteeCall) Return() *MockBeaconStateMutatorSetCurrentSyncCommitteeCall { + c.Call = c.Call.Return() + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockBeaconStateMutatorSetCurrentSyncCommitteeCall) Do(f func(*solid.SyncCommittee)) *MockBeaconStateMutatorSetCurrentSyncCommitteeCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockBeaconStateMutatorSetCurrentSyncCommitteeCall) DoAndReturn(f func(*solid.SyncCommittee)) *MockBeaconStateMutatorSetCurrentSyncCommitteeCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// SetEffectiveBalanceForValidatorAtIndex mocks base method. +func (m *MockBeaconStateMutator) SetEffectiveBalanceForValidatorAtIndex(arg0 int, arg1 uint64) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetEffectiveBalanceForValidatorAtIndex", arg0, arg1) +} + +// SetEffectiveBalanceForValidatorAtIndex indicates an expected call of SetEffectiveBalanceForValidatorAtIndex. 
+func (mr *MockBeaconStateMutatorMockRecorder) SetEffectiveBalanceForValidatorAtIndex(arg0, arg1 any) *MockBeaconStateMutatorSetEffectiveBalanceForValidatorAtIndexCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetEffectiveBalanceForValidatorAtIndex", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetEffectiveBalanceForValidatorAtIndex), arg0, arg1) + return &MockBeaconStateMutatorSetEffectiveBalanceForValidatorAtIndexCall{Call: call} +} + +// MockBeaconStateMutatorSetEffectiveBalanceForValidatorAtIndexCall wrap *gomock.Call +type MockBeaconStateMutatorSetEffectiveBalanceForValidatorAtIndexCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockBeaconStateMutatorSetEffectiveBalanceForValidatorAtIndexCall) Return() *MockBeaconStateMutatorSetEffectiveBalanceForValidatorAtIndexCall { + c.Call = c.Call.Return() + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockBeaconStateMutatorSetEffectiveBalanceForValidatorAtIndexCall) Do(f func(int, uint64)) *MockBeaconStateMutatorSetEffectiveBalanceForValidatorAtIndexCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockBeaconStateMutatorSetEffectiveBalanceForValidatorAtIndexCall) DoAndReturn(f func(int, uint64)) *MockBeaconStateMutatorSetEffectiveBalanceForValidatorAtIndexCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// SetEpochParticipationForValidatorIndex mocks base method. +func (m *MockBeaconStateMutator) SetEpochParticipationForValidatorIndex(arg0 bool, arg1 int, arg2 cltypes.ParticipationFlags) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetEpochParticipationForValidatorIndex", arg0, arg1, arg2) +} + +// SetEpochParticipationForValidatorIndex indicates an expected call of SetEpochParticipationForValidatorIndex. +func (mr *MockBeaconStateMutatorMockRecorder) SetEpochParticipationForValidatorIndex(arg0, arg1, arg2 any) *MockBeaconStateMutatorSetEpochParticipationForValidatorIndexCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetEpochParticipationForValidatorIndex", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetEpochParticipationForValidatorIndex), arg0, arg1, arg2) + return &MockBeaconStateMutatorSetEpochParticipationForValidatorIndexCall{Call: call} +} + +// MockBeaconStateMutatorSetEpochParticipationForValidatorIndexCall wrap *gomock.Call +type MockBeaconStateMutatorSetEpochParticipationForValidatorIndexCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockBeaconStateMutatorSetEpochParticipationForValidatorIndexCall) Return() *MockBeaconStateMutatorSetEpochParticipationForValidatorIndexCall { + c.Call = c.Call.Return() + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockBeaconStateMutatorSetEpochParticipationForValidatorIndexCall) Do(f func(bool, int, cltypes.ParticipationFlags)) *MockBeaconStateMutatorSetEpochParticipationForValidatorIndexCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockBeaconStateMutatorSetEpochParticipationForValidatorIndexCall) DoAndReturn(f func(bool, int, cltypes.ParticipationFlags)) *MockBeaconStateMutatorSetEpochParticipationForValidatorIndexCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// SetEth1Data mocks base method. +func (m *MockBeaconStateMutator) SetEth1Data(arg0 *cltypes.Eth1Data) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetEth1Data", arg0) +} + +// SetEth1Data indicates an expected call of SetEth1Data. 
+func (mr *MockBeaconStateMutatorMockRecorder) SetEth1Data(arg0 any) *MockBeaconStateMutatorSetEth1DataCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetEth1Data", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetEth1Data), arg0) + return &MockBeaconStateMutatorSetEth1DataCall{Call: call} +} + +// MockBeaconStateMutatorSetEth1DataCall wrap *gomock.Call +type MockBeaconStateMutatorSetEth1DataCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockBeaconStateMutatorSetEth1DataCall) Return() *MockBeaconStateMutatorSetEth1DataCall { + c.Call = c.Call.Return() + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockBeaconStateMutatorSetEth1DataCall) Do(f func(*cltypes.Eth1Data)) *MockBeaconStateMutatorSetEth1DataCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockBeaconStateMutatorSetEth1DataCall) DoAndReturn(f func(*cltypes.Eth1Data)) *MockBeaconStateMutatorSetEth1DataCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// SetEth1DepositIndex mocks base method. +func (m *MockBeaconStateMutator) SetEth1DepositIndex(arg0 uint64) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetEth1DepositIndex", arg0) +} + +// SetEth1DepositIndex indicates an expected call of SetEth1DepositIndex. +func (mr *MockBeaconStateMutatorMockRecorder) SetEth1DepositIndex(arg0 any) *MockBeaconStateMutatorSetEth1DepositIndexCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetEth1DepositIndex", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetEth1DepositIndex), arg0) + return &MockBeaconStateMutatorSetEth1DepositIndexCall{Call: call} +} + +// MockBeaconStateMutatorSetEth1DepositIndexCall wrap *gomock.Call +type MockBeaconStateMutatorSetEth1DepositIndexCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockBeaconStateMutatorSetEth1DepositIndexCall) Return() *MockBeaconStateMutatorSetEth1DepositIndexCall { + c.Call = c.Call.Return() + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockBeaconStateMutatorSetEth1DepositIndexCall) Do(f func(uint64)) *MockBeaconStateMutatorSetEth1DepositIndexCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockBeaconStateMutatorSetEth1DepositIndexCall) DoAndReturn(f func(uint64)) *MockBeaconStateMutatorSetEth1DepositIndexCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// SetExitEpochForValidatorAtIndex mocks base method. +func (m *MockBeaconStateMutator) SetExitEpochForValidatorAtIndex(arg0 int, arg1 uint64) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetExitEpochForValidatorAtIndex", arg0, arg1) +} + +// SetExitEpochForValidatorAtIndex indicates an expected call of SetExitEpochForValidatorAtIndex. 
+func (mr *MockBeaconStateMutatorMockRecorder) SetExitEpochForValidatorAtIndex(arg0, arg1 any) *MockBeaconStateMutatorSetExitEpochForValidatorAtIndexCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetExitEpochForValidatorAtIndex", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetExitEpochForValidatorAtIndex), arg0, arg1) + return &MockBeaconStateMutatorSetExitEpochForValidatorAtIndexCall{Call: call} +} + +// MockBeaconStateMutatorSetExitEpochForValidatorAtIndexCall wrap *gomock.Call +type MockBeaconStateMutatorSetExitEpochForValidatorAtIndexCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockBeaconStateMutatorSetExitEpochForValidatorAtIndexCall) Return() *MockBeaconStateMutatorSetExitEpochForValidatorAtIndexCall { + c.Call = c.Call.Return() + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockBeaconStateMutatorSetExitEpochForValidatorAtIndexCall) Do(f func(int, uint64)) *MockBeaconStateMutatorSetExitEpochForValidatorAtIndexCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockBeaconStateMutatorSetExitEpochForValidatorAtIndexCall) DoAndReturn(f func(int, uint64)) *MockBeaconStateMutatorSetExitEpochForValidatorAtIndexCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// SetFinalizedCheckpoint mocks base method. +func (m *MockBeaconStateMutator) SetFinalizedCheckpoint(arg0 solid.Checkpoint) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetFinalizedCheckpoint", arg0) +} + +// SetFinalizedCheckpoint indicates an expected call of SetFinalizedCheckpoint. +func (mr *MockBeaconStateMutatorMockRecorder) SetFinalizedCheckpoint(arg0 any) *MockBeaconStateMutatorSetFinalizedCheckpointCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetFinalizedCheckpoint", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetFinalizedCheckpoint), arg0) + return &MockBeaconStateMutatorSetFinalizedCheckpointCall{Call: call} +} + +// MockBeaconStateMutatorSetFinalizedCheckpointCall wrap *gomock.Call +type MockBeaconStateMutatorSetFinalizedCheckpointCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockBeaconStateMutatorSetFinalizedCheckpointCall) Return() *MockBeaconStateMutatorSetFinalizedCheckpointCall { + c.Call = c.Call.Return() + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockBeaconStateMutatorSetFinalizedCheckpointCall) Do(f func(solid.Checkpoint)) *MockBeaconStateMutatorSetFinalizedCheckpointCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockBeaconStateMutatorSetFinalizedCheckpointCall) DoAndReturn(f func(solid.Checkpoint)) *MockBeaconStateMutatorSetFinalizedCheckpointCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// SetFork mocks base method. +func (m *MockBeaconStateMutator) SetFork(arg0 *cltypes.Fork) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetFork", arg0) +} + +// SetFork indicates an expected call of SetFork. 
+func (mr *MockBeaconStateMutatorMockRecorder) SetFork(arg0 any) *MockBeaconStateMutatorSetForkCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetFork", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetFork), arg0) + return &MockBeaconStateMutatorSetForkCall{Call: call} +} + +// MockBeaconStateMutatorSetForkCall wrap *gomock.Call +type MockBeaconStateMutatorSetForkCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockBeaconStateMutatorSetForkCall) Return() *MockBeaconStateMutatorSetForkCall { + c.Call = c.Call.Return() + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockBeaconStateMutatorSetForkCall) Do(f func(*cltypes.Fork)) *MockBeaconStateMutatorSetForkCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockBeaconStateMutatorSetForkCall) DoAndReturn(f func(*cltypes.Fork)) *MockBeaconStateMutatorSetForkCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// SetInactivityScores mocks base method. +func (m *MockBeaconStateMutator) SetInactivityScores(arg0 []uint64) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetInactivityScores", arg0) +} + +// SetInactivityScores indicates an expected call of SetInactivityScores. +func (mr *MockBeaconStateMutatorMockRecorder) SetInactivityScores(arg0 any) *MockBeaconStateMutatorSetInactivityScoresCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetInactivityScores", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetInactivityScores), arg0) + return &MockBeaconStateMutatorSetInactivityScoresCall{Call: call} +} + +// MockBeaconStateMutatorSetInactivityScoresCall wrap *gomock.Call +type MockBeaconStateMutatorSetInactivityScoresCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockBeaconStateMutatorSetInactivityScoresCall) Return() *MockBeaconStateMutatorSetInactivityScoresCall { + c.Call = c.Call.Return() + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockBeaconStateMutatorSetInactivityScoresCall) Do(f func([]uint64)) *MockBeaconStateMutatorSetInactivityScoresCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockBeaconStateMutatorSetInactivityScoresCall) DoAndReturn(f func([]uint64)) *MockBeaconStateMutatorSetInactivityScoresCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// SetJustificationBits mocks base method. +func (m *MockBeaconStateMutator) SetJustificationBits(arg0 cltypes.JustificationBits) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetJustificationBits", arg0) +} + +// SetJustificationBits indicates an expected call of SetJustificationBits. 
+func (mr *MockBeaconStateMutatorMockRecorder) SetJustificationBits(arg0 any) *MockBeaconStateMutatorSetJustificationBitsCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetJustificationBits", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetJustificationBits), arg0) + return &MockBeaconStateMutatorSetJustificationBitsCall{Call: call} +} + +// MockBeaconStateMutatorSetJustificationBitsCall wrap *gomock.Call +type MockBeaconStateMutatorSetJustificationBitsCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockBeaconStateMutatorSetJustificationBitsCall) Return() *MockBeaconStateMutatorSetJustificationBitsCall { + c.Call = c.Call.Return() + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockBeaconStateMutatorSetJustificationBitsCall) Do(f func(cltypes.JustificationBits)) *MockBeaconStateMutatorSetJustificationBitsCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockBeaconStateMutatorSetJustificationBitsCall) DoAndReturn(f func(cltypes.JustificationBits)) *MockBeaconStateMutatorSetJustificationBitsCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// SetLatestBlockHeader mocks base method. +func (m *MockBeaconStateMutator) SetLatestBlockHeader(arg0 *cltypes.BeaconBlockHeader) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetLatestBlockHeader", arg0) +} + +// SetLatestBlockHeader indicates an expected call of SetLatestBlockHeader. +func (mr *MockBeaconStateMutatorMockRecorder) SetLatestBlockHeader(arg0 any) *MockBeaconStateMutatorSetLatestBlockHeaderCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetLatestBlockHeader", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetLatestBlockHeader), arg0) + return &MockBeaconStateMutatorSetLatestBlockHeaderCall{Call: call} +} + +// MockBeaconStateMutatorSetLatestBlockHeaderCall wrap *gomock.Call +type MockBeaconStateMutatorSetLatestBlockHeaderCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockBeaconStateMutatorSetLatestBlockHeaderCall) Return() *MockBeaconStateMutatorSetLatestBlockHeaderCall { + c.Call = c.Call.Return() + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockBeaconStateMutatorSetLatestBlockHeaderCall) Do(f func(*cltypes.BeaconBlockHeader)) *MockBeaconStateMutatorSetLatestBlockHeaderCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockBeaconStateMutatorSetLatestBlockHeaderCall) DoAndReturn(f func(*cltypes.BeaconBlockHeader)) *MockBeaconStateMutatorSetLatestBlockHeaderCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// SetLatestExecutionPayloadHeader mocks base method. +func (m *MockBeaconStateMutator) SetLatestExecutionPayloadHeader(arg0 *cltypes.Eth1Header) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetLatestExecutionPayloadHeader", arg0) +} + +// SetLatestExecutionPayloadHeader indicates an expected call of SetLatestExecutionPayloadHeader. 
+func (mr *MockBeaconStateMutatorMockRecorder) SetLatestExecutionPayloadHeader(arg0 any) *MockBeaconStateMutatorSetLatestExecutionPayloadHeaderCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetLatestExecutionPayloadHeader", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetLatestExecutionPayloadHeader), arg0) + return &MockBeaconStateMutatorSetLatestExecutionPayloadHeaderCall{Call: call} +} + +// MockBeaconStateMutatorSetLatestExecutionPayloadHeaderCall wrap *gomock.Call +type MockBeaconStateMutatorSetLatestExecutionPayloadHeaderCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockBeaconStateMutatorSetLatestExecutionPayloadHeaderCall) Return() *MockBeaconStateMutatorSetLatestExecutionPayloadHeaderCall { + c.Call = c.Call.Return() + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockBeaconStateMutatorSetLatestExecutionPayloadHeaderCall) Do(f func(*cltypes.Eth1Header)) *MockBeaconStateMutatorSetLatestExecutionPayloadHeaderCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockBeaconStateMutatorSetLatestExecutionPayloadHeaderCall) DoAndReturn(f func(*cltypes.Eth1Header)) *MockBeaconStateMutatorSetLatestExecutionPayloadHeaderCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// SetNextSyncCommittee mocks base method. +func (m *MockBeaconStateMutator) SetNextSyncCommittee(arg0 *solid.SyncCommittee) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetNextSyncCommittee", arg0) +} + +// SetNextSyncCommittee indicates an expected call of SetNextSyncCommittee. +func (mr *MockBeaconStateMutatorMockRecorder) SetNextSyncCommittee(arg0 any) *MockBeaconStateMutatorSetNextSyncCommitteeCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetNextSyncCommittee", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetNextSyncCommittee), arg0) + return &MockBeaconStateMutatorSetNextSyncCommitteeCall{Call: call} +} + +// MockBeaconStateMutatorSetNextSyncCommitteeCall wrap *gomock.Call +type MockBeaconStateMutatorSetNextSyncCommitteeCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockBeaconStateMutatorSetNextSyncCommitteeCall) Return() *MockBeaconStateMutatorSetNextSyncCommitteeCall { + c.Call = c.Call.Return() + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockBeaconStateMutatorSetNextSyncCommitteeCall) Do(f func(*solid.SyncCommittee)) *MockBeaconStateMutatorSetNextSyncCommitteeCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockBeaconStateMutatorSetNextSyncCommitteeCall) DoAndReturn(f func(*solid.SyncCommittee)) *MockBeaconStateMutatorSetNextSyncCommitteeCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// SetNextWithdrawalIndex mocks base method. +func (m *MockBeaconStateMutator) SetNextWithdrawalIndex(arg0 uint64) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetNextWithdrawalIndex", arg0) +} + +// SetNextWithdrawalIndex indicates an expected call of SetNextWithdrawalIndex. 
+func (mr *MockBeaconStateMutatorMockRecorder) SetNextWithdrawalIndex(arg0 any) *MockBeaconStateMutatorSetNextWithdrawalIndexCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetNextWithdrawalIndex", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetNextWithdrawalIndex), arg0) + return &MockBeaconStateMutatorSetNextWithdrawalIndexCall{Call: call} +} + +// MockBeaconStateMutatorSetNextWithdrawalIndexCall wrap *gomock.Call +type MockBeaconStateMutatorSetNextWithdrawalIndexCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockBeaconStateMutatorSetNextWithdrawalIndexCall) Return() *MockBeaconStateMutatorSetNextWithdrawalIndexCall { + c.Call = c.Call.Return() + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockBeaconStateMutatorSetNextWithdrawalIndexCall) Do(f func(uint64)) *MockBeaconStateMutatorSetNextWithdrawalIndexCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockBeaconStateMutatorSetNextWithdrawalIndexCall) DoAndReturn(f func(uint64)) *MockBeaconStateMutatorSetNextWithdrawalIndexCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// SetNextWithdrawalValidatorIndex mocks base method. +func (m *MockBeaconStateMutator) SetNextWithdrawalValidatorIndex(arg0 uint64) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetNextWithdrawalValidatorIndex", arg0) +} + +// SetNextWithdrawalValidatorIndex indicates an expected call of SetNextWithdrawalValidatorIndex. +func (mr *MockBeaconStateMutatorMockRecorder) SetNextWithdrawalValidatorIndex(arg0 any) *MockBeaconStateMutatorSetNextWithdrawalValidatorIndexCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetNextWithdrawalValidatorIndex", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetNextWithdrawalValidatorIndex), arg0) + return &MockBeaconStateMutatorSetNextWithdrawalValidatorIndexCall{Call: call} +} + +// MockBeaconStateMutatorSetNextWithdrawalValidatorIndexCall wrap *gomock.Call +type MockBeaconStateMutatorSetNextWithdrawalValidatorIndexCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockBeaconStateMutatorSetNextWithdrawalValidatorIndexCall) Return() *MockBeaconStateMutatorSetNextWithdrawalValidatorIndexCall { + c.Call = c.Call.Return() + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockBeaconStateMutatorSetNextWithdrawalValidatorIndexCall) Do(f func(uint64)) *MockBeaconStateMutatorSetNextWithdrawalValidatorIndexCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockBeaconStateMutatorSetNextWithdrawalValidatorIndexCall) DoAndReturn(f func(uint64)) *MockBeaconStateMutatorSetNextWithdrawalValidatorIndexCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// SetPreviousEpochParticipationFlags mocks base method. +func (m *MockBeaconStateMutator) SetPreviousEpochParticipationFlags(arg0 []cltypes.ParticipationFlags) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetPreviousEpochParticipationFlags", arg0) +} + +// SetPreviousEpochParticipationFlags indicates an expected call of SetPreviousEpochParticipationFlags. 
+func (mr *MockBeaconStateMutatorMockRecorder) SetPreviousEpochParticipationFlags(arg0 any) *MockBeaconStateMutatorSetPreviousEpochParticipationFlagsCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetPreviousEpochParticipationFlags", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetPreviousEpochParticipationFlags), arg0) + return &MockBeaconStateMutatorSetPreviousEpochParticipationFlagsCall{Call: call} +} + +// MockBeaconStateMutatorSetPreviousEpochParticipationFlagsCall wrap *gomock.Call +type MockBeaconStateMutatorSetPreviousEpochParticipationFlagsCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockBeaconStateMutatorSetPreviousEpochParticipationFlagsCall) Return() *MockBeaconStateMutatorSetPreviousEpochParticipationFlagsCall { + c.Call = c.Call.Return() + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockBeaconStateMutatorSetPreviousEpochParticipationFlagsCall) Do(f func([]cltypes.ParticipationFlags)) *MockBeaconStateMutatorSetPreviousEpochParticipationFlagsCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockBeaconStateMutatorSetPreviousEpochParticipationFlagsCall) DoAndReturn(f func([]cltypes.ParticipationFlags)) *MockBeaconStateMutatorSetPreviousEpochParticipationFlagsCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// SetPreviousJustifiedCheckpoint mocks base method. +func (m *MockBeaconStateMutator) SetPreviousJustifiedCheckpoint(arg0 solid.Checkpoint) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetPreviousJustifiedCheckpoint", arg0) +} + +// SetPreviousJustifiedCheckpoint indicates an expected call of SetPreviousJustifiedCheckpoint. +func (mr *MockBeaconStateMutatorMockRecorder) SetPreviousJustifiedCheckpoint(arg0 any) *MockBeaconStateMutatorSetPreviousJustifiedCheckpointCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetPreviousJustifiedCheckpoint", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetPreviousJustifiedCheckpoint), arg0) + return &MockBeaconStateMutatorSetPreviousJustifiedCheckpointCall{Call: call} +} + +// MockBeaconStateMutatorSetPreviousJustifiedCheckpointCall wrap *gomock.Call +type MockBeaconStateMutatorSetPreviousJustifiedCheckpointCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockBeaconStateMutatorSetPreviousJustifiedCheckpointCall) Return() *MockBeaconStateMutatorSetPreviousJustifiedCheckpointCall { + c.Call = c.Call.Return() + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockBeaconStateMutatorSetPreviousJustifiedCheckpointCall) Do(f func(solid.Checkpoint)) *MockBeaconStateMutatorSetPreviousJustifiedCheckpointCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockBeaconStateMutatorSetPreviousJustifiedCheckpointCall) DoAndReturn(f func(solid.Checkpoint)) *MockBeaconStateMutatorSetPreviousJustifiedCheckpointCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// SetRandaoMixAt mocks base method. +func (m *MockBeaconStateMutator) SetRandaoMixAt(arg0 int, arg1 common.Hash) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetRandaoMixAt", arg0, arg1) +} + +// SetRandaoMixAt indicates an expected call of SetRandaoMixAt. 
+func (mr *MockBeaconStateMutatorMockRecorder) SetRandaoMixAt(arg0, arg1 any) *MockBeaconStateMutatorSetRandaoMixAtCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetRandaoMixAt", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetRandaoMixAt), arg0, arg1) + return &MockBeaconStateMutatorSetRandaoMixAtCall{Call: call} +} + +// MockBeaconStateMutatorSetRandaoMixAtCall wrap *gomock.Call +type MockBeaconStateMutatorSetRandaoMixAtCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockBeaconStateMutatorSetRandaoMixAtCall) Return() *MockBeaconStateMutatorSetRandaoMixAtCall { + c.Call = c.Call.Return() + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockBeaconStateMutatorSetRandaoMixAtCall) Do(f func(int, common.Hash)) *MockBeaconStateMutatorSetRandaoMixAtCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockBeaconStateMutatorSetRandaoMixAtCall) DoAndReturn(f func(int, common.Hash)) *MockBeaconStateMutatorSetRandaoMixAtCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// SetSlashingSegmentAt mocks base method. +func (m *MockBeaconStateMutator) SetSlashingSegmentAt(arg0 int, arg1 uint64) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetSlashingSegmentAt", arg0, arg1) +} + +// SetSlashingSegmentAt indicates an expected call of SetSlashingSegmentAt. +func (mr *MockBeaconStateMutatorMockRecorder) SetSlashingSegmentAt(arg0, arg1 any) *MockBeaconStateMutatorSetSlashingSegmentAtCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetSlashingSegmentAt", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetSlashingSegmentAt), arg0, arg1) + return &MockBeaconStateMutatorSetSlashingSegmentAtCall{Call: call} +} + +// MockBeaconStateMutatorSetSlashingSegmentAtCall wrap *gomock.Call +type MockBeaconStateMutatorSetSlashingSegmentAtCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockBeaconStateMutatorSetSlashingSegmentAtCall) Return() *MockBeaconStateMutatorSetSlashingSegmentAtCall { + c.Call = c.Call.Return() + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockBeaconStateMutatorSetSlashingSegmentAtCall) Do(f func(int, uint64)) *MockBeaconStateMutatorSetSlashingSegmentAtCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockBeaconStateMutatorSetSlashingSegmentAtCall) DoAndReturn(f func(int, uint64)) *MockBeaconStateMutatorSetSlashingSegmentAtCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// SetSlot mocks base method. +func (m *MockBeaconStateMutator) SetSlot(arg0 uint64) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetSlot", arg0) +} + +// SetSlot indicates an expected call of SetSlot. 
+func (mr *MockBeaconStateMutatorMockRecorder) SetSlot(arg0 any) *MockBeaconStateMutatorSetSlotCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetSlot", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetSlot), arg0) + return &MockBeaconStateMutatorSetSlotCall{Call: call} +} + +// MockBeaconStateMutatorSetSlotCall wrap *gomock.Call +type MockBeaconStateMutatorSetSlotCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockBeaconStateMutatorSetSlotCall) Return() *MockBeaconStateMutatorSetSlotCall { + c.Call = c.Call.Return() + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockBeaconStateMutatorSetSlotCall) Do(f func(uint64)) *MockBeaconStateMutatorSetSlotCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockBeaconStateMutatorSetSlotCall) DoAndReturn(f func(uint64)) *MockBeaconStateMutatorSetSlotCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// SetStateRootAt mocks base method. +func (m *MockBeaconStateMutator) SetStateRootAt(arg0 int, arg1 common.Hash) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetStateRootAt", arg0, arg1) +} + +// SetStateRootAt indicates an expected call of SetStateRootAt. +func (mr *MockBeaconStateMutatorMockRecorder) SetStateRootAt(arg0, arg1 any) *MockBeaconStateMutatorSetStateRootAtCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetStateRootAt", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetStateRootAt), arg0, arg1) + return &MockBeaconStateMutatorSetStateRootAtCall{Call: call} +} + +// MockBeaconStateMutatorSetStateRootAtCall wrap *gomock.Call +type MockBeaconStateMutatorSetStateRootAtCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockBeaconStateMutatorSetStateRootAtCall) Return() *MockBeaconStateMutatorSetStateRootAtCall { + c.Call = c.Call.Return() + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockBeaconStateMutatorSetStateRootAtCall) Do(f func(int, common.Hash)) *MockBeaconStateMutatorSetStateRootAtCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockBeaconStateMutatorSetStateRootAtCall) DoAndReturn(f func(int, common.Hash)) *MockBeaconStateMutatorSetStateRootAtCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// SetValidatorAtIndex mocks base method. +func (m *MockBeaconStateMutator) SetValidatorAtIndex(arg0 int, arg1 solid.Validator) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetValidatorAtIndex", arg0, arg1) +} + +// SetValidatorAtIndex indicates an expected call of SetValidatorAtIndex. 
+func (mr *MockBeaconStateMutatorMockRecorder) SetValidatorAtIndex(arg0, arg1 any) *MockBeaconStateMutatorSetValidatorAtIndexCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetValidatorAtIndex", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetValidatorAtIndex), arg0, arg1) + return &MockBeaconStateMutatorSetValidatorAtIndexCall{Call: call} +} + +// MockBeaconStateMutatorSetValidatorAtIndexCall wrap *gomock.Call +type MockBeaconStateMutatorSetValidatorAtIndexCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockBeaconStateMutatorSetValidatorAtIndexCall) Return() *MockBeaconStateMutatorSetValidatorAtIndexCall { + c.Call = c.Call.Return() + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockBeaconStateMutatorSetValidatorAtIndexCall) Do(f func(int, solid.Validator)) *MockBeaconStateMutatorSetValidatorAtIndexCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockBeaconStateMutatorSetValidatorAtIndexCall) DoAndReturn(f func(int, solid.Validator)) *MockBeaconStateMutatorSetValidatorAtIndexCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// SetValidatorBalance mocks base method. +func (m *MockBeaconStateMutator) SetValidatorBalance(arg0 int, arg1 uint64) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetValidatorBalance", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetValidatorBalance indicates an expected call of SetValidatorBalance. +func (mr *MockBeaconStateMutatorMockRecorder) SetValidatorBalance(arg0, arg1 any) *MockBeaconStateMutatorSetValidatorBalanceCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetValidatorBalance", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetValidatorBalance), arg0, arg1) + return &MockBeaconStateMutatorSetValidatorBalanceCall{Call: call} +} + +// MockBeaconStateMutatorSetValidatorBalanceCall wrap *gomock.Call +type MockBeaconStateMutatorSetValidatorBalanceCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockBeaconStateMutatorSetValidatorBalanceCall) Return(arg0 error) *MockBeaconStateMutatorSetValidatorBalanceCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockBeaconStateMutatorSetValidatorBalanceCall) Do(f func(int, uint64) error) *MockBeaconStateMutatorSetValidatorBalanceCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockBeaconStateMutatorSetValidatorBalanceCall) DoAndReturn(f func(int, uint64) error) *MockBeaconStateMutatorSetValidatorBalanceCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// SetValidatorInactivityScore mocks base method. +func (m *MockBeaconStateMutator) SetValidatorInactivityScore(arg0 int, arg1 uint64) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetValidatorInactivityScore", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetValidatorInactivityScore indicates an expected call of SetValidatorInactivityScore. 
+func (mr *MockBeaconStateMutatorMockRecorder) SetValidatorInactivityScore(arg0, arg1 any) *MockBeaconStateMutatorSetValidatorInactivityScoreCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetValidatorInactivityScore", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetValidatorInactivityScore), arg0, arg1) + return &MockBeaconStateMutatorSetValidatorInactivityScoreCall{Call: call} +} + +// MockBeaconStateMutatorSetValidatorInactivityScoreCall wrap *gomock.Call +type MockBeaconStateMutatorSetValidatorInactivityScoreCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockBeaconStateMutatorSetValidatorInactivityScoreCall) Return(arg0 error) *MockBeaconStateMutatorSetValidatorInactivityScoreCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockBeaconStateMutatorSetValidatorInactivityScoreCall) Do(f func(int, uint64) error) *MockBeaconStateMutatorSetValidatorInactivityScoreCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockBeaconStateMutatorSetValidatorInactivityScoreCall) DoAndReturn(f func(int, uint64) error) *MockBeaconStateMutatorSetValidatorInactivityScoreCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// SetValidatorIsCurrentMatchingHeadAttester mocks base method. +func (m *MockBeaconStateMutator) SetValidatorIsCurrentMatchingHeadAttester(arg0 int, arg1 bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetValidatorIsCurrentMatchingHeadAttester", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetValidatorIsCurrentMatchingHeadAttester indicates an expected call of SetValidatorIsCurrentMatchingHeadAttester. +func (mr *MockBeaconStateMutatorMockRecorder) SetValidatorIsCurrentMatchingHeadAttester(arg0, arg1 any) *MockBeaconStateMutatorSetValidatorIsCurrentMatchingHeadAttesterCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetValidatorIsCurrentMatchingHeadAttester", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetValidatorIsCurrentMatchingHeadAttester), arg0, arg1) + return &MockBeaconStateMutatorSetValidatorIsCurrentMatchingHeadAttesterCall{Call: call} +} + +// MockBeaconStateMutatorSetValidatorIsCurrentMatchingHeadAttesterCall wrap *gomock.Call +type MockBeaconStateMutatorSetValidatorIsCurrentMatchingHeadAttesterCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockBeaconStateMutatorSetValidatorIsCurrentMatchingHeadAttesterCall) Return(arg0 error) *MockBeaconStateMutatorSetValidatorIsCurrentMatchingHeadAttesterCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockBeaconStateMutatorSetValidatorIsCurrentMatchingHeadAttesterCall) Do(f func(int, bool) error) *MockBeaconStateMutatorSetValidatorIsCurrentMatchingHeadAttesterCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockBeaconStateMutatorSetValidatorIsCurrentMatchingHeadAttesterCall) DoAndReturn(f func(int, bool) error) *MockBeaconStateMutatorSetValidatorIsCurrentMatchingHeadAttesterCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// SetValidatorIsCurrentMatchingSourceAttester mocks base method. 
+func (m *MockBeaconStateMutator) SetValidatorIsCurrentMatchingSourceAttester(arg0 int, arg1 bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetValidatorIsCurrentMatchingSourceAttester", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetValidatorIsCurrentMatchingSourceAttester indicates an expected call of SetValidatorIsCurrentMatchingSourceAttester. +func (mr *MockBeaconStateMutatorMockRecorder) SetValidatorIsCurrentMatchingSourceAttester(arg0, arg1 any) *MockBeaconStateMutatorSetValidatorIsCurrentMatchingSourceAttesterCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetValidatorIsCurrentMatchingSourceAttester", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetValidatorIsCurrentMatchingSourceAttester), arg0, arg1) + return &MockBeaconStateMutatorSetValidatorIsCurrentMatchingSourceAttesterCall{Call: call} +} + +// MockBeaconStateMutatorSetValidatorIsCurrentMatchingSourceAttesterCall wrap *gomock.Call +type MockBeaconStateMutatorSetValidatorIsCurrentMatchingSourceAttesterCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockBeaconStateMutatorSetValidatorIsCurrentMatchingSourceAttesterCall) Return(arg0 error) *MockBeaconStateMutatorSetValidatorIsCurrentMatchingSourceAttesterCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockBeaconStateMutatorSetValidatorIsCurrentMatchingSourceAttesterCall) Do(f func(int, bool) error) *MockBeaconStateMutatorSetValidatorIsCurrentMatchingSourceAttesterCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockBeaconStateMutatorSetValidatorIsCurrentMatchingSourceAttesterCall) DoAndReturn(f func(int, bool) error) *MockBeaconStateMutatorSetValidatorIsCurrentMatchingSourceAttesterCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// SetValidatorIsCurrentMatchingTargetAttester mocks base method. +func (m *MockBeaconStateMutator) SetValidatorIsCurrentMatchingTargetAttester(arg0 int, arg1 bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetValidatorIsCurrentMatchingTargetAttester", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetValidatorIsCurrentMatchingTargetAttester indicates an expected call of SetValidatorIsCurrentMatchingTargetAttester. 
+func (mr *MockBeaconStateMutatorMockRecorder) SetValidatorIsCurrentMatchingTargetAttester(arg0, arg1 any) *MockBeaconStateMutatorSetValidatorIsCurrentMatchingTargetAttesterCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetValidatorIsCurrentMatchingTargetAttester", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetValidatorIsCurrentMatchingTargetAttester), arg0, arg1) + return &MockBeaconStateMutatorSetValidatorIsCurrentMatchingTargetAttesterCall{Call: call} +} + +// MockBeaconStateMutatorSetValidatorIsCurrentMatchingTargetAttesterCall wrap *gomock.Call +type MockBeaconStateMutatorSetValidatorIsCurrentMatchingTargetAttesterCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockBeaconStateMutatorSetValidatorIsCurrentMatchingTargetAttesterCall) Return(arg0 error) *MockBeaconStateMutatorSetValidatorIsCurrentMatchingTargetAttesterCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockBeaconStateMutatorSetValidatorIsCurrentMatchingTargetAttesterCall) Do(f func(int, bool) error) *MockBeaconStateMutatorSetValidatorIsCurrentMatchingTargetAttesterCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockBeaconStateMutatorSetValidatorIsCurrentMatchingTargetAttesterCall) DoAndReturn(f func(int, bool) error) *MockBeaconStateMutatorSetValidatorIsCurrentMatchingTargetAttesterCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// SetValidatorIsPreviousMatchingHeadAttester mocks base method. +func (m *MockBeaconStateMutator) SetValidatorIsPreviousMatchingHeadAttester(arg0 int, arg1 bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetValidatorIsPreviousMatchingHeadAttester", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetValidatorIsPreviousMatchingHeadAttester indicates an expected call of SetValidatorIsPreviousMatchingHeadAttester. +func (mr *MockBeaconStateMutatorMockRecorder) SetValidatorIsPreviousMatchingHeadAttester(arg0, arg1 any) *MockBeaconStateMutatorSetValidatorIsPreviousMatchingHeadAttesterCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetValidatorIsPreviousMatchingHeadAttester", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetValidatorIsPreviousMatchingHeadAttester), arg0, arg1) + return &MockBeaconStateMutatorSetValidatorIsPreviousMatchingHeadAttesterCall{Call: call} +} + +// MockBeaconStateMutatorSetValidatorIsPreviousMatchingHeadAttesterCall wrap *gomock.Call +type MockBeaconStateMutatorSetValidatorIsPreviousMatchingHeadAttesterCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockBeaconStateMutatorSetValidatorIsPreviousMatchingHeadAttesterCall) Return(arg0 error) *MockBeaconStateMutatorSetValidatorIsPreviousMatchingHeadAttesterCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockBeaconStateMutatorSetValidatorIsPreviousMatchingHeadAttesterCall) Do(f func(int, bool) error) *MockBeaconStateMutatorSetValidatorIsPreviousMatchingHeadAttesterCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockBeaconStateMutatorSetValidatorIsPreviousMatchingHeadAttesterCall) DoAndReturn(f func(int, bool) error) *MockBeaconStateMutatorSetValidatorIsPreviousMatchingHeadAttesterCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// SetValidatorIsPreviousMatchingSourceAttester mocks base method. 
+func (m *MockBeaconStateMutator) SetValidatorIsPreviousMatchingSourceAttester(arg0 int, arg1 bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetValidatorIsPreviousMatchingSourceAttester", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetValidatorIsPreviousMatchingSourceAttester indicates an expected call of SetValidatorIsPreviousMatchingSourceAttester. +func (mr *MockBeaconStateMutatorMockRecorder) SetValidatorIsPreviousMatchingSourceAttester(arg0, arg1 any) *MockBeaconStateMutatorSetValidatorIsPreviousMatchingSourceAttesterCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetValidatorIsPreviousMatchingSourceAttester", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetValidatorIsPreviousMatchingSourceAttester), arg0, arg1) + return &MockBeaconStateMutatorSetValidatorIsPreviousMatchingSourceAttesterCall{Call: call} +} + +// MockBeaconStateMutatorSetValidatorIsPreviousMatchingSourceAttesterCall wrap *gomock.Call +type MockBeaconStateMutatorSetValidatorIsPreviousMatchingSourceAttesterCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockBeaconStateMutatorSetValidatorIsPreviousMatchingSourceAttesterCall) Return(arg0 error) *MockBeaconStateMutatorSetValidatorIsPreviousMatchingSourceAttesterCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockBeaconStateMutatorSetValidatorIsPreviousMatchingSourceAttesterCall) Do(f func(int, bool) error) *MockBeaconStateMutatorSetValidatorIsPreviousMatchingSourceAttesterCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockBeaconStateMutatorSetValidatorIsPreviousMatchingSourceAttesterCall) DoAndReturn(f func(int, bool) error) *MockBeaconStateMutatorSetValidatorIsPreviousMatchingSourceAttesterCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// SetValidatorIsPreviousMatchingTargetAttester mocks base method. +func (m *MockBeaconStateMutator) SetValidatorIsPreviousMatchingTargetAttester(arg0 int, arg1 bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetValidatorIsPreviousMatchingTargetAttester", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetValidatorIsPreviousMatchingTargetAttester indicates an expected call of SetValidatorIsPreviousMatchingTargetAttester. 
+func (mr *MockBeaconStateMutatorMockRecorder) SetValidatorIsPreviousMatchingTargetAttester(arg0, arg1 any) *MockBeaconStateMutatorSetValidatorIsPreviousMatchingTargetAttesterCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetValidatorIsPreviousMatchingTargetAttester", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetValidatorIsPreviousMatchingTargetAttester), arg0, arg1) + return &MockBeaconStateMutatorSetValidatorIsPreviousMatchingTargetAttesterCall{Call: call} +} + +// MockBeaconStateMutatorSetValidatorIsPreviousMatchingTargetAttesterCall wrap *gomock.Call +type MockBeaconStateMutatorSetValidatorIsPreviousMatchingTargetAttesterCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockBeaconStateMutatorSetValidatorIsPreviousMatchingTargetAttesterCall) Return(arg0 error) *MockBeaconStateMutatorSetValidatorIsPreviousMatchingTargetAttesterCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockBeaconStateMutatorSetValidatorIsPreviousMatchingTargetAttesterCall) Do(f func(int, bool) error) *MockBeaconStateMutatorSetValidatorIsPreviousMatchingTargetAttesterCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockBeaconStateMutatorSetValidatorIsPreviousMatchingTargetAttesterCall) DoAndReturn(f func(int, bool) error) *MockBeaconStateMutatorSetValidatorIsPreviousMatchingTargetAttesterCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// SetValidatorMinCurrentInclusionDelayAttestation mocks base method. +func (m *MockBeaconStateMutator) SetValidatorMinCurrentInclusionDelayAttestation(arg0 int, arg1 *solid.PendingAttestation) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetValidatorMinCurrentInclusionDelayAttestation", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetValidatorMinCurrentInclusionDelayAttestation indicates an expected call of SetValidatorMinCurrentInclusionDelayAttestation. 
+func (mr *MockBeaconStateMutatorMockRecorder) SetValidatorMinCurrentInclusionDelayAttestation(arg0, arg1 any) *MockBeaconStateMutatorSetValidatorMinCurrentInclusionDelayAttestationCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetValidatorMinCurrentInclusionDelayAttestation", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetValidatorMinCurrentInclusionDelayAttestation), arg0, arg1) + return &MockBeaconStateMutatorSetValidatorMinCurrentInclusionDelayAttestationCall{Call: call} +} + +// MockBeaconStateMutatorSetValidatorMinCurrentInclusionDelayAttestationCall wrap *gomock.Call +type MockBeaconStateMutatorSetValidatorMinCurrentInclusionDelayAttestationCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockBeaconStateMutatorSetValidatorMinCurrentInclusionDelayAttestationCall) Return(arg0 error) *MockBeaconStateMutatorSetValidatorMinCurrentInclusionDelayAttestationCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockBeaconStateMutatorSetValidatorMinCurrentInclusionDelayAttestationCall) Do(f func(int, *solid.PendingAttestation) error) *MockBeaconStateMutatorSetValidatorMinCurrentInclusionDelayAttestationCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockBeaconStateMutatorSetValidatorMinCurrentInclusionDelayAttestationCall) DoAndReturn(f func(int, *solid.PendingAttestation) error) *MockBeaconStateMutatorSetValidatorMinCurrentInclusionDelayAttestationCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// SetValidatorMinPreviousInclusionDelayAttestation mocks base method. +func (m *MockBeaconStateMutator) SetValidatorMinPreviousInclusionDelayAttestation(arg0 int, arg1 *solid.PendingAttestation) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetValidatorMinPreviousInclusionDelayAttestation", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetValidatorMinPreviousInclusionDelayAttestation indicates an expected call of SetValidatorMinPreviousInclusionDelayAttestation. 
+func (mr *MockBeaconStateMutatorMockRecorder) SetValidatorMinPreviousInclusionDelayAttestation(arg0, arg1 any) *MockBeaconStateMutatorSetValidatorMinPreviousInclusionDelayAttestationCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetValidatorMinPreviousInclusionDelayAttestation", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetValidatorMinPreviousInclusionDelayAttestation), arg0, arg1) + return &MockBeaconStateMutatorSetValidatorMinPreviousInclusionDelayAttestationCall{Call: call} +} + +// MockBeaconStateMutatorSetValidatorMinPreviousInclusionDelayAttestationCall wrap *gomock.Call +type MockBeaconStateMutatorSetValidatorMinPreviousInclusionDelayAttestationCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockBeaconStateMutatorSetValidatorMinPreviousInclusionDelayAttestationCall) Return(arg0 error) *MockBeaconStateMutatorSetValidatorMinPreviousInclusionDelayAttestationCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockBeaconStateMutatorSetValidatorMinPreviousInclusionDelayAttestationCall) Do(f func(int, *solid.PendingAttestation) error) *MockBeaconStateMutatorSetValidatorMinPreviousInclusionDelayAttestationCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockBeaconStateMutatorSetValidatorMinPreviousInclusionDelayAttestationCall) DoAndReturn(f func(int, *solid.PendingAttestation) error) *MockBeaconStateMutatorSetValidatorMinPreviousInclusionDelayAttestationCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// SetValidatorSlashed mocks base method. +func (m *MockBeaconStateMutator) SetValidatorSlashed(arg0 int, arg1 bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetValidatorSlashed", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetValidatorSlashed indicates an expected call of SetValidatorSlashed. +func (mr *MockBeaconStateMutatorMockRecorder) SetValidatorSlashed(arg0, arg1 any) *MockBeaconStateMutatorSetValidatorSlashedCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetValidatorSlashed", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetValidatorSlashed), arg0, arg1) + return &MockBeaconStateMutatorSetValidatorSlashedCall{Call: call} +} + +// MockBeaconStateMutatorSetValidatorSlashedCall wrap *gomock.Call +type MockBeaconStateMutatorSetValidatorSlashedCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockBeaconStateMutatorSetValidatorSlashedCall) Return(arg0 error) *MockBeaconStateMutatorSetValidatorSlashedCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockBeaconStateMutatorSetValidatorSlashedCall) Do(f func(int, bool) error) *MockBeaconStateMutatorSetValidatorSlashedCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockBeaconStateMutatorSetValidatorSlashedCall) DoAndReturn(f func(int, bool) error) *MockBeaconStateMutatorSetValidatorSlashedCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// SetVersion mocks base method. +func (m *MockBeaconStateMutator) SetVersion(arg0 clparams.StateVersion) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetVersion", arg0) +} + +// SetVersion indicates an expected call of SetVersion. 
+func (mr *MockBeaconStateMutatorMockRecorder) SetVersion(arg0 any) *MockBeaconStateMutatorSetVersionCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetVersion", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetVersion), arg0) + return &MockBeaconStateMutatorSetVersionCall{Call: call} +} + +// MockBeaconStateMutatorSetVersionCall wrap *gomock.Call +type MockBeaconStateMutatorSetVersionCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockBeaconStateMutatorSetVersionCall) Return() *MockBeaconStateMutatorSetVersionCall { + c.Call = c.Call.Return() + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockBeaconStateMutatorSetVersionCall) Do(f func(clparams.StateVersion)) *MockBeaconStateMutatorSetVersionCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockBeaconStateMutatorSetVersionCall) DoAndReturn(f func(clparams.StateVersion)) *MockBeaconStateMutatorSetVersionCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// SetWithdrawableEpochForValidatorAtIndex mocks base method. +func (m *MockBeaconStateMutator) SetWithdrawableEpochForValidatorAtIndex(arg0 int, arg1 uint64) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetWithdrawableEpochForValidatorAtIndex", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetWithdrawableEpochForValidatorAtIndex indicates an expected call of SetWithdrawableEpochForValidatorAtIndex. +func (mr *MockBeaconStateMutatorMockRecorder) SetWithdrawableEpochForValidatorAtIndex(arg0, arg1 any) *MockBeaconStateMutatorSetWithdrawableEpochForValidatorAtIndexCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetWithdrawableEpochForValidatorAtIndex", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetWithdrawableEpochForValidatorAtIndex), arg0, arg1) + return &MockBeaconStateMutatorSetWithdrawableEpochForValidatorAtIndexCall{Call: call} +} + +// MockBeaconStateMutatorSetWithdrawableEpochForValidatorAtIndexCall wrap *gomock.Call +type MockBeaconStateMutatorSetWithdrawableEpochForValidatorAtIndexCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockBeaconStateMutatorSetWithdrawableEpochForValidatorAtIndexCall) Return(arg0 error) *MockBeaconStateMutatorSetWithdrawableEpochForValidatorAtIndexCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockBeaconStateMutatorSetWithdrawableEpochForValidatorAtIndexCall) Do(f func(int, uint64) error) *MockBeaconStateMutatorSetWithdrawableEpochForValidatorAtIndexCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockBeaconStateMutatorSetWithdrawableEpochForValidatorAtIndexCall) DoAndReturn(f func(int, uint64) error) *MockBeaconStateMutatorSetWithdrawableEpochForValidatorAtIndexCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// SetWithdrawalCredentialForValidatorAtIndex mocks base method. +func (m *MockBeaconStateMutator) SetWithdrawalCredentialForValidatorAtIndex(arg0 int, arg1 common.Hash) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetWithdrawalCredentialForValidatorAtIndex", arg0, arg1) +} + +// SetWithdrawalCredentialForValidatorAtIndex indicates an expected call of SetWithdrawalCredentialForValidatorAtIndex. 
+func (mr *MockBeaconStateMutatorMockRecorder) SetWithdrawalCredentialForValidatorAtIndex(arg0, arg1 any) *MockBeaconStateMutatorSetWithdrawalCredentialForValidatorAtIndexCall {
+	mr.mock.ctrl.T.Helper()
+	call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetWithdrawalCredentialForValidatorAtIndex", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetWithdrawalCredentialForValidatorAtIndex), arg0, arg1)
+	return &MockBeaconStateMutatorSetWithdrawalCredentialForValidatorAtIndexCall{Call: call}
+}
+
+// MockBeaconStateMutatorSetWithdrawalCredentialForValidatorAtIndexCall wrap *gomock.Call
+type MockBeaconStateMutatorSetWithdrawalCredentialForValidatorAtIndexCall struct {
+	*gomock.Call
+}
+
+// Return rewrite *gomock.Call.Return
+func (c *MockBeaconStateMutatorSetWithdrawalCredentialForValidatorAtIndexCall) Return() *MockBeaconStateMutatorSetWithdrawalCredentialForValidatorAtIndexCall {
+	c.Call = c.Call.Return()
+	return c
+}
+
+// Do rewrite *gomock.Call.Do
+func (c *MockBeaconStateMutatorSetWithdrawalCredentialForValidatorAtIndexCall) Do(f func(int, common.Hash)) *MockBeaconStateMutatorSetWithdrawalCredentialForValidatorAtIndexCall {
+	c.Call = c.Call.Do(f)
+	return c
+}
+
+// DoAndReturn rewrite *gomock.Call.DoAndReturn
+func (c *MockBeaconStateMutatorSetWithdrawalCredentialForValidatorAtIndexCall) DoAndReturn(f func(int, common.Hash)) *MockBeaconStateMutatorSetWithdrawalCredentialForValidatorAtIndexCall {
+	c.Call = c.Call.DoAndReturn(f)
+	return c
+}
+
+// SetPreviousEpochAttestations is a hand-written no-op stub: it satisfies the
+// interface but, unlike the generated methods above, records no gomock call.
+func (m *MockBeaconStateMutator) SetPreviousEpochAttestations(attestations *solid.ListSSZ[*solid.PendingAttestation]) {
+}
\ No newline at end of file
diff --git a/cl/phase1/core/state/mock_services/beacon_state_reader_mock.go b/cl/abstract/mock_services/beacon_state_reader_mock.go
similarity index 54%
rename from cl/phase1/core/state/mock_services/beacon_state_reader_mock.go
rename to cl/abstract/mock_services/beacon_state_reader_mock.go
index 94875a81103..3f92cdb0131 100644
--- a/cl/phase1/core/state/mock_services/beacon_state_reader_mock.go
+++ b/cl/abstract/mock_services/beacon_state_reader_mock.go
@@ -1,5 +1,5 @@
 // Code generated by MockGen. DO NOT EDIT.
-// Source: github.com/ledgerwatch/erigon/cl/phase1/core/state (interfaces: BeaconStateReader)
+// Source: github.com/ledgerwatch/erigon/cl/abstract (interfaces: BeaconStateReader)
 //
 // Generated by this command:
 //
@@ -13,6 +13,8 @@ import (
 	reflect "reflect"
 
 	common "github.com/ledgerwatch/erigon-lib/common"
+	clparams "github.com/ledgerwatch/erigon/cl/clparams"
+	solid "github.com/ledgerwatch/erigon/cl/cltypes/solid"
 	gomock "go.uber.org/mock/gomock"
 )
 
@@ -77,6 +79,44 @@ func (c *MockBeaconStateReaderCommitteeCountCall) DoAndReturn(f func(uint64) uin
 	return c
 }
 
+// GenesisValidatorsRoot mocks base method.
+func (m *MockBeaconStateReader) GenesisValidatorsRoot() common.Hash {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "GenesisValidatorsRoot")
+	ret0, _ := ret[0].(common.Hash)
+	return ret0
+}
+
+// GenesisValidatorsRoot indicates an expected call of GenesisValidatorsRoot.
+func (mr *MockBeaconStateReaderMockRecorder) GenesisValidatorsRoot() *MockBeaconStateReaderGenesisValidatorsRootCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GenesisValidatorsRoot", reflect.TypeOf((*MockBeaconStateReader)(nil).GenesisValidatorsRoot)) + return &MockBeaconStateReaderGenesisValidatorsRootCall{Call: call} +} + +// MockBeaconStateReaderGenesisValidatorsRootCall wrap *gomock.Call +type MockBeaconStateReaderGenesisValidatorsRootCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockBeaconStateReaderGenesisValidatorsRootCall) Return(arg0 common.Hash) *MockBeaconStateReaderGenesisValidatorsRootCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockBeaconStateReaderGenesisValidatorsRootCall) Do(f func() common.Hash) *MockBeaconStateReaderGenesisValidatorsRootCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockBeaconStateReaderGenesisValidatorsRootCall) DoAndReturn(f func() common.Hash) *MockBeaconStateReaderGenesisValidatorsRootCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + // GetDomain mocks base method. func (m *MockBeaconStateReader) GetDomain(arg0 [4]byte, arg1 uint64) ([]byte, error) { m.ctrl.T.Helper() @@ -116,6 +156,45 @@ func (c *MockBeaconStateReaderGetDomainCall) DoAndReturn(f func([4]byte, uint64) return c } +// ValidatorForValidatorIndex mocks base method. +func (m *MockBeaconStateReader) ValidatorForValidatorIndex(arg0 int) (solid.Validator, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ValidatorForValidatorIndex", arg0) + ret0, _ := ret[0].(solid.Validator) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ValidatorForValidatorIndex indicates an expected call of ValidatorForValidatorIndex. +func (mr *MockBeaconStateReaderMockRecorder) ValidatorForValidatorIndex(arg0 any) *MockBeaconStateReaderValidatorForValidatorIndexCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidatorForValidatorIndex", reflect.TypeOf((*MockBeaconStateReader)(nil).ValidatorForValidatorIndex), arg0) + return &MockBeaconStateReaderValidatorForValidatorIndexCall{Call: call} +} + +// MockBeaconStateReaderValidatorForValidatorIndexCall wrap *gomock.Call +type MockBeaconStateReaderValidatorForValidatorIndexCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockBeaconStateReaderValidatorForValidatorIndexCall) Return(arg0 solid.Validator, arg1 error) *MockBeaconStateReaderValidatorForValidatorIndexCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockBeaconStateReaderValidatorForValidatorIndexCall) Do(f func(int) (solid.Validator, error)) *MockBeaconStateReaderValidatorForValidatorIndexCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockBeaconStateReaderValidatorForValidatorIndexCall) DoAndReturn(f func(int) (solid.Validator, error)) *MockBeaconStateReaderValidatorForValidatorIndexCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + // ValidatorPublicKey mocks base method. func (m *MockBeaconStateReader) ValidatorPublicKey(arg0 int) (common.Bytes48, error) { m.ctrl.T.Helper() @@ -154,3 +233,41 @@ func (c *MockBeaconStateReaderValidatorPublicKeyCall) DoAndReturn(f func(int) (c c.Call = c.Call.DoAndReturn(f) return c } + +// Version mocks base method. 
+func (m *MockBeaconStateReader) Version() clparams.StateVersion { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Version") + ret0, _ := ret[0].(clparams.StateVersion) + return ret0 +} + +// Version indicates an expected call of Version. +func (mr *MockBeaconStateReaderMockRecorder) Version() *MockBeaconStateReaderVersionCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Version", reflect.TypeOf((*MockBeaconStateReader)(nil).Version)) + return &MockBeaconStateReaderVersionCall{Call: call} +} + +// MockBeaconStateReaderVersionCall wrap *gomock.Call +type MockBeaconStateReaderVersionCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockBeaconStateReaderVersionCall) Return(arg0 clparams.StateVersion) *MockBeaconStateReaderVersionCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockBeaconStateReaderVersionCall) Do(f func() clparams.StateVersion) *MockBeaconStateReaderVersionCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockBeaconStateReaderVersionCall) DoAndReturn(f func() clparams.StateVersion) *MockBeaconStateReaderVersionCall { + c.Call = c.Call.DoAndReturn(f) + return c +} diff --git a/cl/beacon/synced_data/interface.go b/cl/beacon/synced_data/interface.go index e7566b8eea8..c32b4ec2c78 100644 --- a/cl/beacon/synced_data/interface.go +++ b/cl/beacon/synced_data/interface.go @@ -1,12 +1,16 @@ package synced_data -import "github.com/ledgerwatch/erigon/cl/phase1/core/state" +import ( + "github.com/ledgerwatch/erigon/cl/abstract" + "github.com/ledgerwatch/erigon/cl/phase1/core/state" +) //go:generate mockgen -typed=true -destination=./mock_services/synced_data_mock.go -package=mock_services . SyncedData type SyncedData interface { - OnHeadState(newState *state.CachingBeaconState) (err error) + OnHeadState(newState *state.CachingBeaconState) error HeadState() *state.CachingBeaconState - HeadStateReader() state.BeaconStateReader + HeadStateReader() abstract.BeaconStateReader + HeadStateMutator() abstract.BeaconStateMutator Syncing() bool HeadSlot() uint64 } diff --git a/cl/beacon/synced_data/mock_services/synced_data_mock.go b/cl/beacon/synced_data/mock_services/synced_data_mock.go index a5111b2e485..819fb61e280 100644 --- a/cl/beacon/synced_data/mock_services/synced_data_mock.go +++ b/cl/beacon/synced_data/mock_services/synced_data_mock.go @@ -12,6 +12,7 @@ package mock_services import ( reflect "reflect" + abstract "github.com/ledgerwatch/erigon/cl/abstract" state "github.com/ledgerwatch/erigon/cl/phase1/core/state" gomock "go.uber.org/mock/gomock" ) @@ -115,11 +116,49 @@ func (c *MockSyncedDataHeadStateCall) DoAndReturn(f func() *state.CachingBeaconS return c } +// HeadStateMutator mocks base method. +func (m *MockSyncedData) HeadStateMutator() abstract.BeaconStateMutator { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "HeadStateMutator") + ret0, _ := ret[0].(abstract.BeaconStateMutator) + return ret0 +} + +// HeadStateMutator indicates an expected call of HeadStateMutator. 
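// The SyncedDataManager hunk further below nil-checks the concrete
// *state.CachingBeaconState before returning it behind the new abstract
// interfaces. That guard is load-bearing: returning a nil concrete pointer
// directly would produce a non-nil interface value, so the
// `if state == nil { return ErrIgnore }` checks in the services would stop
// firing. A self-contained sketch of the pitfall, with illustrative names:
//
//	type reader interface{ Version() int }
//
//	type cachingState struct{}
//
//	func (s *cachingState) Version() int { return 0 }
//
//	// bad wraps a typed nil in the interface; callers' nil checks fail:
//	// bad() == nil evaluates to false, because the interface value holds
//	// the pair ((*cachingState)(nil), *cachingState).
//	func bad() reader { var s *cachingState; return s }
//
//	// good mirrors HeadStateReader/HeadStateMutator: return an untyped
//	// nil explicitly, so good() == nil evaluates to true.
//	func good() reader {
//		var s *cachingState
//		if s == nil {
//			return nil
//		}
//		return s
//	}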
+func (mr *MockSyncedDataMockRecorder) HeadStateMutator() *MockSyncedDataHeadStateMutatorCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HeadStateMutator", reflect.TypeOf((*MockSyncedData)(nil).HeadStateMutator)) + return &MockSyncedDataHeadStateMutatorCall{Call: call} +} + +// MockSyncedDataHeadStateMutatorCall wrap *gomock.Call +type MockSyncedDataHeadStateMutatorCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockSyncedDataHeadStateMutatorCall) Return(arg0 abstract.BeaconStateMutator) *MockSyncedDataHeadStateMutatorCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockSyncedDataHeadStateMutatorCall) Do(f func() abstract.BeaconStateMutator) *MockSyncedDataHeadStateMutatorCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockSyncedDataHeadStateMutatorCall) DoAndReturn(f func() abstract.BeaconStateMutator) *MockSyncedDataHeadStateMutatorCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + // HeadStateReader mocks base method. -func (m *MockSyncedData) HeadStateReader() state.BeaconStateReader { +func (m *MockSyncedData) HeadStateReader() abstract.BeaconStateReader { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "HeadStateReader") - ret0, _ := ret[0].(state.BeaconStateReader) + ret0, _ := ret[0].(abstract.BeaconStateReader) return ret0 } @@ -136,19 +175,19 @@ type MockSyncedDataHeadStateReaderCall struct { } // Return rewrite *gomock.Call.Return -func (c *MockSyncedDataHeadStateReaderCall) Return(arg0 state.BeaconStateReader) *MockSyncedDataHeadStateReaderCall { +func (c *MockSyncedDataHeadStateReaderCall) Return(arg0 abstract.BeaconStateReader) *MockSyncedDataHeadStateReaderCall { c.Call = c.Call.Return(arg0) return c } // Do rewrite *gomock.Call.Do -func (c *MockSyncedDataHeadStateReaderCall) Do(f func() state.BeaconStateReader) *MockSyncedDataHeadStateReaderCall { +func (c *MockSyncedDataHeadStateReaderCall) Do(f func() abstract.BeaconStateReader) *MockSyncedDataHeadStateReaderCall { c.Call = c.Call.Do(f) return c } // DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockSyncedDataHeadStateReaderCall) DoAndReturn(f func() state.BeaconStateReader) *MockSyncedDataHeadStateReaderCall { +func (c *MockSyncedDataHeadStateReaderCall) DoAndReturn(f func() abstract.BeaconStateReader) *MockSyncedDataHeadStateReaderCall { c.Call = c.Call.DoAndReturn(f) return c } diff --git a/cl/beacon/synced_data/synced_data.go b/cl/beacon/synced_data/synced_data.go index 9248142d902..58bfa673f5d 100644 --- a/cl/beacon/synced_data/synced_data.go +++ b/cl/beacon/synced_data/synced_data.go @@ -3,6 +3,7 @@ package synced_data import ( "sync/atomic" + "github.com/ledgerwatch/erigon/cl/abstract" "github.com/ledgerwatch/erigon/cl/clparams" "github.com/ledgerwatch/erigon/cl/phase1/core/state" ) @@ -43,7 +44,15 @@ func (s *SyncedDataManager) HeadState() *state.CachingBeaconState { return nil } -func (s *SyncedDataManager) HeadStateReader() state.BeaconStateReader { +func (s *SyncedDataManager) HeadStateReader() abstract.BeaconStateReader { + headstate := s.HeadState() + if headstate == nil { + return nil + } + return headstate +} + +func (s *SyncedDataManager) HeadStateMutator() abstract.BeaconStateMutator { headstate := s.HeadState() if headstate == nil { return nil diff --git a/cl/phase1/core/state/interface.go b/cl/phase1/core/state/interface.go deleted file mode 100644 index eb1e14a3973..00000000000 --- a/cl/phase1/core/state/interface.go 
+++ /dev/null @@ -1,12 +0,0 @@ -package state - -import libcommon "github.com/ledgerwatch/erigon-lib/common" - -// BeaconStateReader is an interface for reading the beacon state. -// -//go:generate mockgen -typed=true -destination=./mock_services/beacon_state_reader_mock.go -package=mock_services . BeaconStateReader -type BeaconStateReader interface { - ValidatorPublicKey(index int) (libcommon.Bytes48, error) - GetDomain(domainType [4]byte, epoch uint64) ([]byte, error) - CommitteeCount(epoch uint64) uint64 -} diff --git a/cl/phase1/network/services/attestation_service_test.go b/cl/phase1/network/services/attestation_service_test.go index 18428bf79ec..1e2cb816d15 100644 --- a/cl/phase1/network/services/attestation_service_test.go +++ b/cl/phase1/network/services/attestation_service_test.go @@ -10,12 +10,12 @@ import ( "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/types/ssz" + "github.com/ledgerwatch/erigon/cl/abstract" + mockState "github.com/ledgerwatch/erigon/cl/abstract/mock_services" mockSync "github.com/ledgerwatch/erigon/cl/beacon/synced_data/mock_services" "github.com/ledgerwatch/erigon/cl/clparams" "github.com/ledgerwatch/erigon/cl/cltypes" "github.com/ledgerwatch/erigon/cl/cltypes/solid" - "github.com/ledgerwatch/erigon/cl/phase1/core/state" - mockState "github.com/ledgerwatch/erigon/cl/phase1/core/state/mock_services" "github.com/ledgerwatch/erigon/cl/phase1/forkchoice/mock_services" "github.com/ledgerwatch/erigon/cl/utils/eth_clock" mockCommittee "github.com/ledgerwatch/erigon/cl/validator/committee_subscription/mock_services" @@ -84,7 +84,7 @@ func (t *attestationTestSuite) TestAttestationProcessMessage() { name: "Test attestation with committee index out of range", mock: func() { t.syncedData.EXPECT().HeadStateReader().Return(t.beaconStateReader).Times(1) - computeCommitteeCountPerSlot = func(_ state.BeaconStateReader, _, _ uint64) uint64 { + computeCommitteeCountPerSlot = func(_ abstract.BeaconStateReader, _, _ uint64) uint64 { return 1 } }, @@ -99,7 +99,7 @@ func (t *attestationTestSuite) TestAttestationProcessMessage() { name: "Test attestation with wrong subnet", mock: func() { t.syncedData.EXPECT().HeadStateReader().Return(t.beaconStateReader).Times(1) - computeCommitteeCountPerSlot = func(_ state.BeaconStateReader, _, _ uint64) uint64 { + computeCommitteeCountPerSlot = func(_ abstract.BeaconStateReader, _, _ uint64) uint64 { return 5 } computeSubnetForAttestation = func(_, _, _, _, _ uint64) uint64 { @@ -117,7 +117,7 @@ func (t *attestationTestSuite) TestAttestationProcessMessage() { name: "Test attestation with wrong slot (current_slot < slot)", mock: func() { t.syncedData.EXPECT().HeadStateReader().Return(t.beaconStateReader).Times(1) - computeCommitteeCountPerSlot = func(_ state.BeaconStateReader, _, _ uint64) uint64 { + computeCommitteeCountPerSlot = func(_ abstract.BeaconStateReader, _, _ uint64) uint64 { return 5 } computeSubnetForAttestation = func(_, _, _, _, _ uint64) uint64 { @@ -136,7 +136,7 @@ func (t *attestationTestSuite) TestAttestationProcessMessage() { name: "Attestation is aggregated", mock: func() { t.syncedData.EXPECT().HeadStateReader().Return(t.beaconStateReader).Times(1) - computeCommitteeCountPerSlot = func(_ state.BeaconStateReader, _, _ uint64) uint64 { + computeCommitteeCountPerSlot = func(_ abstract.BeaconStateReader, _, _ uint64) uint64 { return 5 } computeSubnetForAttestation = func(_, _, _, _, _ uint64) uint64 { @@ -159,7 +159,7 @@ func (t *attestationTestSuite) TestAttestationProcessMessage() { name: 
"Attestation is empty", mock: func() { t.syncedData.EXPECT().HeadStateReader().Return(t.beaconStateReader).Times(1) - computeCommitteeCountPerSlot = func(_ state.BeaconStateReader, _, _ uint64) uint64 { + computeCommitteeCountPerSlot = func(_ abstract.BeaconStateReader, _, _ uint64) uint64 { return 5 } computeSubnetForAttestation = func(_, _, _, _, _ uint64) uint64 { @@ -182,7 +182,7 @@ func (t *attestationTestSuite) TestAttestationProcessMessage() { name: "invalid signature", mock: func() { t.syncedData.EXPECT().HeadStateReader().Return(t.beaconStateReader).Times(1) - computeCommitteeCountPerSlot = func(_ state.BeaconStateReader, _, _ uint64) uint64 { + computeCommitteeCountPerSlot = func(_ abstract.BeaconStateReader, _, _ uint64) uint64 { return 5 } computeSubnetForAttestation = func(_, _, _, _, _ uint64) uint64 { @@ -209,7 +209,7 @@ func (t *attestationTestSuite) TestAttestationProcessMessage() { name: "block header not found", mock: func() { t.syncedData.EXPECT().HeadStateReader().Return(t.beaconStateReader).Times(1) - computeCommitteeCountPerSlot = func(_ state.BeaconStateReader, _, _ uint64) uint64 { + computeCommitteeCountPerSlot = func(_ abstract.BeaconStateReader, _, _ uint64) uint64 { return 8 } computeSubnetForAttestation = func(_, _, _, _, _ uint64) uint64 { @@ -236,7 +236,7 @@ func (t *attestationTestSuite) TestAttestationProcessMessage() { name: "invalid target block", mock: func() { t.syncedData.EXPECT().HeadStateReader().Return(t.beaconStateReader).Times(1) - computeCommitteeCountPerSlot = func(_ state.BeaconStateReader, _, _ uint64) uint64 { + computeCommitteeCountPerSlot = func(_ abstract.BeaconStateReader, _, _ uint64) uint64 { return 8 } computeSubnetForAttestation = func(_, _, _, _, _ uint64) uint64 { @@ -266,7 +266,7 @@ func (t *attestationTestSuite) TestAttestationProcessMessage() { name: "invalid finality checkpoint", mock: func() { t.syncedData.EXPECT().HeadStateReader().Return(t.beaconStateReader).Times(1) - computeCommitteeCountPerSlot = func(_ state.BeaconStateReader, _, _ uint64) uint64 { + computeCommitteeCountPerSlot = func(_ abstract.BeaconStateReader, _, _ uint64) uint64 { return 8 } computeSubnetForAttestation = func(_, _, _, _, _ uint64) uint64 { @@ -304,7 +304,7 @@ func (t *attestationTestSuite) TestAttestationProcessMessage() { name: "success", mock: func() { t.syncedData.EXPECT().HeadStateReader().Return(t.beaconStateReader).Times(1) - computeCommitteeCountPerSlot = func(_ state.BeaconStateReader, _, _ uint64) uint64 { + computeCommitteeCountPerSlot = func(_ abstract.BeaconStateReader, _, _ uint64) uint64 { return 8 } computeSubnetForAttestation = func(_, _, _, _, _ uint64) uint64 { diff --git a/cl/phase1/network/services/blob_sidecar_service_test.go b/cl/phase1/network/services/blob_sidecar_service_test.go index e2768e1ee96..ce70b897101 100644 --- a/cl/phase1/network/services/blob_sidecar_service_test.go +++ b/cl/phase1/network/services/blob_sidecar_service_test.go @@ -65,20 +65,24 @@ func TestBlobServiceUnsynced(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - blobService, _, _, _ := setupBlobSidecarService(t, ctrl, false) + blobService, _, _, _ := setupBlobSidecarService(t, ctrl, true) - require.Error(t, blobService.ProcessMessage(context.Background(), nil, &cltypes.BlobSidecar{})) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + require.Error(t, blobService.ProcessMessage(ctx, nil, &cltypes.BlobSidecar{})) } func TestBlobServiceInvalidIndex(t *testing.T) { ctrl := gomock.NewController(t) defer 
ctrl.Finish() - blobService, syncedData, _, _ := setupBlobSidecarService(t, ctrl, false) + blobService, syncedData, _, _ := setupBlobSidecarService(t, ctrl, true) stateObj, _, _ := getObjectsForBlobSidecarServiceTests(t) syncedData.OnHeadState(stateObj) - require.Error(t, blobService.ProcessMessage(context.Background(), nil, &cltypes.BlobSidecar{ + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + require.Error(t, blobService.ProcessMessage(ctx, nil, &cltypes.BlobSidecar{ Index: 99999, })) } @@ -87,12 +91,14 @@ func TestBlobServiceInvalidSubnet(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - blobService, syncedData, _, _ := setupBlobSidecarService(t, ctrl, false) + blobService, syncedData, _, _ := setupBlobSidecarService(t, ctrl, true) stateObj, _, _ := getObjectsForBlobSidecarServiceTests(t) syncedData.OnHeadState(stateObj) sn := uint64(99999) - require.Error(t, blobService.ProcessMessage(context.Background(), &sn, &cltypes.BlobSidecar{ + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + require.Error(t, blobService.ProcessMessage(ctx, &sn, &cltypes.BlobSidecar{ Index: 0, })) } @@ -109,7 +115,9 @@ func TestBlobServiceBadTimings(t *testing.T) { ethClock.EXPECT().GetCurrentSlot().Return(uint64(0)).AnyTimes() ethClock.EXPECT().IsSlotCurrentSlotWithMaximumClockDisparity(gomock.Any()).Return(false).AnyTimes() - require.Error(t, blobService.ProcessMessage(context.Background(), &sn, blobSidecar)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + require.Error(t, blobService.ProcessMessage(ctx, &sn, blobSidecar)) } func TestBlobServiceAlreadyHave(t *testing.T) { @@ -128,7 +136,9 @@ func TestBlobServiceAlreadyHave(t *testing.T) { ethClock.EXPECT().GetCurrentSlot().Return(uint64(0)).AnyTimes() ethClock.EXPECT().IsSlotCurrentSlotWithMaximumClockDisparity(gomock.Any()).Return(true).AnyTimes() - require.Error(t, blobService.ProcessMessage(context.Background(), &sn, blobSidecar)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + require.Error(t, blobService.ProcessMessage(ctx, &sn, blobSidecar)) } func TestBlobServiceDontHaveParentRoot(t *testing.T) { @@ -145,7 +155,9 @@ func TestBlobServiceDontHaveParentRoot(t *testing.T) { ethClock.EXPECT().GetCurrentSlot().Return(uint64(0)).AnyTimes() ethClock.EXPECT().IsSlotCurrentSlotWithMaximumClockDisparity(gomock.Any()).Return(true).AnyTimes() - require.Error(t, blobService.ProcessMessage(context.Background(), &sn, blobSidecar)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + require.Error(t, blobService.ProcessMessage(ctx, &sn, blobSidecar)) } func TestBlobServiceInvalidSidecarSlot(t *testing.T) { @@ -162,7 +174,9 @@ func TestBlobServiceInvalidSidecarSlot(t *testing.T) { ethClock.EXPECT().GetCurrentSlot().Return(uint64(0)).AnyTimes() ethClock.EXPECT().IsSlotCurrentSlotWithMaximumClockDisparity(gomock.Any()).Return(true).AnyTimes() - require.Error(t, blobService.ProcessMessage(context.Background(), &sn, blobSidecar)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + require.Error(t, blobService.ProcessMessage(ctx, &sn, blobSidecar)) } func TestBlobServiceSuccess(t *testing.T) { diff --git a/cl/phase1/network/services/bls_to_execution_change_service.go b/cl/phase1/network/services/bls_to_execution_change_service.go index 0f9ba191946..9591e6f271c 100644 --- a/cl/phase1/network/services/bls_to_execution_change_service.go +++ 
b/cl/phase1/network/services/bls_to_execution_change_service.go @@ -5,7 +5,6 @@ import ( "context" "fmt" - "github.com/Giulio2002/bls" libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon/cl/beacon/beaconevents" "github.com/ledgerwatch/erigon/cl/beacon/synced_data" @@ -19,14 +18,14 @@ import ( type blsToExecutionChangeService struct { operationsPool pool.OperationsPool emitters *beaconevents.Emitters - syncedDataManager *synced_data.SyncedDataManager + syncedDataManager synced_data.SyncedData beaconCfg *clparams.BeaconChainConfig } func NewBLSToExecutionChangeService( operationsPool pool.OperationsPool, emitters *beaconevents.Emitters, - syncedDataManager *synced_data.SyncedDataManager, + syncedDataManager synced_data.SyncedData, beaconCfg *clparams.BeaconChainConfig, ) BLSToExecutionChangeService { return &blsToExecutionChangeService{ @@ -46,20 +45,24 @@ func (s *blsToExecutionChangeService) ProcessMessage(ctx context.Context, subnet return ErrIgnore } change := msg.Message - state := s.syncedDataManager.HeadState() - if state == nil { + stateReader := s.syncedDataManager.HeadStateReader() + if stateReader == nil { + return ErrIgnore + } + stateMutator := s.syncedDataManager.HeadStateMutator() + if stateMutator == nil { return ErrIgnore } // [IGNORE] current_epoch >= CAPELLA_FORK_EPOCH, where current_epoch is defined by the current wall-clock time. - if !(state.Version() >= clparams.CapellaVersion) { + if !(stateReader.Version() >= clparams.CapellaVersion) { return ErrIgnore } // ref: https://github.com/ethereum/consensus-specs/blob/dev/specs/capella/beacon-chain.md#new-process_bls_to_execution_change // assert address_change.validator_index < len(state.validators) - validator, err := state.ValidatorForValidatorIndex(int(change.ValidatorIndex)) + validator, err := stateReader.ValidatorForValidatorIndex(int(change.ValidatorIndex)) if err != nil { - return fmt.Errorf("unable to retrieve state: %v", err) + return fmt.Errorf("unable to retrieve validator: %v", err) } wc := validator.WithdrawalCredentials() @@ -73,20 +76,20 @@ func (s *blsToExecutionChangeService) ProcessMessage(ctx context.Context, subnet // Check the validator's withdrawal credentials against the provided message. 
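// For reference, the two assertions this credential check performs (per the
// Capella process_bls_to_execution_change spec) can be read in isolation:
// the stored withdrawal credentials must carry the BLS withdrawal prefix,
// and their trailing 31 bytes must equal the SHA-256 hash of the claimed
// source pubkey. A standalone sketch, with the prefix constant inlined and
// crypto/sha256 standing in for utils.Sha256 (names are illustrative, not
// part of this change):
//
//	const blsWithdrawalPrefix = 0x00 // BLS_WITHDRAWAL_PREFIX
//
//	func checkBLSCredentials(wc [32]byte, fromPubkey [48]byte) error {
//		if wc[0] != blsWithdrawalPrefix {
//			return fmt.Errorf("not BLS withdrawal credentials")
//		}
//		h := sha256.Sum256(fromPubkey[:])
//		if !bytes.Equal(h[1:], wc[1:]) {
//			return fmt.Errorf("invalid withdrawal credentials hash")
//		}
//		return nil
//	}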
hashedFrom := utils.Sha256(change.From[:]) if !bytes.Equal(hashedFrom[1:], wc[1:]) { - return fmt.Errorf("invalid withdrawal credentials") + return fmt.Errorf("invalid withdrawal credentials hash") } // assert bls.Verify(address_change.from_bls_pubkey, signing_root, signed_address_change.signature) - genesisValidatorRoot := state.GenesisValidatorsRoot() + genesisValidatorRoot := stateReader.GenesisValidatorsRoot() domain, err := fork.ComputeDomain(s.beaconCfg.DomainBLSToExecutionChange[:], utils.Uint32ToBytes4(uint32(s.beaconCfg.GenesisForkVersion)), genesisValidatorRoot) if err != nil { return err } - signedRoot, err := fork.ComputeSigningRoot(change, domain) + signedRoot, err := computeSigningRoot(change, domain) if err != nil { return err } - valid, err := bls.Verify(msg.Signature[:], signedRoot[:], change.From[:]) + valid, err := blsVerify(msg.Signature[:], signedRoot[:], change.From[:]) if err != nil { return err } @@ -101,9 +104,9 @@ func (s *blsToExecutionChangeService) ProcessMessage(ctx context.Context, subnet // ) newWc := libcommon.Hash{} newWc[0] = byte(s.beaconCfg.ETH1AddressWithdrawalPrefixByte) - copy(wc[1:], make([]byte, 11)) - copy(wc[12:], change.To[:]) - state.SetWithdrawalCredentialForValidatorAtIndex(int(change.ValidatorIndex), newWc) + copy(newWc[1:], make([]byte, 11)) + copy(newWc[12:], change.To[:]) + stateMutator.SetWithdrawalCredentialForValidatorAtIndex(int(change.ValidatorIndex), newWc) s.operationsPool.BLSToExecutionChangesPool.Insert(msg.Signature, msg) return nil diff --git a/cl/phase1/network/services/bls_to_execution_change_service_test.go b/cl/phase1/network/services/bls_to_execution_change_service_test.go new file mode 100644 index 00000000000..df264c5c1f1 --- /dev/null +++ b/cl/phase1/network/services/bls_to_execution_change_service_test.go @@ -0,0 +1,208 @@ +package services + +import ( + "context" + "errors" + "fmt" + "log" + "testing" + + "github.com/ledgerwatch/erigon-lib/common" + mockState "github.com/ledgerwatch/erigon/cl/abstract/mock_services" + "github.com/ledgerwatch/erigon/cl/beacon/beaconevents" + mockSync "github.com/ledgerwatch/erigon/cl/beacon/synced_data/mock_services" + "github.com/ledgerwatch/erigon/cl/clparams" + "github.com/ledgerwatch/erigon/cl/cltypes" + "github.com/ledgerwatch/erigon/cl/cltypes/solid" + "github.com/ledgerwatch/erigon/cl/pool" + "github.com/ledgerwatch/erigon/cl/utils" + "github.com/stretchr/testify/suite" + "go.uber.org/mock/gomock" +) + +type blsToExecutionChangeTestSuite struct { + suite.Suite + gomockCtrl *gomock.Controller + operationsPool *pool.OperationsPool + emitters *beaconevents.Emitters + syncedData *mockSync.MockSyncedData + beaconCfg *clparams.BeaconChainConfig + + service BLSToExecutionChangeService + mockFuncs *mockFuncs +} + +func (t *blsToExecutionChangeTestSuite) SetupTest() { + t.gomockCtrl = gomock.NewController(t.T()) + t.operationsPool = &pool.OperationsPool{ + BLSToExecutionChangesPool: pool.NewOperationPool[common.Bytes96, *cltypes.SignedBLSToExecutionChange](10, "blsToExecutionChangesPool"), + } + t.syncedData = mockSync.NewMockSyncedData(t.gomockCtrl) + t.emitters = beaconevents.NewEmitters() + t.beaconCfg = &clparams.BeaconChainConfig{} + t.service = NewBLSToExecutionChangeService(*t.operationsPool, t.emitters, t.syncedData, t.beaconCfg) + // mock global functions + t.mockFuncs = &mockFuncs{ + ctrl: t.gomockCtrl, + } + computeSigningRoot = t.mockFuncs.ComputeSigningRoot + blsVerify = t.mockFuncs.BlsVerify +} + +func (t *blsToExecutionChangeTestSuite) TearDownTest() { + 
t.gomockCtrl.Finish()
+}
+
+func (t *blsToExecutionChangeTestSuite) TestProcessMessage() {
+	mockMsg := &cltypes.SignedBLSToExecutionChange{
+		Message: &cltypes.BLSToExecutionChange{
+			ValidatorIndex: 1,
+			From:           common.Bytes48{1, 2, 3, 4, 5, 6},
+			To:             common.Address{3, 2, 1},
+		},
+		Signature: [96]byte{1, 2, 3},
+	}
+
+	tests := []struct {
+		name        string
+		mock        func()
+		msg         *cltypes.SignedBLSToExecutionChange
+		wantErr     bool
+		specificErr error
+	}{
+		{
+			name: "signature already exists in pool",
+			mock: func() {
+				t.operationsPool.BLSToExecutionChangesPool.Insert(mockMsg.Signature, mockMsg)
+			},
+			msg:     mockMsg,
+			wantErr: true,
+		},
+		{
+			name: "version is less than CapellaVersion",
+			mock: func() {
+				mockStateReader := mockState.NewMockBeaconStateReader(t.gomockCtrl)
+				mockStateMutator := mockState.NewMockBeaconStateMutator(t.gomockCtrl)
+				mockStateReader.EXPECT().Version().Return(clparams.CapellaVersion - 1).Times(1)
+				t.syncedData.EXPECT().HeadStateReader().Return(mockStateReader).Times(1)
+				t.syncedData.EXPECT().HeadStateMutator().Return(mockStateMutator).Times(1)
+			},
+			msg:         mockMsg,
+			wantErr:     true,
+			specificErr: ErrIgnore,
+		},
+		{
+			name: "unable to retrieve validator",
+			mock: func() {
+				mockStateReader := mockState.NewMockBeaconStateReader(t.gomockCtrl)
+				mockStateMutator := mockState.NewMockBeaconStateMutator(t.gomockCtrl)
+				mockStateReader.EXPECT().Version().Return(clparams.CapellaVersion).Times(1)
+				mockStateReader.EXPECT().ValidatorForValidatorIndex(int(mockMsg.Message.ValidatorIndex)).Return(nil, errors.New("not found")).Times(1)
+				t.syncedData.EXPECT().HeadStateReader().Return(mockStateReader).Times(1)
+				t.syncedData.EXPECT().HeadStateMutator().Return(mockStateMutator).Times(1)
+			},
+			msg:     mockMsg,
+			wantErr: true,
+		},
+		{
+			name: "invalid withdrawal credentials prefix",
+			mock: func() {
+				mockStateReader := mockState.NewMockBeaconStateReader(t.gomockCtrl)
+				mockStateMutator := mockState.NewMockBeaconStateMutator(t.gomockCtrl)
+				mockValidator := solid.NewValidator()
+				mockValidator.SetWithdrawalCredentials([32]byte{1, 1, 1}) // wc[0] must equal BLS_WITHDRAWAL_PREFIX (0x00); 1 makes the prefix check fail
+				mockStateReader.EXPECT().Version().Return(clparams.CapellaVersion).Times(1)
+				mockStateReader.EXPECT().ValidatorForValidatorIndex(int(mockMsg.Message.ValidatorIndex)).Return(mockValidator, nil).Times(1)
+				t.syncedData.EXPECT().HeadStateReader().Return(mockStateReader).Times(1)
+				t.syncedData.EXPECT().HeadStateMutator().Return(mockStateMutator).Times(1)
+			},
+			msg:     mockMsg,
+			wantErr: true,
+		},
+		{
+			name: "hashed from is not equal to withdrawal credentials",
+			mock: func() {
+				mockStateReader := mockState.NewMockBeaconStateReader(t.gomockCtrl)
+				mockStateMutator := mockState.NewMockBeaconStateMutator(t.gomockCtrl)
+				mockValidator := solid.NewValidator()
+				mockValidator.SetWithdrawalCredentials([32]byte{0}) // wc[0] matches BLS_WITHDRAWAL_PREFIX, but the hash of From does not match wc[1:]
+				mockStateReader.EXPECT().Version().Return(clparams.CapellaVersion).Times(1)
+				mockStateReader.EXPECT().ValidatorForValidatorIndex(int(mockMsg.Message.ValidatorIndex)).Return(mockValidator, nil).Times(1)
+				t.syncedData.EXPECT().HeadStateReader().Return(mockStateReader).Times(1)
+				t.syncedData.EXPECT().HeadStateMutator().Return(mockStateMutator).Times(1)
+			},
+			msg:     mockMsg,
+			wantErr: true,
+		},
+		{
+			name: "invalid bls signature",
+			mock: func() {
+				mockStateReader := mockState.NewMockBeaconStateReader(t.gomockCtrl)
+				mockStateMutator := mockState.NewMockBeaconStateMutator(t.gomockCtrl)
+				mockValidator := solid.NewValidator()
+				hashedFrom :=
utils.Sha256(mockMsg.Message.From[:]) + wc := [32]byte{0} + copy(wc[1:], hashedFrom[1:]) + mockValidator.SetWithdrawalCredentials(wc) + mockStateReader.EXPECT().Version().Return(clparams.CapellaVersion).Times(1) + mockStateReader.EXPECT().ValidatorForValidatorIndex(int(mockMsg.Message.ValidatorIndex)).Return(mockValidator, nil).Times(1) + t.syncedData.EXPECT().HeadStateReader().Return(mockStateReader).Times(1) + t.syncedData.EXPECT().HeadStateMutator().Return(mockStateMutator).Times(1) + mockStateReader.EXPECT().GenesisValidatorsRoot().Return([32]byte{}).Times(1) + // bls verify + t.gomockCtrl.RecordCall(t.mockFuncs, "ComputeSigningRoot", mockMsg.Message, gomock.Any()).Return([32]byte{}, nil).Times(1) + t.gomockCtrl.RecordCall(t.mockFuncs, "BlsVerify", mockMsg.Signature[:], gomock.Any(), mockMsg.Message.From[:]).Return(false, nil).Times(1) + }, + msg: mockMsg, + wantErr: true, + }, + { + name: "pass", + mock: func() { + mockStateReader := mockState.NewMockBeaconStateReader(t.gomockCtrl) + mockStateMutator := mockState.NewMockBeaconStateMutator(t.gomockCtrl) + mockValidator := solid.NewValidator() + hashedFrom := utils.Sha256(mockMsg.Message.From[:]) + wc := [32]byte{0} + copy(wc[1:], hashedFrom[1:]) + mockValidator.SetWithdrawalCredentials(wc) + mockStateReader.EXPECT().Version().Return(clparams.CapellaVersion).Times(1) + mockStateReader.EXPECT().ValidatorForValidatorIndex(int(mockMsg.Message.ValidatorIndex)).Return(mockValidator, nil).Times(1) + t.syncedData.EXPECT().HeadStateReader().Return(mockStateReader).Times(1) + t.syncedData.EXPECT().HeadStateMutator().Return(mockStateMutator).Times(1) + mockStateReader.EXPECT().GenesisValidatorsRoot().Return([32]byte{}).Times(1) + // bls verify + t.gomockCtrl.RecordCall(t.mockFuncs, "ComputeSigningRoot", mockMsg.Message, gomock.Any()).Return([32]byte{}, nil).Times(1) + t.gomockCtrl.RecordCall(t.mockFuncs, "BlsVerify", mockMsg.Signature[:], gomock.Any(), mockMsg.Message.From[:]).Return(true, nil).Times(1) + // update withdrawal credentials + mockNewWc := common.Hash{byte(t.beaconCfg.ETH1AddressWithdrawalPrefixByte)} + copy(mockNewWc[1:], make([]byte, 11)) + copy(mockNewWc[12:], mockMsg.Message.To[:]) + mockStateMutator.EXPECT().SetWithdrawalCredentialForValidatorAtIndex(int(mockMsg.Message.ValidatorIndex), mockNewWc).Times(1) + }, + msg: mockMsg, + wantErr: false, + }, + } + + for _, tt := range tests { + log.Printf("Running test case: %s", tt.name) + t.SetupTest() + tt.mock() + err := t.service.ProcessMessage(context.Background(), nil, tt.msg) + if tt.wantErr { + t.Require().Error(err) + fmt.Printf("Error: %v\n", err) + if tt.specificErr != nil { + t.Require().Equal(tt.specificErr, err) + } + } else { + t.Require().NoError(err) + } + t.gomockCtrl.Satisfied() + } +} + +func TestBlsToExecutionChangeTestSuite(t *testing.T) { + suite.Run(t, new(blsToExecutionChangeTestSuite)) +} diff --git a/cl/phase1/network/services/global_mock_test.go b/cl/phase1/network/services/global_mock_test.go new file mode 100644 index 00000000000..0e960abb90a --- /dev/null +++ b/cl/phase1/network/services/global_mock_test.go @@ -0,0 +1,26 @@ +package services + +import ( + "github.com/ledgerwatch/erigon-lib/types/ssz" + "go.uber.org/mock/gomock" +) + +type mockFuncs struct { + ctrl *gomock.Controller +} + +func (m *mockFuncs) ComputeSigningRoot(obj ssz.HashableSSZ, domain []byte) ([32]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ComputeSigningRoot", obj, domain) + ret0, _ := ret[0].([32]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +func (m 
*mockFuncs) BlsVerify(pubkey, message, signature []byte) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BlsVerify", pubkey, message, signature) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} diff --git a/cl/phase1/network/services/proposer_slashing_service.go b/cl/phase1/network/services/proposer_slashing_service.go index cfbf36d7525..cdb59156f0d 100644 --- a/cl/phase1/network/services/proposer_slashing_service.go +++ b/cl/phase1/network/services/proposer_slashing_service.go @@ -4,11 +4,9 @@ import ( "context" "fmt" - "github.com/Giulio2002/bls" "github.com/ledgerwatch/erigon/cl/beacon/synced_data" "github.com/ledgerwatch/erigon/cl/clparams" "github.com/ledgerwatch/erigon/cl/cltypes" - "github.com/ledgerwatch/erigon/cl/fork" st "github.com/ledgerwatch/erigon/cl/phase1/core/state" "github.com/ledgerwatch/erigon/cl/phase1/core/state/lru" "github.com/ledgerwatch/erigon/cl/pool" @@ -17,7 +15,7 @@ import ( type proposerSlashingService struct { operationsPool pool.OperationsPool - syncedDataManager *synced_data.SyncedDataManager + syncedDataManager synced_data.SyncedData beaconCfg *clparams.BeaconChainConfig ethClock eth_clock.EthereumClock cache *lru.Cache[uint64, struct{}] @@ -25,7 +23,7 @@ type proposerSlashingService struct { func NewProposerSlashingService( operationsPool pool.OperationsPool, - syncedDataManager *synced_data.SyncedDataManager, + syncedDataManager synced_data.SyncedData, beaconCfg *clparams.BeaconChainConfig, ethClock eth_clock.EthereumClock, ) *proposerSlashingService { @@ -73,7 +71,7 @@ func (s *proposerSlashingService) ProcessMessage(ctx context.Context, subnet *ui } // Verify the proposer is slashable - state := s.syncedDataManager.HeadState() + state := s.syncedDataManager.HeadStateReader() if state == nil { return ErrIgnore } @@ -87,16 +85,16 @@ func (s *proposerSlashingService) ProcessMessage(ctx context.Context, subnet *ui // Verify signatures for both headers for _, signedHeader := range []*cltypes.SignedBeaconBlockHeader{msg.Header1, msg.Header2} { - domain, err := state.GetDomain(state.BeaconConfig().DomainBeaconProposer, st.GetEpochAtSlot(state.BeaconConfig(), signedHeader.Header.Slot)) + domain, err := state.GetDomain(s.beaconCfg.DomainBeaconProposer, st.GetEpochAtSlot(s.beaconCfg, signedHeader.Header.Slot)) if err != nil { return fmt.Errorf("unable to get domain: %v", err) } pk := proposer.PublicKey() - signingRoot, err := fork.ComputeSigningRoot(signedHeader, domain) + signingRoot, err := computeSigningRoot(signedHeader, domain) if err != nil { return fmt.Errorf("unable to compute signing root: %v", err) } - valid, err := bls.Verify(signedHeader.Signature[:], signingRoot[:], pk[:]) + valid, err := blsVerify(signedHeader.Signature[:], signingRoot[:], pk[:]) if err != nil { return fmt.Errorf("unable to verify signature: %v", err) } diff --git a/cl/phase1/network/services/proposer_slashing_service_test.go b/cl/phase1/network/services/proposer_slashing_service_test.go new file mode 100644 index 00000000000..f181a7b5406 --- /dev/null +++ b/cl/phase1/network/services/proposer_slashing_service_test.go @@ -0,0 +1,223 @@ +package services + +import ( + "context" + "errors" + "log" + "testing" + + "github.com/ledgerwatch/erigon-lib/common" + mockState "github.com/ledgerwatch/erigon/cl/abstract/mock_services" + mockSync "github.com/ledgerwatch/erigon/cl/beacon/synced_data/mock_services" + "github.com/ledgerwatch/erigon/cl/clparams" + "github.com/ledgerwatch/erigon/cl/cltypes" + "github.com/ledgerwatch/erigon/cl/cltypes/solid" 
+ "github.com/ledgerwatch/erigon/cl/pool" + "github.com/ledgerwatch/erigon/cl/utils/eth_clock" + "github.com/stretchr/testify/suite" + "go.uber.org/mock/gomock" +) + +type proposerSlashingTestSuite struct { + suite.Suite + gomockCtrl *gomock.Controller + operationsPool *pool.OperationsPool + syncedData *mockSync.MockSyncedData + beaconCfg *clparams.BeaconChainConfig + ethClock *eth_clock.MockEthereumClock + proposerSlashingService *proposerSlashingService + mockFuncs *mockFuncs +} + +func (t *proposerSlashingTestSuite) SetupTest() { + t.gomockCtrl = gomock.NewController(t.T()) + t.operationsPool = &pool.OperationsPool{ + ProposerSlashingsPool: pool.NewOperationPool[common.Bytes96, *cltypes.ProposerSlashing](10, "proposerSlashingsPool"), + } + t.syncedData = mockSync.NewMockSyncedData(t.gomockCtrl) + t.ethClock = eth_clock.NewMockEthereumClock(t.gomockCtrl) + t.beaconCfg = &clparams.BeaconChainConfig{ + SlotsPerEpoch: 2, + } + t.proposerSlashingService = NewProposerSlashingService(*t.operationsPool, t.syncedData, t.beaconCfg, t.ethClock) + // mock global functions + t.mockFuncs = &mockFuncs{ctrl: t.gomockCtrl} + computeSigningRoot = t.mockFuncs.ComputeSigningRoot + blsVerify = t.mockFuncs.BlsVerify +} + +func (t *proposerSlashingTestSuite) TearDownTest() { + t.gomockCtrl.Finish() +} + +func (t *proposerSlashingTestSuite) TestProcessMessage() { + mockProposerIndex := uint64(123) + mockMsg := &cltypes.ProposerSlashing{ + Header1: &cltypes.SignedBeaconBlockHeader{ + Header: &cltypes.BeaconBlockHeader{ + Slot: 1, + ProposerIndex: mockProposerIndex, + Root: common.Hash{1}, + }, + Signature: common.Bytes96{1, 2, 3}, + }, + Header2: &cltypes.SignedBeaconBlockHeader{ + Header: &cltypes.BeaconBlockHeader{ + Slot: 1, + ProposerIndex: mockProposerIndex, + Root: common.Hash{2}, + }, + Signature: common.Bytes96{4, 5, 6}, + }, + } + tests := []struct { + name string + mock func() + msg *cltypes.ProposerSlashing + wantErr bool + err error + }{ + { + name: "ignore proposer slashing", + mock: func() { + t.proposerSlashingService.cache.Add(mockProposerIndex, struct{}{}) + }, + msg: mockMsg, + wantErr: true, + err: ErrIgnore, + }, + { + name: "ignore proposer slashing in pool", + mock: func() { + t.operationsPool.ProposerSlashingsPool.Insert(pool.ComputeKeyForProposerSlashing(mockMsg), mockMsg) + }, + msg: mockMsg, + wantErr: true, + err: ErrIgnore, + }, + { + name: "non-matching slots", + mock: func() {}, + msg: &cltypes.ProposerSlashing{ + Header1: &cltypes.SignedBeaconBlockHeader{ + Header: &cltypes.BeaconBlockHeader{ + Slot: 1, + ProposerIndex: mockProposerIndex, + }, + }, + Header2: &cltypes.SignedBeaconBlockHeader{ + Header: &cltypes.BeaconBlockHeader{ + Slot: 2, + ProposerIndex: mockProposerIndex, + }, + }, + }, + wantErr: true, + }, + { + name: "non-matching proposer indices", + mock: func() {}, + msg: &cltypes.ProposerSlashing{ + Header1: &cltypes.SignedBeaconBlockHeader{ + Header: &cltypes.BeaconBlockHeader{ + Slot: 1, + ProposerIndex: mockProposerIndex, + }, + }, + Header2: &cltypes.SignedBeaconBlockHeader{ + Header: &cltypes.BeaconBlockHeader{ + Slot: 1, + ProposerIndex: mockProposerIndex + 1, + }, + }, + }, + wantErr: true, + }, + { + name: "empty head state", + mock: func() { + t.syncedData.EXPECT().HeadStateReader().Return(nil).Times(1) + }, + msg: mockMsg, + wantErr: true, + err: ErrIgnore, + }, + { + name: "validator not found", + mock: func() { + mockState := mockState.NewMockBeaconStateReader(t.gomockCtrl) + mockState.EXPECT().ValidatorForValidatorIndex(int(mockProposerIndex)).Return(nil, 
errors.New("not found")).Times(1) + t.syncedData.EXPECT().HeadStateReader().Return(mockState).Times(1) + }, + msg: mockMsg, + wantErr: true, + }, + { + name: "proposer is not slashable", + mock: func() { + mockState := mockState.NewMockBeaconStateReader(t.gomockCtrl) + mockValidator := solid.NewValidatorFromParameters( + [48]byte{}, + [32]byte{}, + 0, + false, + 0, + 0, + 0, + 0, + ) + mockState.EXPECT().ValidatorForValidatorIndex(int(mockProposerIndex)).Return(mockValidator, nil).Times(1) + t.syncedData.EXPECT().HeadStateReader().Return(mockState).Times(1) + t.ethClock.EXPECT().GetCurrentEpoch().Return(uint64(1)).Times(1) + }, + msg: mockMsg, + wantErr: true, + }, + { + name: "pass", + mock: func() { + mockState := mockState.NewMockBeaconStateReader(t.gomockCtrl) + mockValidator := solid.NewValidatorFromParameters( + [48]byte{}, + [32]byte{}, + 0, + false, + 0, + 0, + 2, + 2, + ) + t.syncedData.EXPECT().HeadStateReader().Return(mockState).Times(1) + mockState.EXPECT().ValidatorForValidatorIndex(int(mockProposerIndex)).Return(mockValidator, nil).Times(1) + t.ethClock.EXPECT().GetCurrentEpoch().Return(uint64(1)).Times(1) + + mockState.EXPECT().GetDomain(t.beaconCfg.DomainBeaconProposer, gomock.Any()).Return([]byte{}, nil).Times(2) + t.mockFuncs.ctrl.RecordCall(t.mockFuncs, "ComputeSigningRoot", mockMsg.Header1, []byte{}).Return([32]byte{}, nil).Times(1) + t.mockFuncs.ctrl.RecordCall(t.mockFuncs, "ComputeSigningRoot", mockMsg.Header2, []byte{}).Return([32]byte{}, nil).Times(1) + t.mockFuncs.ctrl.RecordCall(t.mockFuncs, "BlsVerify", gomock.Any(), gomock.Any(), gomock.Any()).Return(true, nil).Times(2) + }, + msg: mockMsg, + wantErr: false, + }, + } + + for _, tt := range tests { + log.Printf("Running test case: %s", tt.name) + t.SetupTest() + tt.mock() + err := t.proposerSlashingService.ProcessMessage(context.Background(), nil, tt.msg) + if tt.wantErr { + t.Assert().Error(err) + if tt.err != nil { + t.Assert().Equal(tt.err, err) + } + } else { + t.Assert().NoError(err) + } + t.gomockCtrl.Satisfied() + } +} + +func TestProposerSlashing(t *testing.T) { + suite.Run(t, new(proposerSlashingTestSuite)) +} diff --git a/cl/phase1/network/services/voluntary_exit_service.go b/cl/phase1/network/services/voluntary_exit_service.go index 925ed88e447..3e192864739 100644 --- a/cl/phase1/network/services/voluntary_exit_service.go +++ b/cl/phase1/network/services/voluntary_exit_service.go @@ -4,7 +4,6 @@ import ( "context" "fmt" - "github.com/Giulio2002/bls" "github.com/ledgerwatch/erigon/cl/beacon/beaconevents" "github.com/ledgerwatch/erigon/cl/beacon/synced_data" "github.com/ledgerwatch/erigon/cl/clparams" @@ -19,7 +18,7 @@ import ( type voluntaryExitService struct { operationsPool pool.OperationsPool emitters *beaconevents.Emitters - syncedDataManager *synced_data.SyncedDataManager + syncedDataManager synced_data.SyncedData beaconCfg *clparams.BeaconChainConfig ethClock eth_clock.EthereumClock } @@ -27,7 +26,7 @@ type voluntaryExitService struct { func NewVoluntaryExitService( operationsPool pool.OperationsPool, emitters *beaconevents.Emitters, - syncedDataManager *synced_data.SyncedDataManager, + syncedDataManager synced_data.SyncedData, beaconCfg *clparams.BeaconChainConfig, ethClock eth_clock.EthereumClock, ) VoluntaryExitService { @@ -52,7 +51,7 @@ func (s *voluntaryExitService) ProcessMessage(ctx context.Context, subnet *uint6 // ref: https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/beacon-chain.md#voluntary-exits // def process_voluntary_exit(state: BeaconState, signed_voluntary_exit: 
SignedVoluntaryExit) -> None: - state := s.syncedDataManager.HeadState() + state := s.syncedDataManager.HeadStateReader() if state == nil { return ErrIgnore } @@ -96,16 +95,16 @@ func (s *voluntaryExitService) ProcessMessage(ctx context.Context, subnet *uint6 if state.Version() < clparams.DenebVersion { domain, err = state.GetDomain(domainType, voluntaryExit.Epoch) } else if state.Version() >= clparams.DenebVersion { - domain, err = fork.ComputeDomain(domainType[:], utils.Uint32ToBytes4(uint32(state.BeaconConfig().CapellaForkVersion)), state.GenesisValidatorsRoot()) + domain, err = fork.ComputeDomain(domainType[:], utils.Uint32ToBytes4(uint32(s.beaconCfg.CapellaForkVersion)), state.GenesisValidatorsRoot()) } if err != nil { return err } - signingRoot, err := fork.ComputeSigningRoot(voluntaryExit, domain) + signingRoot, err := computeSigningRoot(voluntaryExit, domain) if err != nil { return err } - if valid, err := bls.Verify(msg.Signature[:], signingRoot[:], pk[:]); err != nil { + if valid, err := blsVerify(msg.Signature[:], signingRoot[:], pk[:]); err != nil { return err } else if !valid { return errors.New("ProcessVoluntaryExit: BLS verification failed") diff --git a/cl/phase1/network/services/voluntary_exit_service_test.go b/cl/phase1/network/services/voluntary_exit_service_test.go new file mode 100644 index 00000000000..4b041f002d4 --- /dev/null +++ b/cl/phase1/network/services/voluntary_exit_service_test.go @@ -0,0 +1,224 @@ +package services + +import ( + "context" + "log" + "testing" + + "github.com/ledgerwatch/erigon-lib/types/ssz" + mockState "github.com/ledgerwatch/erigon/cl/abstract/mock_services" + "github.com/ledgerwatch/erigon/cl/beacon/beaconevents" + mockSync "github.com/ledgerwatch/erigon/cl/beacon/synced_data/mock_services" + "github.com/ledgerwatch/erigon/cl/clparams" + "github.com/ledgerwatch/erigon/cl/cltypes" + "github.com/ledgerwatch/erigon/cl/cltypes/solid" + "github.com/ledgerwatch/erigon/cl/pool" + "github.com/ledgerwatch/erigon/cl/utils/eth_clock" + "github.com/pkg/errors" + "github.com/stretchr/testify/suite" + gomock "go.uber.org/mock/gomock" +) + +type voluntaryExitTestSuite struct { + suite.Suite + gomockCtrl *gomock.Controller + operationsPool *pool.OperationsPool + emitters *beaconevents.Emitters + syncedData *mockSync.MockSyncedData + ethClock *eth_clock.MockEthereumClock + beaconCfg *clparams.BeaconChainConfig + voluntaryExitService VoluntaryExitService + + mockFuncs *mockFuncs +} + +func (t *voluntaryExitTestSuite) SetupTest() { + computeSigningRoot = func(_ ssz.HashableSSZ, domain []byte) ([32]byte, error) { + return [32]byte{}, nil + } + t.gomockCtrl = gomock.NewController(t.T()) + t.emitters = beaconevents.NewEmitters() + t.operationsPool = &pool.OperationsPool{ + VoluntaryExitsPool: pool.NewOperationPool[uint64, *cltypes.SignedVoluntaryExit](10, "voluntaryExitsPool"), + } + t.syncedData = mockSync.NewMockSyncedData(t.gomockCtrl) + t.ethClock = eth_clock.NewMockEthereumClock(t.gomockCtrl) + t.beaconCfg = &clparams.BeaconChainConfig{} + t.voluntaryExitService = NewVoluntaryExitService(*t.operationsPool, t.emitters, t.syncedData, t.beaconCfg, t.ethClock) + // mock global functions + t.mockFuncs = &mockFuncs{ + ctrl: t.gomockCtrl, + } + blsVerify = t.mockFuncs.BlsVerify +} + +func (t *voluntaryExitTestSuite) TearDownTest() { +} + +func (t *voluntaryExitTestSuite) TestProcessMessage() { + curEpoch := uint64(100) + mockValidatorIndex := uint64(10) + mockMsg := &cltypes.SignedVoluntaryExit{ + VoluntaryExit: &cltypes.VoluntaryExit{ + Epoch: 1, + 
ValidatorIndex: mockValidatorIndex, + }, + Signature: [96]byte{}, + } + + tests := []struct { + name string + mock func() + msg *cltypes.SignedVoluntaryExit + wantErr bool + err error + }{ + { + name: "validator already in pool", + mock: func() { + t.operationsPool.VoluntaryExitsPool.Insert(mockValidatorIndex, mockMsg) + }, + msg: mockMsg, + wantErr: true, + err: ErrIgnore, + }, + { + name: "state is nil", + mock: func() { + t.syncedData.EXPECT().HeadStateReader().Return(nil) + }, + msg: mockMsg, + wantErr: true, + err: ErrIgnore, + }, + { + name: "validator not found", + mock: func() { + mockState := mockState.NewMockBeaconStateReader(t.gomockCtrl) + mockState.EXPECT().ValidatorForValidatorIndex(int(mockValidatorIndex)).Return(nil, errors.New("not found")).Times(1) + t.syncedData.EXPECT().HeadStateReader().Return(mockState).Times(1) + }, + msg: mockMsg, + wantErr: true, + err: ErrIgnore, + }, + { + name: "validator is not active", + mock: func() { + mockState := mockState.NewMockBeaconStateReader(t.gomockCtrl) + mockValidator := solid.NewValidatorFromParameters( + [48]byte{}, + [32]byte{}, + 0, + false, + 0, + 0, + 0, + 0, + ) + mockState.EXPECT().ValidatorForValidatorIndex(int(mockValidatorIndex)).Return(mockValidator, nil).Times(1) + t.syncedData.EXPECT().HeadStateReader().Return(mockState).Times(1) + t.ethClock.EXPECT().GetCurrentEpoch().Return(curEpoch).Times(1) + }, + msg: mockMsg, + wantErr: true, + }, + { + name: "validator has been initialized", + mock: func() { + mockState := mockState.NewMockBeaconStateReader(t.gomockCtrl) + mockValidator := solid.NewValidatorFromParameters( + [48]byte{}, + [32]byte{}, + 0, + false, + 0, + 0, + curEpoch+1, + 0, + ) + mockState.EXPECT().ValidatorForValidatorIndex(int(mockValidatorIndex)).Return(mockValidator, nil).Times(1) + t.syncedData.EXPECT().HeadStateReader().Return(mockState).Times(1) + t.ethClock.EXPECT().GetCurrentEpoch().Return(curEpoch).Times(1) + }, + msg: mockMsg, + wantErr: true, + }, + { + name: "bls verify failed", + mock: func() { + mockState := mockState.NewMockBeaconStateReader(t.gomockCtrl) + mockValidator := solid.NewValidatorFromParameters( + [48]byte{}, + [32]byte{}, + 0, + false, + 0, + 0, + curEpoch+1, + 0, + ) + mockState.EXPECT().ValidatorForValidatorIndex(int(mockValidatorIndex)).Return(mockValidator, nil).Times(1) + t.syncedData.EXPECT().HeadStateReader().Return(mockState).Times(1) + t.ethClock.EXPECT().GetCurrentEpoch().Return(curEpoch).Times(1) + t.beaconCfg.FarFutureEpoch = mockValidator.ExitEpoch() + mockState.EXPECT().Version().Return(clparams.AltairVersion).Times(1) + mockState.EXPECT().GetDomain(t.beaconCfg.DomainVoluntaryExit, mockMsg.VoluntaryExit.Epoch).Return([]byte{}, nil).Times(1) + computeSigningRoot = func(_ ssz.HashableSSZ, domain []byte) ([32]byte, error) { + return [32]byte{}, nil + } + t.gomockCtrl.RecordCall(t.mockFuncs, "BlsVerify", gomock.Any(), gomock.Any(), gomock.Any()).Return(false, nil).Times(1) + }, + msg: mockMsg, + wantErr: true, + }, + { + name: "success", + mock: func() { + mockState := mockState.NewMockBeaconStateReader(t.gomockCtrl) + mockValidator := solid.NewValidatorFromParameters( + [48]byte{}, + [32]byte{}, + 0, + false, + 0, + 0, + curEpoch+1, + 0, + ) + mockState.EXPECT().ValidatorForValidatorIndex(int(mockValidatorIndex)).Return(mockValidator, nil).Times(1) + t.syncedData.EXPECT().HeadStateReader().Return(mockState).Times(1) + t.ethClock.EXPECT().GetCurrentEpoch().Return(curEpoch).Times(1) + t.beaconCfg.FarFutureEpoch = mockValidator.ExitEpoch() + 
mockState.EXPECT().Version().Return(clparams.AltairVersion).Times(1) + mockState.EXPECT().GetDomain(t.beaconCfg.DomainVoluntaryExit, mockMsg.VoluntaryExit.Epoch).Return([]byte{}, nil).Times(1) + computeSigningRoot = func(_ ssz.HashableSSZ, domain []byte) ([32]byte, error) { + return [32]byte{}, nil + } + t.gomockCtrl.RecordCall(t.mockFuncs, "BlsVerify", gomock.Any(), gomock.Any(), gomock.Any()).Return(true, nil).Times(1) + }, + msg: mockMsg, + wantErr: false, + }, + } + + for _, tt := range tests { + log.Printf("VoluntaryExit running test case: %s", tt.name) + t.SetupTest() + tt.mock() + err := t.voluntaryExitService.ProcessMessage(context.Background(), nil, tt.msg) + if tt.wantErr { + t.Require().Error(err) + if tt.err != nil { + t.Require().Equal(tt.err, err) + } + log.Printf("error msg: %v", err.Error()) + } else { + t.Require().NoError(err) + } + } +} + +func TestVoluntaryExit(t *testing.T) { + suite.Run(t, new(voluntaryExitTestSuite)) +} diff --git a/cl/phase1/network/subnets/subnets.go b/cl/phase1/network/subnets/subnets.go index 1fba86c09ee..abebfff4ab6 100644 --- a/cl/phase1/network/subnets/subnets.go +++ b/cl/phase1/network/subnets/subnets.go @@ -1,6 +1,7 @@ package subnets import ( + "github.com/ledgerwatch/erigon/cl/abstract" "github.com/ledgerwatch/erigon/cl/cltypes/solid" "github.com/ledgerwatch/erigon/cl/phase1/core/state" ) @@ -64,7 +65,7 @@ func ComputeSubnetForAttestation(committeePerSlot, slot, committeeIndex, slotsPe return (committeesSinceEpochStart + committeeIndex) % attSubnetCount } -func ComputeCommitteeCountPerSlot(s state.BeaconStateReader, slot uint64, slotsPerEpoch uint64) uint64 { +func ComputeCommitteeCountPerSlot(s abstract.BeaconStateReader, slot uint64, slotsPerEpoch uint64) uint64 { epoch := slot / slotsPerEpoch return s.CommitteeCount(epoch) } From 0accb139ea15ad5cd002ecf635c7d5aa7965111e Mon Sep 17 00:00:00 2001 From: awskii Date: Tue, 14 May 2024 03:00:38 +0100 Subject: [PATCH 38/48] close unwind cursors (#10302) --- erigon-lib/state/domain.go | 9 +++++---- eth/stagedsync/stage_execute.go | 4 ---- 2 files changed, 5 insertions(+), 8 deletions(-) diff --git a/erigon-lib/state/domain.go b/erigon-lib/state/domain.go index cc31fcc5edc..be79dd0f9f3 100644 --- a/erigon-lib/state/domain.go +++ b/erigon-lib/state/domain.go @@ -1184,6 +1184,7 @@ func (dt *DomainRoTx) Unwind(ctx context.Context, rwTx kv.RwTx, step, txNumUnwin if err != nil { return fmt.Errorf("historyRange %s: %w", dt.ht.h.filenameBase, err) } + defer histRng.Close() seen := make(map[string]struct{}) restored := dt.NewWriter() @@ -1196,11 +1197,13 @@ func (dt *DomainRoTx) Unwind(ctx context.Context, rwTx kv.RwTx, step, txNumUnwin ic, err := dt.ht.IdxRange(k, int(txNumUnwindTo)-1, 0, order.Desc, -1, rwTx) if err != nil { + ic.Close() return err } if ic.HasNext() { nextTxn, err := ic.Next() if err != nil { + ic.Close() return err } restored.SetTxNum(nextTxn) // todo what if we actually had to decrease current step to provide correct update? 
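// The Unwind changes in this hunk add defer histRng.Close() for the history
// range and thread an explicit ic.Close() through every early return of the
// per-key loop. Where an iterator's lifetime is confined to a single key,
// hoisting the loop body into a helper lets one defer give the same
// guarantee with less repetition; a minimal sketch under that assumption
// (closableIter and open are illustrative stand-ins for the iterator
// returned by IdxRange, not types from this change):
//
//	type closableIter interface {
//		HasNext() bool
//		Next() (uint64, error)
//		Close()
//	}
//
//	func processKey(open func() (closableIter, error)) error {
//		ic, err := open()
//		if err != nil {
//			return err // open failed; nothing to close
//		}
//		defer ic.Close() // one defer covers every return below
//		if ic.HasNext() {
//			if _, err := ic.Next(); err != nil {
//				return err
//			}
//		}
//		return nil
//	}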
@@ -1209,12 +1212,10 @@ func (dt *DomainRoTx) Unwind(ctx context.Context, rwTx kv.RwTx, step, txNumUnwin } //fmt.Printf("[%s] unwinding %x ->'%x'\n", dt.d.filenameBase, k, v) if err := restored.addValue(k, nil, v); err != nil { + ic.Close() return err } - type closable interface { - Close() - } - ic.(closable).Close() + ic.Close() seen[string(k)] = struct{}{} } diff --git a/eth/stagedsync/stage_execute.go b/eth/stagedsync/stage_execute.go index 53ba5c40ea7..7d6b1b2c8ec 100644 --- a/eth/stagedsync/stage_execute.go +++ b/eth/stagedsync/stage_execute.go @@ -382,10 +382,6 @@ func unwindExec3(u *UnwindState, s *StageState, txc wrap.TxContainer, ctx contex if err := rawdb.DeleteNewerEpochs(txc.Tx, u.UnwindPoint+1); err != nil { return fmt.Errorf("delete newer epochs: %w", err) } - - if err = domains.Flush(ctx, txc.Tx); err != nil { - return fmt.Errorf("uwind flush domains: %w", err) - } fmt.Printf("unwindv3: %d -> %d done within %s\n", s.BlockNumber, u.UnwindPoint, time.Since(start)) return nil } From 9ded7046473d3b46f270e4ebf2107140a8c2221b Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Tue, 14 May 2024 09:13:31 +0700 Subject: [PATCH 39/48] recsplit: allow pass NoFsync flag as config field (#10293) --- cl/abstract/mock_services/beacon_state_mutator_mock.go | 2 +- cl/phase1/network/services/voluntary_exit_service_test.go | 2 +- core/snaptype/block_types.go | 2 +- erigon-lib/recsplit/recsplit.go | 5 +++++ erigon-lib/state/domain.go | 8 +++----- erigon-lib/state/history.go | 4 +--- erigon-lib/state/inverted_index.go | 3 ++- erigon-lib/state/merge.go | 5 +---- polygon/sync/service.go | 2 +- 9 files changed, 16 insertions(+), 17 deletions(-) diff --git a/cl/abstract/mock_services/beacon_state_mutator_mock.go b/cl/abstract/mock_services/beacon_state_mutator_mock.go index b265cebddab..ce2eedf4276 100644 --- a/cl/abstract/mock_services/beacon_state_mutator_mock.go +++ b/cl/abstract/mock_services/beacon_state_mutator_mock.go @@ -2120,4 +2120,4 @@ func (c *MockBeaconStateMutatorSetWithdrawalCredentialForValidatorAtIndexCall) D func (c *MockBeaconStateMutator) SetPreviousEpochAttestations(attestations *solid.ListSSZ[*solid.PendingAttestation]) { -} \ No newline at end of file +} diff --git a/cl/phase1/network/services/voluntary_exit_service_test.go b/cl/phase1/network/services/voluntary_exit_service_test.go index 4b041f002d4..fcae428abbb 100644 --- a/cl/phase1/network/services/voluntary_exit_service_test.go +++ b/cl/phase1/network/services/voluntary_exit_service_test.go @@ -16,7 +16,7 @@ import ( "github.com/ledgerwatch/erigon/cl/utils/eth_clock" "github.com/pkg/errors" "github.com/stretchr/testify/suite" - gomock "go.uber.org/mock/gomock" + "go.uber.org/mock/gomock" ) type voluntaryExitTestSuite struct { diff --git a/core/snaptype/block_types.go b/core/snaptype/block_types.go index f38a8c25d9e..0c193d9a1b9 100644 --- a/core/snaptype/block_types.go +++ b/core/snaptype/block_types.go @@ -105,7 +105,7 @@ var ( []snaptype.Index{Indexes.BodyHash}, snaptype.IndexBuilderFunc( func(ctx context.Context, info snaptype.FileInfo, salt uint32, _ *chain.Config, tmpDir string, p *background.Progress, lvl log.Lvl, logger log.Logger) (err error) { - num := make([]byte, 8) + num := make([]byte, binary.MaxVarintLen64) if err := snaptype.BuildIndex(ctx, info, salt, info.From, tmpDir, log.LvlDebug, p, func(idx *recsplit.RecSplit, i, offset uint64, _ []byte) error { if p != nil { diff --git a/erigon-lib/recsplit/recsplit.go b/erigon-lib/recsplit/recsplit.go index 659dc9d2f12..4e18cdff9a0 100644 --- 
a/erigon-lib/recsplit/recsplit.go +++ b/erigon-lib/recsplit/recsplit.go @@ -136,6 +136,8 @@ type RecSplitArgs struct { EtlBufLimit datasize.ByteSize Salt *uint32 // Hash seed (salt) for the hash function used for allocating the initial buckets - need to be generated randomly LeafSize uint16 + + NoFsync bool // fsync is enabled by default, but tests can manually disable } // NewRecSplit creates a new RecSplit instance with given number of keys and given bucket size @@ -207,6 +209,9 @@ func NewRecSplit(args RecSplitArgs, logger log.Logger) (*RecSplit, error) { } rs.startSeed = args.StartSeed rs.count = make([]uint16, rs.secondaryAggrBound) + if args.NoFsync { + rs.DisableFsync() + } return rs, nil } diff --git a/erigon-lib/state/domain.go b/erigon-lib/state/domain.go index be79dd0f9f3..c0a657c7178 100644 --- a/erigon-lib/state/domain.go +++ b/erigon-lib/state/domain.go @@ -1006,8 +1006,9 @@ func (d *Domain) buildMapIdx(ctx context.Context, fromStep, toStep uint64, data TmpDir: d.dirs.Tmp, IndexFile: idxPath, Salt: d.salt, + NoFsync: d.noFsync, } - return buildIndex(ctx, data, d.compression, idxPath, false, cfg, ps, d.logger, d.noFsync) + return buildIndex(ctx, data, d.compression, idxPath, false, cfg, ps, d.logger) } func (d *Domain) missedBtreeIdxFiles() (l []*filesItem) { @@ -1101,7 +1102,7 @@ func (d *Domain) BuildMissedIndices(ctx context.Context, g *errgroup.Group, ps * } } -func buildIndex(ctx context.Context, d *seg.Decompressor, compressed FileCompression, idxPath string, values bool, cfg recsplit.RecSplitArgs, ps *background.ProgressSet, logger log.Logger, noFsync bool) error { +func buildIndex(ctx context.Context, d *seg.Decompressor, compressed FileCompression, idxPath string, values bool, cfg recsplit.RecSplitArgs, ps *background.ProgressSet, logger log.Logger) error { _, fileName := filepath.Split(idxPath) count := d.Count() if !values { @@ -1121,9 +1122,6 @@ func buildIndex(ctx context.Context, d *seg.Decompressor, compressed FileCompres } defer rs.Close() rs.LogLvl(log.LvlTrace) - if noFsync { - rs.DisableFsync() - } var keyPos, valPos uint64 for { diff --git a/erigon-lib/state/history.go b/erigon-lib/state/history.go index 9c9fe9a863c..62383165a33 100644 --- a/erigon-lib/state/history.go +++ b/erigon-lib/state/history.go @@ -337,15 +337,13 @@ func (h *History) buildVI(ctx context.Context, historyIdxPath string, hist, efHi TmpDir: h.dirs.Tmp, IndexFile: historyIdxPath, Salt: h.salt, + NoFsync: h.noFsync, }, h.logger) if err != nil { return "", fmt.Errorf("create recsplit: %w", err) } defer rs.Close() rs.LogLvl(log.LvlTrace) - if h.noFsync { - rs.DisableFsync() - } var historyKey []byte var txKey [8]byte diff --git a/erigon-lib/state/inverted_index.go b/erigon-lib/state/inverted_index.go index c331526d52e..4d8c20c8211 100644 --- a/erigon-lib/state/inverted_index.go +++ b/erigon-lib/state/inverted_index.go @@ -1566,8 +1566,9 @@ func (ii *InvertedIndex) buildMapIdx(ctx context.Context, fromStep, toStep uint6 TmpDir: ii.dirs.Tmp, IndexFile: idxPath, Salt: ii.salt, + NoFsync: ii.noFsync, } - return buildIndex(ctx, data, ii.compression, idxPath, false, cfg, ps, ii.logger, ii.noFsync) + return buildIndex(ctx, data, ii.compression, idxPath, false, cfg, ps, ii.logger) } func (ii *InvertedIndex) integrateDirtyFiles(sf InvertedFiles, txNumFrom, txNumTo uint64) { diff --git a/erigon-lib/state/merge.go b/erigon-lib/state/merge.go index 4f9491e6e15..6da62192e3a 100644 --- a/erigon-lib/state/merge.go +++ b/erigon-lib/state/merge.go @@ -955,15 +955,12 @@ func (ht *HistoryRoTx) mergeFiles(ctx 
context.Context, indexFiles, historyFiles TmpDir: ht.h.dirs.Tmp, IndexFile: idxPath, Salt: ht.h.salt, + NoFsync: ht.h.noFsync, }, ht.h.logger); err != nil { return nil, nil, fmt.Errorf("create recsplit: %w", err) } rs.LogLvl(log.LvlTrace) - if ht.h.noFsync { - rs.DisableFsync() - } - var ( txKey [8]byte historyKey []byte diff --git a/polygon/sync/service.go b/polygon/sync/service.go index 531a390ac14..6210ad1bcea 100644 --- a/polygon/sync/service.go +++ b/polygon/sync/service.go @@ -10,7 +10,7 @@ import ( "github.com/ledgerwatch/erigon-lib/chain" "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/direct" - executionproto "github.com/ledgerwatch/erigon-lib/gointerfaces/executionproto" + "github.com/ledgerwatch/erigon-lib/gointerfaces/executionproto" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/eth/stagedsync" "github.com/ledgerwatch/erigon/p2p/sentry" From 627c3dc1158837e8b7e10bb12f7f23676941a099 Mon Sep 17 00:00:00 2001 From: Andrew Ashikhmin <34320705+yperbasis@users.noreply.github.com> Date: Tue, 14 May 2024 10:34:42 +0200 Subject: [PATCH 40/48] remove nils from p2p logs (#10329) Copy PR #10303 into `main` Co-authored-by: Alex Sharov --- p2p/server.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/p2p/server.go b/p2p/server.go index cbc8daf5b73..1897da93c45 100644 --- a/p2p/server.go +++ b/p2p/server.go @@ -1221,7 +1221,7 @@ func (srv *Server) listErrors() []interface{} { srv.errorsMu.Lock() defer srv.errorsMu.Unlock() - list := make([]interface{}, len(srv.errors)*2) + list := make([]interface{}, 0, len(srv.errors)*2) for err, count := range srv.errors { list = append(list, err, count) } From 7ae53419ddade79d8eb46cabc78d28e503bdddf0 Mon Sep 17 00:00:00 2001 From: racytech <82003208+racytech@users.noreply.github.com> Date: Tue, 14 May 2024 15:17:30 +0600 Subject: [PATCH 41/48] rlp: disallow trailing bytes in transactions (#10296) Co-authored-by: yperbasis --- core/types/encdec_test.go | 2 +- core/types/legacy_tx.go | 9 ++++--- core/types/transaction.go | 48 +++++++++++++++++----------------- core/types/transaction_test.go | 43 ++++++++++++++++++++++++++++++ rlp/decode.go | 5 ++++ 5 files changed, 78 insertions(+), 29 deletions(-) diff --git a/core/types/encdec_test.go b/core/types/encdec_test.go index 97951782588..f446939d285 100644 --- a/core/types/encdec_test.go +++ b/core/types/encdec_test.go @@ -436,7 +436,7 @@ func TestBodyEncodeDecodeRLP(t *testing.T) { } if err := compareBodies(t, enc, dec); err != nil { - t.Errorf("error: compareRawBodies: %v", err) + t.Errorf("error: compareBodies: %v", err) } } } diff --git a/core/types/legacy_tx.go b/core/types/legacy_tx.go index 1d96520a85c..efe0d7ed583 100644 --- a/core/types/legacy_tx.go +++ b/core/types/legacy_tx.go @@ -289,10 +289,11 @@ func (tx *LegacyTx) EncodeRLP(w io.Writer) error { return nil } -// DecodeRLP decodes LegacyTx but with the list token already consumed and encodingSize being presented -func (tx *LegacyTx) DecodeRLP(s *rlp.Stream, encodingSize uint64) error { - var err error - s.NewList(encodingSize) +func (tx *LegacyTx) DecodeRLP(s *rlp.Stream) error { + _, err := s.List() + if err != nil { + return fmt.Errorf("legacy tx must be a list: %w", err) + } if tx.Nonce, err = s.Uint(); err != nil { return fmt.Errorf("read Nonce: %w", err) } diff --git a/core/types/transaction.go b/core/types/transaction.go index 3dabeabbb8d..07135d7ef92 100644 --- a/core/types/transaction.go +++ b/core/types/transaction.go @@ -78,6 +78,7 @@ type Transaction interface { 
RawSignatureValues() (*uint256.Int, *uint256.Int, *uint256.Int) EncodingSize() int EncodeRLP(w io.Writer) error + DecodeRLP(s *rlp.Stream) error MarshalBinary(w io.Writer) error // Sender returns the address derived from the signature (V, R, S) using secp256k1 // elliptic curve and an error if it failed deriving or upon an incorrect @@ -114,19 +115,19 @@ func (t BinaryTransactions) EncodeIndex(i int, w *bytes.Buffer) { } func DecodeRLPTransaction(s *rlp.Stream, blobTxnsAreWrappedWithBlobs bool) (Transaction, error) { - kind, size, err := s.Kind() + kind, _, err := s.Kind() if err != nil { return nil, err } if rlp.List == kind { tx := &LegacyTx{} - if err = tx.DecodeRLP(s, size); err != nil { + if err = tx.DecodeRLP(s); err != nil { return nil, err } return tx, nil } if rlp.String != kind { - return nil, fmt.Errorf("Not an RLP encoded transaction. If this is a canonical encoded transaction, use UnmarshalTransactionFromBinary instead. Got %v for kind, expected String", kind) + return nil, fmt.Errorf("not an RLP encoded transaction. If this is a canonical encoded transaction, use UnmarshalTransactionFromBinary instead. Got %v for kind, expected String", kind) } // Decode the EIP-2718 typed TX envelope. var b []byte @@ -164,7 +165,14 @@ func DecodeTransaction(data []byte) (Transaction, error) { return UnmarshalTransactionFromBinary(data, blobTxnsAreWrappedWithBlobs) } s := rlp.NewStream(bytes.NewReader(data), uint64(len(data))) - return DecodeRLPTransaction(s, blobTxnsAreWrappedWithBlobs) + tx, err := DecodeRLPTransaction(s, blobTxnsAreWrappedWithBlobs) + if err != nil { + return nil, err + } + if s.Remaining() != 0 { + return nil, fmt.Errorf("trailing bytes after rlp encoded transaction") + } + return tx, nil } // Parse transaction without envelope. @@ -173,32 +181,17 @@ func UnmarshalTransactionFromBinary(data []byte, blobTxnsAreWrappedWithBlobs boo return nil, fmt.Errorf("short input: %v", len(data)) } s := rlp.NewStream(bytes.NewReader(data[1:]), uint64(len(data)-1)) + var t Transaction switch data[0] { case AccessListTxType: - t := &AccessListTx{} - if err := t.DecodeRLP(s); err != nil { - return nil, err - } - return t, nil + t = &AccessListTx{} case DynamicFeeTxType: - t := &DynamicFeeTransaction{} - if err := t.DecodeRLP(s); err != nil { - return nil, err - } - return t, nil + t = &DynamicFeeTransaction{} case BlobTxType: if blobTxnsAreWrappedWithBlobs { - t := &BlobTxWrapper{} - if err := t.DecodeRLP(s); err != nil { - return nil, err - } - return t, nil + t = &BlobTxWrapper{} } else { - t := &BlobTx{} - if err := t.DecodeRLP(s); err != nil { - return nil, err - } - return t, nil + t = &BlobTx{} } default: if data[0] >= 0x80 { @@ -207,6 +200,13 @@ func UnmarshalTransactionFromBinary(data []byte, blobTxnsAreWrappedWithBlobs boo } return nil, ErrTxTypeNotSupported } + if err := t.DecodeRLP(s); err != nil { + return nil, err + } + if s.Remaining() != 0 { + return nil, fmt.Errorf("trailing bytes after rlp encoded transaction") + } + return t, nil } // Remove everything but the payload body from the wrapper - this is not used, for reference only diff --git a/core/types/transaction_test.go b/core/types/transaction_test.go index dfa5fd217b7..669389e635a 100644 --- a/core/types/transaction_test.go +++ b/core/types/transaction_test.go @@ -826,3 +826,46 @@ func TestShortUnwrapLib(t *testing.T) { assertEqual(blobTx, &wrappedBlobTx.Tx) } + +func TestTrailingBytes(t *testing.T) { + // Create a valid transaction + valid_rlp_transaction := []byte{201, 38, 38, 128, 128, 107, 58, 42, 38, 42} + + // 
Test valid transaction + transactions := make([][]byte, 1) + transactions[0] = valid_rlp_transaction + + for _, txn := range transactions { + if TypedTransactionMarshalledAsRlpString(txn) { + panic("TypedTransactionMarshalledAsRlpString() error") + } + } + + _, err := DecodeTransactions(transactions) + if err != nil { + fmt.Println("Valid transaction errored") + panic(err) // @audit this will pass + } + + // Append excess bytes to the blob transaction + num_excess := 100 + malicious_rlp_transaction := make([]byte, len(valid_rlp_transaction)+num_excess) + copy(malicious_rlp_transaction, valid_rlp_transaction) + + // Validate transactions are different + assert.NotEqual(t, malicious_rlp_transaction, valid_rlp_transaction) + + // Test malicious transaction + transactions[0] = malicious_rlp_transaction + + for _, txn := range transactions { + if TypedTransactionMarshalledAsRlpString(txn) { + panic("TypedTransactionMarshalledAsRlpString() error") + } + } + + _, err = DecodeTransactions(transactions) + if err == nil { + panic("Malicious transaction has not errored!") // @audit this panic occurs + } +} diff --git a/rlp/decode.go b/rlp/decode.go index 4824946e558..1c16d3fd3fe 100644 --- a/rlp/decode.go +++ b/rlp/decode.go @@ -672,6 +672,11 @@ func NewListStream(r io.Reader, len uint64) *Stream { return s } +// Remaining returns the number of bytes remaining to be read +func (s *Stream) Remaining() uint64 { + return s.remaining +} + // Bytes reads an RLP string and returns its contents as a byte slice. // If the input does not contain an RLP string, the returned // error will be ErrExpectedString. From 0335398f4e74a66537c5f9dc980bb1d9f53c285c Mon Sep 17 00:00:00 2001 From: awskii Date: Tue, 14 May 2024 13:05:15 +0100 Subject: [PATCH 42/48] add metrics about unwind and commitment (#10300) Co-authored-by: alex.sharov --- .../dashboards/erigon_internals.json | 1675 ++++++----------- core/state/rw_v3.go | 11 + erigon-lib/commitment/commitment.go | 16 +- erigon-lib/commitment/hex_patricia_hashed.go | 6 +- erigon-lib/metrics/register.go | 13 +- erigon-lib/metrics/set.go | 9 +- erigon-lib/state/aggregator.go | 5 - erigon-lib/state/aggregator_files.go | 42 - erigon-lib/state/domain.go | 4 + erigon-lib/state/domain_shared.go | 2 + erigon-lib/state/metrics.go | 59 + 11 files changed, 706 insertions(+), 1136 deletions(-) create mode 100644 erigon-lib/state/metrics.go diff --git a/cmd/prometheus/dashboards/erigon_internals.json b/cmd/prometheus/dashboards/erigon_internals.json index 93a9c7e5c7e..445e2ddc5b3 100644 --- a/cmd/prometheus/dashboards/erigon_internals.json +++ b/cmd/prometheus/dashboards/erigon_internals.json @@ -1,47 +1,4 @@ { -
}, "gridPos": { - "h": 5, + "h": 6, "w": 8, "x": 8, "y": 1 @@ -278,7 +235,7 @@ "sort": "none" } }, - "pluginVersion": "8.0.6", + "pluginVersion": "10.3.4", "targets": [ { "datasource": { @@ -360,192 +317,12 @@ "overrides": [] }, "gridPos": { - "h": 4, + "h": 6, "w": 8, "x": 16, "y": 1 }, "id": 200, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "10.3.4", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "code", - "expr": "prune_seconds{quantile=\"$quantile\",instance=~\"$instance\"}", - "instant": false, - "legendFormat": "{{instance}} {{type}} ", - "range": true, - "refId": "A" - } - ], - "title": "Prune, seconds", - "transparent": true, - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 2 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 6, - "w": 8, - "x": 16, - "y": 5 - }, - "id": 202, - "options": { - "displayMode": "lcd", - "maxVizHeight": 300, - "minVizHeight": 16, - "minVizWidth": 8, - "namePlacement": "auto", - "orientation": "horizontal", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "showUnfilled": true, - "sizing": "auto", - "valueMode": "color" - }, - "pluginVersion": "10.4.2", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "code", - "expr": "domain_prunable{instance=~\"$instance\",type=\"domain\"}", - "hide": false, - "legendFormat": "{{instance}}-{{type}}-{{table}}", - "range": true, - "refId": "B" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "code", - "expr": "domain_prunable{instance=~\"$instance\",type=\"history\",table!=\"commitment\"}/1562500", - "hide": false, - "legendFormat": "{{instance}}-{{type}}-{{table}}", - "range": true, - "refId": "C" - } - ], - "title": "pruning availability, steps", - "type": "bargauge" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 10, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "never", - "spanNulls": true, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "ops" - }, - "overrides": [] - }, - "gridPos": { - "h": 5, - "w": 8, - "x": 8, - "y": 6 - }, - "id": 158, "options": { "legend": { "calcs": [ @@ -560,7 +337,7 @@ "sort": "none" } }, - "pluginVersion": "8.0.6", + "pluginVersion": "10.3.4", "targets": [ { "datasource": { @@ 
-568,17 +345,14 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", - "exemplar": true, - "expr": "rate(sync{instance=~\"$instance\",stage=\"execution\"}[$rate_interval])", - "format": "time_series", - "interval": "", - "intervalFactor": 1, - "legendFormat": "{{ stage }}: {{instance}}", + "expr": "prune_seconds{quantile=\"$quantile\",instance=~\"$instance\"}", + "instant": false, + "legendFormat": "{{ type }}: {{ instance }}", "range": true, "refId": "A" } ], - "title": "Sync Stages progress rate", + "title": "Prune, seconds", "type": "timeseries" }, { @@ -645,7 +419,7 @@ "overrides": [] }, "gridPos": { - "h": 8, + "h": 9, "w": 8, "x": 0, "y": 7 @@ -695,24 +469,12 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", - "expr": "sum(rate(domain_commitment_keys[$rate_interval])) by (instance)", + "expr": "irate(domain_commitment_keys{instance=~\"$instance\"}[$__rate_interval])", "hide": false, "legendFormat": "keys committed: {{instance}}", "range": true, "refId": "A" }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "code", - "expr": "irate(domain_commitment_updates{instance=~\"$instance\"}[$rate_interval])", - "hide": false, - "legendFormat": "commitment node updates: {{instance}}", - "range": true, - "refId": "C" - }, { "datasource": { "type": "prometheus", @@ -721,7 +483,7 @@ "editorMode": "code", "expr": "irate(domain_commitment_updates_applied{instance=~\"$instance\"}[$rate_interval])", "hide": false, - "legendFormat": "commitment trie node updates: {{instance}}", + "legendFormat": "prefixes committed {{instance}}", "range": true, "refId": "F" }, @@ -759,7 +521,7 @@ "axisPlacement": "auto", "barAlignment": 0, "drawStyle": "line", - "fillOpacity": 0, + "fillOpacity": 10, "gradientMode": "none", "hideFrom": { "legend": false, @@ -767,14 +529,14 @@ "viz": false }, "insertNulls": false, - "lineInterpolation": "smooth", + "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, "scaleDistribution": { "type": "linear" }, - "showPoints": "auto", - "spanNulls": false, + "showPoints": "never", + "spanNulls": true, "stacking": { "group": "A", "mode": "none" @@ -796,27 +558,30 @@ "value": 80 } ] - } + }, + "unit": "ops" }, "overrides": [] }, "gridPos": { - "h": 5, + "h": 6, "w": 8, "x": 8, - "y": 11 + "y": 7 }, - "id": 198, + "id": 158, "options": { "legend": { - "calcs": [], + "calcs": [ + "mean" + ], "displayMode": "list", "placement": "bottom", "showLegend": true }, "tooltip": { "mode": "multi", - "sort": "desc" + "sort": "none" } }, "pluginVersion": "10.3.4", @@ -827,17 +592,198 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", - "expr": "domain_running_merges{instance=~\"$instance\"}", - "legendFormat": "running merges: {{instance}}", + "exemplar": true, + "expr": "rate(sync{instance=~\"$instance\",stage=\"execution\"}[$rate_interval])", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{ stage }}: {{instance}}", "range": true, "refId": "A" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "code", + } + ], + "title": "Sync Stages progress rate", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 2 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 8, 
+ "x": 16, + "y": 7 + }, + "id": 202, + "options": { + "displayMode": "lcd", + "maxVizHeight": 300, + "minVizHeight": 16, + "minVizWidth": 8, + "namePlacement": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showUnfilled": true, + "sizing": "auto", + "valueMode": "color" + }, + "pluginVersion": "10.4.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "domain_prunable{instance=~\"$instance\",type=\"domain\"}", + "hide": false, + "legendFormat": "{{instance}}-{{type}}-{{table}}", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "domain_prunable{instance=~\"$instance\",type=\"history\",table!=\"commitment\"}/1562500", + "hide": false, + "legendFormat": "{{instance}}-{{type}}-{{table}}", + "range": true, + "refId": "C" + } + ], + "title": "pruning availability, steps", + "type": "bargauge" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "smooth", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 8, + "x": 8, + "y": 13 + }, + "id": 198, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "10.3.4", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "domain_running_merges{instance=~\"$instance\"}", + "legendFormat": "running merges: {{instance}}", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", "expr": "domain_running_collations{instance=~\"$instance\"}", "hide": false, "legendFormat": "running collations: {{instance}}", @@ -887,15 +833,15 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", - "expr": "domain_wal_flushes{instance=~\"$instance\"}", + "expr": "domain_running_unwind{instance=~\"$instance\"}", "hide": false, "instant": false, - "legendFormat": "WAL flushes {{instance}}", + "legendFormat": "running unwind {{instance}}", "range": true, - "refId": "F" + "refId": "G" } ], - "title": "State: running collate/merge/prune", + "title": "State: running collate/merge/prune/unwind", "type": "timeseries" }, { @@ -962,7 +908,7 @@ "h": 5, "w": 8, "x": 16, - "y": 11 + "y": 13 }, "id": 199, "options": { @@ -980,7 +926,7 @@ "sort": "none" } }, - "pluginVersion": "8.0.6", + "pluginVersion": "10.3.4", "targets": [ { "datasource": { @@ -1058,39 +1004,13 @@ }, 
"unit": "s" }, - "overrides": [ - { - "__systemRef": "hideSeriesFrom", - "matcher": { - "id": "byNames", - "options": { - "mode": "exclude", - "names": [ - "prune took [index]: mainnet3-1:6061", - "prune took [index]: mainnet3-3:6061" - ], - "prefix": "All except:", - "readOnly": true - } - }, - "properties": [ - { - "id": "custom.hideFrom", - "value": { - "legend": false, - "tooltip": false, - "viz": true - } - } - ] - } - ] + "overrides": [] }, "gridPos": { - "h": 6, + "h": 7, "w": 8, "x": 0, - "y": 15 + "y": 16 }, "id": 112, "options": { @@ -1115,10 +1035,10 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", - "expr": "rate(domain_collate_took_sum{instance=~\"$instance\"}[$rate_interval])", + "expr": "rate(domain_collate_took_sum{instance=~\"$instance\",type=\"domain\"}[$rate_interval])", "format": "time_series", "instant": false, - "legendFormat": "collation took: {{instance}}", + "legendFormat": "collation [domain]: {{instance}}", "range": true, "refId": "A" }, @@ -1128,9 +1048,9 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", - "expr": "rate(domain_step_took_sum{instance=~\"$instance\"}[$rate_interval])", + "expr": "rate(domain_step_took_sum{instance=~\"$instance\"}[$__rate_interval])", "hide": false, - "legendFormat": "step took: {{instance}}", + "legendFormat": "step: {{instance}}", "range": true, "refId": "C" }, @@ -1142,7 +1062,7 @@ "editorMode": "code", "expr": "rate(domain_prune_took_sum{instance=~\"$instance\"}[$rate_interval])", "hide": false, - "legendFormat": "prune took [{{type}}]: {{instance}}", + "legendFormat": "prune [{{type}}]: {{instance}}", "range": true, "refId": "B" }, @@ -1154,7 +1074,7 @@ "editorMode": "code", "expr": "rate(domain_commitment_took_sum{instance=~\"$instance\"}[$rate_interval])", "hide": false, - "legendFormat": "commitment took: {{instance}}", + "legendFormat": "commitment: {{instance}}", "range": true, "refId": "D" }, @@ -1164,16 +1084,42 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", - "exemplar": false, - "expr": "rate(domain_commitment_write_took_sum{instance=~\"$instance\"}[$rate_interval])", + "expr": "rate(domain_collate_took_sum{instance=~\"$instance\",type=\"index\"}[$rate_interval])", + "format": "time_series", "hide": false, "instant": false, - "legendFormat": "commitment update write took: {{instance}}", + "legendFormat": "collation [idx]: {{instance}}", "range": true, - "refId": "F" + "refId": "E" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "rate(domain_unwind_took{instance=~\"$instance\",type=\"domain\"}[$rate_interval])", + "hide": false, + "instant": false, + "legendFormat": "unwind [domain] {{label_name}}", + "range": true, + "refId": "G" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "rate(domain_unwind_took{instance=~\"$instance\",type=\"shared\"}[$rate_interval])", + "hide": false, + "instant": false, + "legendFormat": "unwind [SharedDomain] {{label_name}}", + "range": true, + "refId": "H" } ], - "title": "State: timins", + "title": "State: timings", "type": "timeseries" }, { @@ -1241,7 +1187,7 @@ "h": 5, "w": 8, "x": 8, - "y": 16 + "y": 18 }, "id": 194, "options": { @@ -1258,7 +1204,7 @@ "sort": "none" } }, - "pluginVersion": "8.0.6", + "pluginVersion": "10.3.4", "targets": [ { "datasource": { @@ -1358,7 +1304,7 @@ "h": 5, "w": 8, "x": 16, - "y": 16 + "y": 18 }, "id": 201, "options": { @@ -1435,7 +1381,7 @@ "h": 1, "w": 24, "x": 0, - "y": 21 + "y": 23 }, "id": 17, 
"panels": [], @@ -1506,8 +1452,7 @@ } ] }, - "unit": "ops", - "unitScale": true + "unit": "ops" }, "overrides": [] }, @@ -1515,7 +1460,7 @@ "h": 5, "w": 8, "x": 0, - "y": 22 + "y": 24 }, "id": 141, "options": { @@ -1530,7 +1475,7 @@ "sort": "none" } }, - "pluginVersion": "8.0.6", + "pluginVersion": "10.3.4", "targets": [ { "datasource": { @@ -1605,41 +1550,15 @@ } ] }, - "unit": "s", - "unitScale": true + "unit": "s" }, - "overrides": [ - { - "__systemRef": "hideSeriesFrom", - "matcher": { - "id": "byNames", - "options": { - "mode": "exclude", - "names": [ - "sync: mainnet3-1:6061" - ], - "prefix": "All except:", - "readOnly": true - } - }, - "properties": [ - { - "id": "custom.hideFrom", - "value": { - "legend": false, - "tooltip": false, - "viz": true - } - } - ] - } - ] + "overrides": [] }, "gridPos": { - "h": 9, + "h": 5, "w": 16, "x": 8, - "y": 22 + "y": 24 }, "id": 166, "options": { @@ -1656,7 +1575,7 @@ "sort": "none" } }, - "pluginVersion": "8.0.6", + "pluginVersion": "10.3.4", "targets": [ { "datasource": { @@ -1898,8 +1817,7 @@ } ] }, - "unit": "decbytes", - "unitScale": true + "unit": "decbytes" }, "overrides": [] }, @@ -1907,7 +1825,7 @@ "h": 5, "w": 8, "x": 0, - "y": 27 + "y": 29 }, "id": 159, "options": { @@ -2006,42 +1924,15 @@ } ] }, - "unit": "short", - "unitScale": true + "unit": "short" }, - "overrides": [ - { - "__systemRef": "hideSeriesFrom", - "matcher": { - "id": "byNames", - "options": { - "mode": "exclude", - "names": [ - "cow: mainnet3-1:6061", - "cow: mainnet3-3:6061" - ], - "prefix": "All except:", - "readOnly": true - } - }, - "properties": [ - { - "id": "custom.hideFrom", - "value": { - "legend": false, - "tooltip": false, - "viz": true - } - } - ] - } - ] + "overrides": [] }, "gridPos": { - "h": 7, + "h": 9, "w": 16, "x": 8, - "y": 31 + "y": 29 }, "id": 168, "options": { @@ -2058,7 +1949,7 @@ "sort": "none" } }, - "pluginVersion": "8.0.6", + "pluginVersion": "10.3.4", "targets": [ { "datasource": { @@ -2313,8 +2204,7 @@ } ] }, - "unit": "decbytes", - "unitScale": true + "unit": "decbytes" }, "overrides": [] }, @@ -2322,7 +2212,7 @@ "h": 6, "w": 8, "x": 0, - "y": 32 + "y": 34 }, "id": 167, "options": { @@ -2339,7 +2229,7 @@ "sort": "none" } }, - "pluginVersion": "8.0.6", + "pluginVersion": "10.3.4", "targets": [ { "datasource": { @@ -2375,6 +2265,7 @@ "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, + "description": "", "fieldConfig": { "defaults": { "color": { @@ -2402,7 +2293,7 @@ "scaleDistribution": { "type": "linear" }, - "showPoints": "auto", + "showPoints": "never", "spanNulls": false, "stacking": { "group": "A", @@ -2425,44 +2316,17 @@ } ] }, - "unit": "short", - "unitScale": true + "unit": "short" }, - "overrides": [ - { - "__systemRef": "hideSeriesFrom", - "matcher": { - "id": "byNames", - "options": { - "mode": "exclude", - "names": [ - "gc_leaf: mainnet3-3:6061", - "gc_leaf: mainnet3-1:6061" - ], - "prefix": "All except:", - "readOnly": true - } - }, - "properties": [ - { - "id": "custom.hideFrom", - "value": { - "legend": false, - "tooltip": false, - "viz": true - } - } - ] - } - ] + "overrides": [] }, "gridPos": { - "h": 6, - "w": 8, - "x": 0, + "h": 4, + "w": 16, + "x": 8, "y": 38 }, - "id": 169, + "id": 150, "options": { "legend": { "calcs": [ @@ -2477,7 +2341,7 @@ "sort": "none" } }, - "pluginVersion": "8.0.6", + "pluginVersion": "10.3.4", "targets": [ { "datasource": { @@ -2485,9 +2349,9 @@ "uid": "${DS_PROMETHEUS}" }, "exemplar": true, - "expr": "db_gc_leaf{instance=~\"$instance\"}", + "expr": 
"rate(process_minor_pagefaults_total{instance=~\"$instance\"}[$rate_interval])", "interval": "", - "legendFormat": "gc_leaf: {{instance}}", + "legendFormat": "soft: {{instance}}", "refId": "A" }, { @@ -2496,28 +2360,14 @@ "uid": "${DS_PROMETHEUS}" }, "exemplar": true, - "expr": "db_gc_overflow{instance=~\"$instance\"}", + "expr": "rate(process_major_pagefaults_total{instance=~\"$instance\"}[$rate_interval])", "hide": false, "interval": "", - "legendFormat": "gc_overflow: {{instance}}", + "legendFormat": "hard: {{instance}}", "refId": "B" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "code", - "exemplar": true, - "expr": "exec_steps_in_db{instance=~\"$instance\"}/100", - "hide": false, - "interval": "", - "legendFormat": "exec_steps_in_db: {{instance}}", - "range": true, - "refId": "E" } ], - "title": "GC and State", + "title": "getrusage: minflt - soft page faults (reclaims), majflt - hard faults", "type": "timeseries" }, { @@ -2525,7 +2375,6 @@ "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "description": "", "fieldConfig": { "defaults": { "color": { @@ -2553,7 +2402,7 @@ "scaleDistribution": { "type": "linear" }, - "showPoints": "never", + "showPoints": "auto", "spanNulls": false, "stacking": { "group": "A", @@ -2576,44 +2425,17 @@ } ] }, - "unit": "short", - "unitScale": true + "unit": "short" }, - "overrides": [ - { - "__systemRef": "hideSeriesFrom", - "matcher": { - "id": "byNames", - "options": { - "mode": "exclude", - "names": [ - "hard: mainnet3-1:6061", - "hard: mainnet3-3:6061" - ], - "prefix": "All except:", - "readOnly": true - } - }, - "properties": [ - { - "id": "custom.hideFrom", - "value": { - "legend": false, - "tooltip": false, - "viz": true - } - } - ] - } - ] + "overrides": [] }, "gridPos": { "h": 6, - "w": 16, - "x": 8, - "y": 38 + "w": 8, + "x": 0, + "y": 40 }, - "id": 150, + "id": 169, "options": { "legend": { "calcs": [ @@ -2628,7 +2450,7 @@ "sort": "none" } }, - "pluginVersion": "8.0.6", + "pluginVersion": "10.3.4", "targets": [ { "datasource": { @@ -2636,9 +2458,9 @@ "uid": "${DS_PROMETHEUS}" }, "exemplar": true, - "expr": "rate(process_minor_pagefaults_total{instance=~\"$instance\"}[$rate_interval])", + "expr": "db_gc_leaf{instance=~\"$instance\"}", "interval": "", - "legendFormat": "soft: {{instance}}", + "legendFormat": "gc_leaf: {{instance}}", "refId": "A" }, { @@ -2647,14 +2469,28 @@ "uid": "${DS_PROMETHEUS}" }, "exemplar": true, - "expr": "rate(process_major_pagefaults_total{instance=~\"$instance\"}[$rate_interval])", + "expr": "db_gc_overflow{instance=~\"$instance\"}", "hide": false, "interval": "", - "legendFormat": "hard: {{instance}}", + "legendFormat": "gc_overflow: {{instance}}", "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "exec_steps_in_db{instance=~\"$instance\"}/100", + "hide": false, + "interval": "", + "legendFormat": "exec_steps_in_db: {{instance}}", + "range": true, + "refId": "E" } ], - "title": "getrusage: minflt - soft page faults (reclaims), majflt - hard faults", + "title": "GC and State", "type": "timeseries" }, { @@ -2711,16 +2547,15 @@ "value": 80 } ] - }, - "unitScale": true + } }, "overrides": [] }, "gridPos": { - "h": 8, - "w": 16, + "h": 4, + "w": 15, "x": 8, - "y": 44 + "y": 42 }, "id": 191, "options": { @@ -2735,7 +2570,7 @@ "sort": "none" } }, - "pluginVersion": "8.0.6", + "pluginVersion": "10.3.4", "targets": [ { "datasource": { @@ -2932,7 +2767,7 @@ "h": 1, "w": 
24, "x": 0, - "y": 52 + "y": 46 }, "id": 134, "panels": [], @@ -2952,58 +2787,94 @@ "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, + "description": "", "fieldConfig": { "defaults": { "color": { - "mode": "thresholds" + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } }, "mappings": [], "thresholds": { "mode": "absolute", - "steps": [] + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] }, - "unit": "short", - "unitScale": true + "unit": "decbytes" }, "overrides": [] }, "gridPos": { - "h": 18, + "h": 6, "w": 8, "x": 0, - "y": 53 + "y": 47 }, - "id": 165, + "id": 148, "options": { - "colorMode": "value", - "graphMode": "none", - "justifyMode": "auto", - "orientation": "horizontal", - "reduceOptions": { + "legend": { "calcs": [ - "range" + "max" ], - "fields": "", - "values": false - }, - "showPercentChange": false, - "text": { - "titleSize": 14, - "valueSize": 14 + "displayMode": "list", + "placement": "bottom", + "showLegend": true }, - "textMode": "auto", - "wideLayout": true + "tooltip": { + "mode": "multi", + "sort": "none" + } }, - "pluginVersion": "10.3.5", + "pluginVersion": "10.3.4", "targets": [ { "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "expr": "process_io_read_syscalls_total{instance=~\"$instance\"}", + "exemplar": true, + "expr": "process_virtual_memory_bytes{instance=~\"$instance\"}", + "hide": true, "interval": "", - "legendFormat": "process_io_read_syscalls_total: {{instance}}", + "legendFormat": "resident virtual mem: {{instance}}", "refId": "A" }, { @@ -3011,10 +2882,11 @@ "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "expr": "process_io_write_syscalls_total{instance=~\"$instance\"}", - "hide": false, + "exemplar": true, + "expr": "process_resident_memory_anon_bytes{instance=~\"$instance\"}", + "hide": true, "interval": "", - "legendFormat": "process_io_write_syscalls_total: {{instance}}", + "legendFormat": "resident anon mem: {{instance}}", "refId": "B" }, { @@ -3022,10 +2894,11 @@ "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "expr": "process_minor_pagefaults_total{instance=~\"$instance\"}", + "exemplar": true, + "expr": "process_resident_memory_bytes{instance=~\"$instance\"}", "hide": false, "interval": "", - "legendFormat": "process_minor_pagefaults_total: {{instance}}", + "legendFormat": "resident mem: {{instance}}", "refId": "C" }, { @@ -3033,10 +2906,10 @@ "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "expr": "process_major_pagefaults_total{instance=~\"$instance\"}", + "expr": "mem_data{instance=~\"$instance\"}", "hide": false, "interval": "", - "legendFormat": "process_major_pagefaults_total: {{instance}}", + "legendFormat": "data: {{instance}}", "refId": "D" }, { @@ -3044,10 +2917,10 @@ "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "expr": "process_io_storage_read_bytes_total{instance=~\"$instance\"}", + "expr": "mem_stack{instance=~\"$instance\"}", "hide": false, "interval": "", - "legendFormat": 
"process_io_storage_read_bytes_total: {{instance}}", + "legendFormat": "stack: {{instance}}", "refId": "E" }, { @@ -3055,10 +2928,10 @@ "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "expr": "process_io_storage_written_bytes_total{instance=~\"$instance\"}", + "expr": "mem_locked{instance=~\"$instance\"}", "hide": false, "interval": "", - "legendFormat": "process_io_storage_written_bytes_total: {{instance}}", + "legendFormat": "locked: {{instance}}", "refId": "F" }, { @@ -3066,92 +2939,15 @@ "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "expr": "db_pgops_newly{instance=~\"$instance\"}", - "hide": false, - "interval": "", - "legendFormat": "pgops_newly: {{instance}}", - "refId": "H" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "expr": "db_pgops_cow{instance=~\"$instance\"}", - "hide": false, - "interval": "", - "legendFormat": "pgops_cow: {{instance}}", - "refId": "I" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "expr": "db_pgops_clone{instance=~\"$instance\"}", - "hide": false, - "interval": "", - "legendFormat": "pgops_clone: {{instance}}", - "refId": "J" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "expr": "db_pgops_split{instance=~\"$instance\"}", - "hide": false, - "interval": "", - "legendFormat": "pgops_split: {{instance}}", - "refId": "K" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "expr": "db_pgops_merge{instance=~\"$instance\"}", - "hide": false, - "interval": "", - "legendFormat": "pgops_merge: {{instance}}", - "refId": "L" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "expr": "db_pgops_spill{instance=~\"$instance\"}", + "expr": "mem_swap{instance=~\"$instance\"}", "hide": false, "interval": "", - "legendFormat": "pgops_spill: {{instance}}", + "legendFormat": "swap: {{instance}}", "refId": "G" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "expr": "db_pgops_unspill{instance=~\"$instance\"}", - "hide": false, - "interval": "", - "legendFormat": "pgops_unspill: {{instance}}", - "refId": "M" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "expr": "db_pgops_wops{instance=~\"$instance\"}", - "hide": false, - "interval": "", - "legendFormat": "pgops_wops: {{instance}}", - "refId": "N" } ], - "title": "Rusage Total (\"last value\" - \"first value\" on selected period)", - "type": "stat" + "title": "mem: resident set size", + "type": "timeseries" }, { "datasource": { @@ -3209,42 +3005,15 @@ } ] }, - "unit": "none", - "unitScale": true + "unit": "none" }, - "overrides": [ - { - "__systemRef": "hideSeriesFrom", - "matcher": { - "id": "byNames", - "options": { - "mode": "exclude", - "names": [ - "out: mainnet3-1:6061", - "out: mainnet3-3:6061" - ], - "prefix": "All except:", - "readOnly": true - } - }, - "properties": [ - { - "id": "custom.hideFrom", - "value": { - "legend": false, - "tooltip": false, - "viz": true - } - } - ] - } - ] + "overrides": [] }, "gridPos": { "h": 6, "w": 8, "x": 8, - "y": 53 + "y": 47 }, "id": 155, "options": { @@ -3261,7 +3030,7 @@ "sort": "none" } }, - "pluginVersion": "8.0.6", + "pluginVersion": "10.3.4", "targets": [ { "datasource": { @@ -3351,8 +3120,7 @@ } ] }, - "unit": "cps", - "unitScale": true + "unit": "cps" }, "overrides": [] }, @@ -3360,7 +3128,7 @@ "h": 6, "w": 8, "x": 16, - "y": 53 + "y": 47 }, "id": 153, "options": { @@ -3377,7 +3145,7 @@ "sort": "none" } }, - 
"pluginVersion": "8.0.6", + "pluginVersion": "10.3.4", "targets": [ { "datasource": { @@ -3401,6 +3169,7 @@ "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, + "description": "", "fieldConfig": { "defaults": { "color": { @@ -3429,7 +3198,7 @@ "type": "linear" }, "showPoints": "never", - "spanNulls": false, + "spanNulls": true, "stacking": { "group": "A", "mode": "none" @@ -3451,44 +3220,17 @@ } ] }, - "unit": "short", - "unitScale": true + "unit": "short" }, - "overrides": [ - { - "__systemRef": "hideSeriesFrom", - "matcher": { - "id": "byNames", - "options": { - "mode": "exclude", - "names": [ - "read: mainnet3-3:6061", - "read: mainnet3-1:6061" - ], - "prefix": "All except:", - "readOnly": true - } - }, - "properties": [ - { - "id": "custom.hideFrom", - "value": { - "legend": false, - "tooltip": false, - "viz": true - } - } - ] - } - ] + "overrides": [] }, "gridPos": { "h": 6, "w": 8, - "x": 8, - "y": 59 + "x": 0, + "y": 53 }, - "id": 85, + "id": 86, "options": { "legend": { "calcs": [ @@ -3503,19 +3245,21 @@ "sort": "none" } }, - "pluginVersion": "8.0.6", + "pluginVersion": "10.3.4", "targets": [ { "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, + "editorMode": "code", "exemplar": true, - "expr": "rate(process_io_storage_read_bytes_total{instance=~\"$instance\"}[$rate_interval])", + "expr": "rate(go_memstats_mallocs_total{instance=~\"$instance\"}[$rate_interval])", "format": "time_series", "interval": "", "intervalFactor": 1, - "legendFormat": "read: {{instance}}", + "legendFormat": "memstats_mallocs_total: {{ instance }}", + "range": true, "refId": "A" }, { @@ -3523,125 +3267,19 @@ "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, + "editorMode": "code", "exemplar": true, - "expr": "rate(process_io_storage_written_bytes_total{instance=~\"$instance\"}[$rate_interval])", + "expr": "rate(go_memstats_frees_total{instance=~\"$instance\"}[$rate_interval])", "format": "time_series", + "hide": false, "interval": "", "intervalFactor": 1, - "legendFormat": "write: {{instance}}", - "refId": "B" - } - ], - "title": "Disk bytes/sec", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "never", - "spanNulls": true, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "none", - "unitScale": true - }, - "overrides": [] - }, - "gridPos": { - "h": 6, - "w": 8, - "x": 16, - "y": 59 - }, - "id": 128, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "multi", - "sort": "none" - } - }, - "pluginVersion": "8.0.6", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "code", - "expr": "go_goroutines{instance=~\"$instance\"}", - "instant": false, 
- "interval": "", - "legendFormat": "goroutines: {{instance}}", - "refId": "A" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "code", - "expr": "go_threads{instance=~\"$instance\"}", - "instant": false, - "interval": "", - "legendFormat": "threads: {{instance}}", + "legendFormat": "memstats_frees_total: {{ instance }}", + "range": true, "refId": "B" } ], - "title": "GO Goroutines and Threads", + "title": "Process Mem: allocate objects/sec, free", "type": "timeseries" }, { @@ -3649,7 +3287,6 @@ "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "description": "", "fieldConfig": { "defaults": { "color": { @@ -3678,7 +3315,7 @@ "type": "linear" }, "showPoints": "never", - "spanNulls": true, + "spanNulls": false, "stacking": { "group": "A", "mode": "none" @@ -3700,8 +3337,7 @@ } ] }, - "unit": "decbytes", - "unitScale": true + "unit": "short" }, "overrides": [] }, @@ -3709,9 +3345,9 @@ "h": 6, "w": 8, "x": 8, - "y": 65 + "y": 53 }, - "id": 154, + "id": 85, "options": { "legend": { "calcs": [ @@ -3726,21 +3362,19 @@ "sort": "none" } }, - "pluginVersion": "8.0.6", + "pluginVersion": "10.3.4", "targets": [ { "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "editorMode": "code", "exemplar": true, - "expr": "go_memstats_stack_sys_bytes{instance=~\"$instance\"}", + "expr": "rate(process_io_storage_read_bytes_total{instance=~\"$instance\"}[$rate_interval])", "format": "time_series", "interval": "", "intervalFactor": 1, - "legendFormat": "stack_sys: {{ instance }}", - "range": true, + "legendFormat": "read: {{instance}}", "refId": "A" }, { @@ -3748,83 +3382,16 @@ "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "editorMode": "code", "exemplar": true, - "expr": "go_memstats_sys_bytes{instance=~\"$instance\"}", - "format": "time_series", - "hide": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "sys: {{ instance }}", - "range": true, - "refId": "B" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "code", - "exemplar": true, - "expr": "go_memstats_stack_inuse_bytes{instance=~\"$instance\"}", - "format": "time_series", - "hide": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "stack_inuse: {{ instance }}", - "range": true, - "refId": "C" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "code", - "exemplar": true, - "expr": "go_memstats_mspan_sys_bytes{instance=~\"$instance\"}", - "format": "time_series", - "hide": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "mspan_sys: {{ instance }}", - "range": true, - "refId": "D" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "code", - "exemplar": true, - "expr": "go_memstats_mcache_sys_bytes{instance=~\"$instance\"}", - "format": "time_series", - "hide": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "mcache_sys: {{ instance }}", - "range": true, - "refId": "E" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "code", - "exemplar": true, - "expr": "go_memstats_heap_alloc_bytes{instance=~\"$instance\"}", + "expr": "rate(process_io_storage_written_bytes_total{instance=~\"$instance\"}[$rate_interval])", "format": "time_series", - "hide": false, "interval": "", "intervalFactor": 1, - "legendFormat": "heap_alloc: {{ instance }}", - "range": true, - "refId": "F" + "legendFormat": "write: {{instance}}", + "refId": "B" } 
], - "title": "go memstat", + "title": "Disk bytes/sec", "type": "timeseries" }, { @@ -3845,7 +3412,7 @@ "axisPlacement": "auto", "barAlignment": 0, "drawStyle": "line", - "fillOpacity": 10, + "fillOpacity": 0, "gradientMode": "none", "hideFrom": { "legend": false, @@ -3882,18 +3449,17 @@ } ] }, - "unit": "s", - "unitScale": true + "unit": "none" }, "overrides": [] }, "gridPos": { - "h": 5, + "h": 6, "w": 8, "x": 16, - "y": 65 + "y": 53 }, - "id": 124, + "id": 128, "options": { "legend": { "calcs": [], @@ -3906,7 +3472,7 @@ "sort": "none" } }, - "pluginVersion": "8.0.6", + "pluginVersion": "10.3.4", "targets": [ { "datasource": { @@ -3914,15 +3480,26 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", - "exemplar": true, - "expr": "rate(go_gc_duration_seconds{quantile=\"0.75\",instance=~\"$instance\"}[$rate_interval])", + "expr": "go_goroutines{instance=~\"$instance\"}", "instant": false, "interval": "", - "legendFormat": "", + "legendFormat": "goroutines: {{instance}}", "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "go_threads{instance=~\"$instance\"}", + "instant": false, + "interval": "", + "legendFormat": "threads: {{instance}}", + "refId": "B" } ], - "title": "GC Stop the World per sec", + "title": "GO Goroutines and Threads", "type": "timeseries" }, { @@ -3930,7 +3507,6 @@ "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "description": "", "fieldConfig": { "defaults": { "color": { @@ -3940,6 +3516,7 @@ "axisBorderShow": false, "axisCenteredZero": false, "axisColorMode": "text", + "axisGridShow": true, "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, @@ -3981,8 +3558,7 @@ } ] }, - "unit": "decbytes", - "unitScale": true + "unit": "percent" }, "overrides": [] }, @@ -3990,13 +3566,13 @@ "h": 5, "w": 8, "x": 0, - "y": 71 + "y": 59 }, - "id": 148, + "id": 106, "options": { "legend": { "calcs": [ - "max" + "mean" ], "displayMode": "list", "placement": "bottom", @@ -4007,90 +3583,25 @@ "sort": "none" } }, - "pluginVersion": "8.0.6", + "pluginVersion": "10.3.4", "targets": [ { "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, + "editorMode": "code", "exemplar": true, - "expr": "process_virtual_memory_bytes{instance=~\"$instance\"}", - "hide": true, + "expr": "increase(process_cpu_seconds_total{instance=~\"$instance\"}[1m])", + "format": "time_series", "interval": "", - "legendFormat": "resident virtual mem: {{instance}}", + "intervalFactor": 1, + "legendFormat": "system: {{instance}}", + "range": true, "refId": "A" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "exemplar": true, - "expr": "process_resident_memory_anon_bytes{instance=~\"$instance\"}", - "hide": true, - "interval": "", - "legendFormat": "resident anon mem: {{instance}}", - "refId": "B" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "exemplar": true, - "expr": "process_resident_memory_bytes{instance=~\"$instance\"}", - "hide": false, - "interval": "", - "legendFormat": "resident mem: {{instance}}", - "refId": "C" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "expr": "mem_data{instance=~\"$instance\"}", - "hide": false, - "interval": "", - "legendFormat": "data: {{instance}}", - "refId": "D" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "expr": "mem_stack{instance=~\"$instance\"}", - "hide": false, - "interval": "", - "legendFormat": "stack: {{instance}}", 
- "refId": "E" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "expr": "mem_locked{instance=~\"$instance\"}", - "hide": false, - "interval": "", - "legendFormat": "locked: {{instance}}", - "refId": "F" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "expr": "mem_swap{instance=~\"$instance\"}", - "hide": false, - "interval": "", - "legendFormat": "swap: {{instance}}", - "refId": "G" } ], - "title": "mem: resident set size", + "title": "CPU", "type": "timeseries" }, { @@ -4149,18 +3660,17 @@ } ] }, - "unit": "short", - "unitScale": true + "unit": "decbytes" }, "overrides": [] }, "gridPos": { "h": 5, "w": 8, - "x": 0, - "y": 76 + "x": 8, + "y": 59 }, - "id": 86, + "id": 154, "options": { "legend": { "calcs": [ @@ -4175,7 +3685,7 @@ "sort": "none" } }, - "pluginVersion": "8.0.6", + "pluginVersion": "10.3.4", "targets": [ { "datasource": { @@ -4184,11 +3694,11 @@ }, "editorMode": "code", "exemplar": true, - "expr": "rate(go_memstats_mallocs_total{instance=~\"$instance\"}[$rate_interval])", + "expr": "go_memstats_stack_sys_bytes{instance=~\"$instance\"}", "format": "time_series", "interval": "", "intervalFactor": 1, - "legendFormat": "memstats_mallocs_total: {{ instance }}", + "legendFormat": "stack_sys: {{ instance }}", "range": true, "refId": "A" }, @@ -4199,17 +3709,81 @@ }, "editorMode": "code", "exemplar": true, - "expr": "rate(go_memstats_frees_total{instance=~\"$instance\"}[$rate_interval])", + "expr": "go_memstats_sys_bytes{instance=~\"$instance\"}", "format": "time_series", "hide": false, "interval": "", "intervalFactor": 1, - "legendFormat": "memstats_frees_total: {{ instance }}", + "legendFormat": "sys: {{ instance }}", "range": true, "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "go_memstats_stack_inuse_bytes{instance=~\"$instance\"}", + "format": "time_series", + "hide": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "stack_inuse: {{ instance }}", + "range": true, + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "go_memstats_mspan_sys_bytes{instance=~\"$instance\"}", + "format": "time_series", + "hide": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "mspan_sys: {{ instance }}", + "range": true, + "refId": "D" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "go_memstats_mcache_sys_bytes{instance=~\"$instance\"}", + "format": "time_series", + "hide": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "mcache_sys: {{ instance }}", + "range": true, + "refId": "E" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "go_memstats_heap_alloc_bytes{instance=~\"$instance\"}", + "format": "time_series", + "hide": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "heap_alloc: {{ instance }}", + "range": true, + "refId": "F" } ], - "title": "Process Mem: allocate objects/sec, free", + "title": "go memstat", "type": "timeseries" }, { @@ -4226,12 +3800,11 @@ "axisBorderShow": false, "axisCenteredZero": false, "axisColorMode": "text", - "axisGridShow": true, "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, "drawStyle": "line", - "fillOpacity": 0, + "fillOpacity": 10, 
"gradientMode": "none", "hideFrom": { "legend": false, @@ -4268,23 +3841,20 @@ } ] }, - "unit": "percent", - "unitScale": true + "unit": "s" }, "overrides": [] }, "gridPos": { "h": 5, "w": 8, - "x": 0, - "y": 81 + "x": 16, + "y": 59 }, - "id": 106, + "id": 124, "options": { "legend": { - "calcs": [ - "mean" - ], + "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true @@ -4294,7 +3864,7 @@ "sort": "none" } }, - "pluginVersion": "8.0.6", + "pluginVersion": "10.3.4", "targets": [ { "datasource": { @@ -4303,16 +3873,14 @@ }, "editorMode": "code", "exemplar": true, - "expr": "increase(process_cpu_seconds_total{instance=~\"$instance\"}[1m])", - "format": "time_series", + "expr": "rate(go_gc_duration_seconds{quantile=\"0.75\",instance=~\"$instance\"}[$rate_interval])", + "instant": false, "interval": "", - "intervalFactor": 1, - "legendFormat": "system: {{instance}}", - "range": true, + "legendFormat": "", "refId": "A" } ], - "title": "CPU", + "title": "GC Stop the World per sec", "type": "timeseries" }, { @@ -4324,7 +3892,7 @@ "h": 1, "w": 24, "x": 0, - "y": 86 + "y": 64 }, "id": 173, "panels": [], @@ -4394,8 +3962,7 @@ } ] }, - "unit": "s", - "unitScale": true + "unit": "s" }, "overrides": [] }, @@ -4403,7 +3970,7 @@ "h": 8, "w": 12, "x": 0, - "y": 87 + "y": 65 }, "id": 175, "options": { @@ -4552,8 +4119,7 @@ } ] }, - "unit": "reqps", - "unitScale": true + "unit": "reqps" }, "overrides": [] }, @@ -4561,7 +4127,7 @@ "h": 8, "w": 12, "x": 12, - "y": 87 + "y": 65 }, "id": 177, "options": { @@ -4700,8 +4266,7 @@ "value": 80 } ] - }, - "unitScale": true + } }, "overrides": [] }, @@ -4709,7 +4274,7 @@ "h": 6, "w": 8, "x": 0, - "y": 95 + "y": 73 }, "id": 176, "options": { @@ -4798,8 +4363,7 @@ "value": 80 } ] - }, - "unitScale": true + } }, "overrides": [] }, @@ -4807,7 +4371,7 @@ "h": 6, "w": 8, "x": 8, - "y": 95 + "y": 73 }, "id": 180, "options": { @@ -4909,8 +4473,7 @@ } ] }, - "unit": "short", - "unitScale": true + "unit": "short" }, "overrides": [] }, @@ -4918,7 +4481,7 @@ "h": 6, "w": 8, "x": 16, - "y": 95 + "y": 73 }, "id": 181, "options": { @@ -5020,8 +4583,7 @@ } ] }, - "unit": "binBps", - "unitScale": true + "unit": "binBps" }, "overrides": [] }, @@ -5029,7 +4591,7 @@ "h": 6, "w": 8, "x": 0, - "y": 101 + "y": 79 }, "id": 178, "options": { @@ -5073,7 +4635,7 @@ "h": 1, "w": 24, "x": 0, - "y": 107 + "y": 85 }, "id": 183, "panels": [], @@ -5143,8 +4705,7 @@ } ] }, - "unit": "reqps", - "unitScale": true + "unit": "reqps" }, "overrides": [] }, @@ -5152,7 +4713,7 @@ "h": 8, "w": 12, "x": 0, - "y": 108 + "y": 86 }, "id": 185, "options": { @@ -5253,8 +4814,7 @@ } ] }, - "unit": "s", - "unitScale": true + "unit": "s" }, "overrides": [] }, @@ -5262,7 +4822,7 @@ "h": 8, "w": 12, "x": 12, - "y": 108 + "y": 86 }, "id": 186, "options": { @@ -5351,8 +4911,7 @@ } ] }, - "unit": "s", - "unitScale": true + "unit": "s" }, "overrides": [] }, @@ -5360,7 +4919,7 @@ "h": 8, "w": 12, "x": 0, - "y": 116 + "y": 94 }, "id": 187, "options": { @@ -5449,8 +5008,7 @@ } ] }, - "unit": "none", - "unitScale": true + "unit": "none" }, "overrides": [] }, @@ -5458,7 +5016,7 @@ "h": 8, "w": 12, "x": 12, - "y": 116 + "y": 94 }, "id": 188, "options": { @@ -5473,7 +5031,7 @@ "sort": "none" } }, - "pluginVersion": "8.0.6", + "pluginVersion": "10.3.4", "targets": [ { "datasource": { @@ -5556,8 +5114,7 @@ } ] }, - "unit": "short", - "unitScale": true + "unit": "short" }, "overrides": [] }, @@ -5565,7 +5122,7 @@ "h": 6, "w": 8, "x": 8, - "y": 124 + "y": 102 }, "id": 189, "options": { @@ -5690,8 +5247,7 @@ 
"value": 80 } ] - }, - "unitScale": true + } }, "overrides": [] }, @@ -5699,7 +5255,7 @@ "h": 6, "w": 8, "x": 16, - "y": 124 + "y": 102 }, "id": 184, "options": { @@ -5757,7 +5313,7 @@ "h": 1, "w": 24, "x": 0, - "y": 130 + "y": 108 }, "id": 75, "panels": [], @@ -5827,41 +5383,15 @@ } ] }, - "unit": "Bps", - "unitScale": true + "unit": "Bps" }, - "overrides": [ - { - "__systemRef": "hideSeriesFrom", - "matcher": { - "id": "byNames", - "options": { - "mode": "exclude", - "names": [ - "egress: mainnet2-1:6061" - ], - "prefix": "All except:", - "readOnly": true - } - }, - "properties": [ - { - "id": "custom.hideFrom", - "value": { - "legend": false, - "tooltip": false, - "viz": true - } - } - ] - } - ] + "overrides": [] }, "gridPos": { "h": 9, "w": 12, "x": 0, - "y": 131 + "y": 109 }, "id": 96, "options": { @@ -5881,7 +5411,7 @@ "sort": "none" } }, - "pluginVersion": "8.0.6", + "pluginVersion": "10.3.4", "targets": [ { "datasource": { @@ -5973,8 +5503,7 @@ } ] }, - "unit": "none", - "unitScale": true + "unit": "none" }, "overrides": [] }, @@ -5982,7 +5511,7 @@ "h": 9, "w": 12, "x": 12, - "y": 131 + "y": 109 }, "id": 77, "options": { @@ -6002,7 +5531,7 @@ "sort": "none" } }, - "pluginVersion": "8.0.6", + "pluginVersion": "10.3.4", "targets": [ { "datasource": { @@ -6104,7 +5633,11 @@ "type": "custom" }, { - "current": {}, + "current": { + "selected": false, + "text": "All", + "value": "$__all" + }, "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" @@ -6255,6 +5788,6 @@ "timezone": "", "title": "Erigon Internals", "uid": "b42a61d7-02b1-416c-8ab4-b9c864356174", - "version": 14, + "version": 22, "weekStart": "" } \ No newline at end of file diff --git a/core/state/rw_v3.go b/core/state/rw_v3.go index 3b149ed8fbe..d793a53efdf 100644 --- a/core/state/rw_v3.go +++ b/core/state/rw_v3.go @@ -5,6 +5,7 @@ import ( "encoding/binary" "fmt" "sync" + "time" "github.com/holiman/uint256" "github.com/ledgerwatch/log/v3" @@ -248,12 +249,21 @@ func (rs *StateV3) ApplyLogsAndTraces4(txTask *TxTask, domains *libstate.SharedD return nil } +var ( + mxState3UnwindRunning = metrics.GetOrCreateGauge("state3_unwind_running") + mxState3Unwind = metrics.GetOrCreateSummary("state3_unwind") +) + func (rs *StateV3) Unwind(ctx context.Context, tx kv.RwTx, blockUnwindTo, txUnwindTo uint64, accumulator *shards.Accumulator) error { unwindToLimit := tx.(libstate.HasAggTx).AggTx().(*libstate.AggregatorRoTx).CanUnwindDomainsToTxNum() if txUnwindTo < unwindToLimit { return fmt.Errorf("can't unwind to txNum=%d, limit is %d", txUnwindTo, unwindToLimit) } + mxState3UnwindRunning.Inc() + defer mxState3UnwindRunning.Dec() + st := time.Now() + defer mxState3Unwind.ObserveDuration(st) var currentInc uint64 handle := func(k, v []byte, table etl.CurrentTableReader, next etl.LoadNextFunc) error { @@ -296,6 +306,7 @@ func (rs *StateV3) Unwind(ctx context.Context, tx kv.RwTx, blockUnwindTo, txUnwi ttx := tx.(kv.TemporalTx) + // todo these updates could be collected during rs.domains.Unwind (as passed collect function eg) { iter, err := ttx.HistoryRange(kv.AccountsHistory, int(txUnwindTo), -1, order.Asc, -1) if err != nil { diff --git a/erigon-lib/commitment/commitment.go b/erigon-lib/commitment/commitment.go index d15562c7fcf..55e0dd81cc6 100644 --- a/erigon-lib/commitment/commitment.go +++ b/erigon-lib/commitment/commitment.go @@ -5,25 +5,23 @@ import ( "context" "encoding/binary" "fmt" - "math/bits" - "strings" - "github.com/google/btree" "github.com/ledgerwatch/erigon-lib/common" 
"github.com/ledgerwatch/erigon-lib/common/cryptozerocopy" - "github.com/ledgerwatch/erigon-lib/types" - "golang.org/x/crypto/sha3" - "github.com/ledgerwatch/erigon-lib/metrics" + "github.com/ledgerwatch/erigon-lib/types" "github.com/ledgerwatch/log/v3" + "golang.org/x/crypto/sha3" + "math/bits" + "strings" "github.com/ledgerwatch/erigon-lib/common/length" "github.com/ledgerwatch/erigon-lib/etl" ) var ( - mxCommitmentKeys = metrics.GetOrCreateCounter("domain_commitment_keys") - mxCommitmentBranchUpdates = metrics.GetOrCreateCounter("domain_commitment_updates_applied") + mxKeys = metrics.GetOrCreateCounter("domain_commitment_keys") + mxBranchUpdatesApplied = metrics.GetOrCreateCounter("domain_commitment_updates_applied") ) // Trie represents commitment variant. @@ -185,6 +183,7 @@ func (be *BranchEncoder) Load(pc PatriciaContext, args etl.TransformArgs) error if err = pc.PutBranch(cp, cu, stateValue, stateStep); err != nil { return err } + mxBranchUpdatesApplied.Inc() return nil }, args); err != nil { return err @@ -221,7 +220,6 @@ func (be *BranchEncoder) CollectUpdate( if err = be.updates.Collect(prefix, update); err != nil { return 0, err } - mxCommitmentBranchUpdates.Inc() return lastNibble, nil } diff --git a/erigon-lib/commitment/hex_patricia_hashed.go b/erigon-lib/commitment/hex_patricia_hashed.go index 130ec4eb4a1..0f2414a7831 100644 --- a/erigon-lib/commitment/hex_patricia_hashed.go +++ b/erigon-lib/commitment/hex_patricia_hashed.go @@ -1345,7 +1345,7 @@ func (hph *HexPatriciaHashed) ProcessTree(ctx context.Context, tree *UpdateTree, } hph.deleteCell(hashedKey) } - mxCommitmentKeys.Inc() + mxKeys.Inc() ki++ return nil }) @@ -1450,7 +1450,7 @@ func (hph *HexPatriciaHashed) ProcessKeys(ctx context.Context, plainKeys [][]byt } hph.deleteCell(hashedKey) } - mxCommitmentKeys.Inc() + mxKeys.Inc() } // Folding everything up to the root for hph.activeRows > 0 { @@ -1546,7 +1546,7 @@ func (hph *HexPatriciaHashed) ProcessUpdates(ctx context.Context, plainKeys [][] } } - mxCommitmentKeys.Inc() + mxKeys.Inc() } // Folding everything up to the root for hph.activeRows > 0 { diff --git a/erigon-lib/metrics/register.go b/erigon-lib/metrics/register.go index 2ac13a6b4ca..4a2e68f55e4 100644 --- a/erigon-lib/metrics/register.go +++ b/erigon-lib/metrics/register.go @@ -140,8 +140,8 @@ func GetOrCreateSummary(name string) Summary { // - foo{bar="baz",aaa="b"} // // The returned histogram is safe to use from concurrent goroutines. -func NewHistogram(name string) Histogram { - h, err := defaultSet.NewHistogram(name) +func NewHistogram(name string, buckets []float64) Histogram { + h, err := defaultSet.NewHistogram(name, buckets) if err != nil { panic(fmt.Errorf("could not create new histogram: %w", err)) } @@ -171,3 +171,12 @@ func GetOrCreateHistogram(name string) Histogram { return &histogram{h} } + +func GetOrCreateHistogramWithBuckets(name string) Histogram { + h, err := defaultSet.GetOrCreateHistogram(name) + if err != nil { + panic(fmt.Errorf("could not get or create new histogram: %w", err)) + } + + return &histogram{h} +} diff --git a/erigon-lib/metrics/set.go b/erigon-lib/metrics/set.go index 2b0418fd2bd..ad4b164c239 100644 --- a/erigon-lib/metrics/set.go +++ b/erigon-lib/metrics/set.go @@ -78,8 +78,8 @@ func (s *Set) Collect(ch chan<- prometheus.Metric) { // - foo{bar="baz",aaa="b"} // // The returned histogram is safe to use from concurrent goroutines. -func (s *Set) NewHistogram(name string, help ...string) (prometheus.Histogram, error) { - h, err := newHistogram(name, help...) 
+func (s *Set) NewHistogram(name string, buckets []float64, help ...string) (prometheus.Histogram, error) { + h, err := newHistogram(name, buckets, help...) if err != nil { return nil, err } @@ -88,7 +88,7 @@ func (s *Set) NewHistogram(name string, help ...string) (prometheus.Histogram, e return h, nil } -func newHistogram(name string, help ...string) (prometheus.Histogram, error) { +func newHistogram(name string, buckets []float64, help ...string) (prometheus.Histogram, error) { name, labels, err := parseMetric(name) if err != nil { return nil, err @@ -97,6 +97,7 @@ func newHistogram(name string, help ...string) (prometheus.Histogram, error) { return prometheus.NewHistogram(prometheus.HistogramOpts{ Name: name, ConstLabels: labels, + Buckets: buckets, Help: strings.Join(help, " "), }), nil } @@ -119,7 +120,7 @@ func (s *Set) GetOrCreateHistogram(name string, help ...string) (prometheus.Hist nm := s.m[name] s.mu.Unlock() if nm == nil { - metric, err := newHistogram(name, help...) + metric, err := newHistogram(name, nil, help...) if err != nil { return nil, fmt.Errorf("invalid metric name %q: %w", name, err) } diff --git a/erigon-lib/state/aggregator.go b/erigon-lib/state/aggregator.go index b5041647000..cce320a8374 100644 --- a/erigon-lib/state/aggregator.go +++ b/erigon-lib/state/aggregator.go @@ -49,14 +49,9 @@ import ( "github.com/ledgerwatch/erigon-lib/kv/iter" "github.com/ledgerwatch/erigon-lib/kv/order" "github.com/ledgerwatch/erigon-lib/kv/rawdbv3" - "github.com/ledgerwatch/erigon-lib/metrics" "github.com/ledgerwatch/erigon-lib/seg" ) -var ( - mxPruneTookAgg = metrics.GetOrCreateSummary(`prune_seconds{type="state"}`) -) - type Aggregator struct { db kv.RoDB d [kv.DomainLen]*Domain diff --git a/erigon-lib/state/aggregator_files.go b/erigon-lib/state/aggregator_files.go index 53e3e01c32e..ad5f8dbdb93 100644 --- a/erigon-lib/state/aggregator_files.go +++ b/erigon-lib/state/aggregator_files.go @@ -23,48 +23,6 @@ import ( "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/length" "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon-lib/metrics" -) - -// StepsInBiggestFile - files of this size are completely frozen/immutable. -// files of smaller size are also immutable, but can be removed after merge to bigger files. 
-const StepsInBiggestFile = 32 - -var ( - //LatestStateReadWarm = metrics.GetOrCreateSummary(`latest_state_read{type="warm",found="yes"}`) //nolint - //LatestStateReadWarmNotFound = metrics.GetOrCreateSummary(`latest_state_read{type="warm",found="no"}`) //nolint - //LatestStateReadGrind = metrics.GetOrCreateSummary(`latest_state_read{type="grind",found="yes"}`) //nolint - //LatestStateReadGrindNotFound = metrics.GetOrCreateSummary(`latest_state_read{type="grind",found="no"}`) //nolint - //LatestStateReadCold = metrics.GetOrCreateSummary(`latest_state_read{type="cold",found="yes"}`) //nolint - //LatestStateReadColdNotFound = metrics.GetOrCreateSummary(`latest_state_read{type="cold",found="no"}`) //nolint - mxPrunableDAcc = metrics.GetOrCreateGauge(`domain_prunable{type="domain",table="account"}`) - mxPrunableDSto = metrics.GetOrCreateGauge(`domain_prunable{type="domain",table="storage"}`) - mxPrunableDCode = metrics.GetOrCreateGauge(`domain_prunable{type="domain",table="code"}`) - mxPrunableDComm = metrics.GetOrCreateGauge(`domain_prunable{type="domain",table="commitment"}`) - mxPrunableHAcc = metrics.GetOrCreateGauge(`domain_prunable{type="history",table="account"}`) - mxPrunableHSto = metrics.GetOrCreateGauge(`domain_prunable{type="history",table="storage"}`) - mxPrunableHCode = metrics.GetOrCreateGauge(`domain_prunable{type="history",table="code"}`) - mxPrunableHComm = metrics.GetOrCreateGauge(`domain_prunable{type="history",table="commitment"}`) - - mxRunningMerges = metrics.GetOrCreateGauge("domain_running_merges") - mxRunningFilesBuilding = metrics.GetOrCreateGauge("domain_running_files_building") - mxCollateTook = metrics.GetOrCreateHistogram(`domain_collate_took{type="domain"}`) - mxCollateTookHistory = metrics.GetOrCreateHistogram(`domain_collate_took{type="history"}`) - mxCollateTookIndex = metrics.GetOrCreateHistogram(`domain_collate_took{type="index"}`) - mxPruneTookDomain = metrics.GetOrCreateHistogram(`domain_prune_took{type="domain"}`) - mxPruneTookHistory = metrics.GetOrCreateHistogram(`domain_prune_took{type="history"}`) - mxPruneTookIndex = metrics.GetOrCreateHistogram(`domain_prune_took{type="index"}`) - mxPruneInProgress = metrics.GetOrCreateGauge("domain_pruning_progress") - mxCollationSize = metrics.GetOrCreateGauge("domain_collation_size") - mxCollationSizeHist = metrics.GetOrCreateGauge("domain_collation_hist_size") - mxPruneSizeDomain = metrics.GetOrCreateCounter(`domain_prune_size{type="domain"}`) - mxPruneSizeHistory = metrics.GetOrCreateCounter(`domain_prune_size{type="history"}`) - mxPruneSizeIndex = metrics.GetOrCreateCounter(`domain_prune_size{type="index"}`) - mxBuildTook = metrics.GetOrCreateSummary("domain_build_files_took") - mxStepTook = metrics.GetOrCreateHistogram("domain_step_took") - mxFlushTook = metrics.GetOrCreateSummary("domain_flush_took") - mxCommitmentRunning = metrics.GetOrCreateGauge("domain_running_commitment") - mxCommitmentTook = metrics.GetOrCreateSummary("domain_commitment_took") ) type SelectedStaticFilesV3 struct { diff --git a/erigon-lib/state/domain.go b/erigon-lib/state/domain.go index c0a657c7178..9d5cfbf5b65 100644 --- a/erigon-lib/state/domain.go +++ b/erigon-lib/state/domain.go @@ -1182,6 +1182,10 @@ func (dt *DomainRoTx) Unwind(ctx context.Context, rwTx kv.RwTx, step, txNumUnwin if err != nil { return fmt.Errorf("historyRange %s: %w", dt.ht.h.filenameBase, err) } + sf := time.Now() + defer mxUnwindTook.ObserveDuration(sf) + mxRunningUnwind.Inc() + defer mxRunningUnwind.Dec() defer histRng.Close() seen := 
make(map[string]struct{}) diff --git a/erigon-lib/state/domain_shared.go b/erigon-lib/state/domain_shared.go index ca0e4e8e77a..72971ba8a2b 100644 --- a/erigon-lib/state/domain_shared.go +++ b/erigon-lib/state/domain_shared.go @@ -120,6 +120,8 @@ func (sd *SharedDomains) Unwind(ctx context.Context, rwTx kv.RwTx, blockUnwindTo sd.aggTx.a.logger.Info("aggregator unwind", "step", step, "txUnwindTo", txUnwindTo, "stepsRangeInDB", sd.aggTx.a.StepsRangeInDBAsStr(rwTx)) //fmt.Printf("aggregator unwind step %d txUnwindTo %d stepsRangeInDB %s\n", step, txUnwindTo, sd.aggTx.a.StepsRangeInDBAsStr(rwTx)) + sf := time.Now() + defer mxUnwindSharedTook.ObserveDuration(sf) if err := sd.Flush(ctx, rwTx); err != nil { return err diff --git a/erigon-lib/state/metrics.go b/erigon-lib/state/metrics.go new file mode 100644 index 00000000000..5b0b48df4f2 --- /dev/null +++ b/erigon-lib/state/metrics.go @@ -0,0 +1,59 @@ +/* + Copyright 2024 Erigon contributors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package state + +import "github.com/ledgerwatch/erigon-lib/metrics" + +var ( + //LatestStateReadWarm = metrics.GetOrCreateSummary(`latest_state_read{type="warm",found="yes"}`) //nolint + //LatestStateReadWarmNotFound = metrics.GetOrCreateSummary(`latest_state_read{type="warm",found="no"}`) //nolint + //LatestStateReadGrind = metrics.GetOrCreateSummary(`latest_state_read{type="grind",found="yes"}`) //nolint + //LatestStateReadGrindNotFound = metrics.GetOrCreateSummary(`latest_state_read{type="grind",found="no"}`) //nolint + //LatestStateReadCold = metrics.GetOrCreateSummary(`latest_state_read{type="cold",found="yes"}`) //nolint + //LatestStateReadColdNotFound = metrics.GetOrCreateSummary(`latest_state_read{type="cold",found="no"}`) //nolint + mxPruneTookAgg = metrics.GetOrCreateSummary(`prune_seconds{type="state"}`) + mxPrunableDAcc = metrics.GetOrCreateGauge(`domain_prunable{type="domain",table="account"}`) + mxPrunableDSto = metrics.GetOrCreateGauge(`domain_prunable{type="domain",table="storage"}`) + mxPrunableDCode = metrics.GetOrCreateGauge(`domain_prunable{type="domain",table="code"}`) + mxPrunableDComm = metrics.GetOrCreateGauge(`domain_prunable{type="domain",table="commitment"}`) + mxPrunableHAcc = metrics.GetOrCreateGauge(`domain_prunable{type="history",table="account"}`) + mxPrunableHSto = metrics.GetOrCreateGauge(`domain_prunable{type="history",table="storage"}`) + mxPrunableHCode = metrics.GetOrCreateGauge(`domain_prunable{type="history",table="code"}`) + mxPrunableHComm = metrics.GetOrCreateGauge(`domain_prunable{type="history",table="commitment"}`) + mxUnwindTook = metrics.GetOrCreateHistogram(`domain_unwind_took{type="domain"}`) + mxUnwindSharedTook = metrics.GetOrCreateHistogram(`domain_unwind_took{type="shared"}`) + mxRunningUnwind = metrics.GetOrCreateGauge("domain_running_unwind") + mxRunningMerges = metrics.GetOrCreateGauge("domain_running_merges") + mxRunningFilesBuilding = metrics.GetOrCreateGauge("domain_running_files_building") + mxCollateTook = 
metrics.GetOrCreateHistogram(`domain_collate_took{type="domain"}`)
+	mxCollateTookHistory   = metrics.GetOrCreateHistogram(`domain_collate_took{type="history"}`)
+	mxCollateTookIndex     = metrics.GetOrCreateHistogram(`domain_collate_took{type="index"}`)
+	mxPruneTookDomain      = metrics.GetOrCreateHistogram(`domain_prune_took{type="domain"}`)
+	mxPruneTookHistory     = metrics.GetOrCreateHistogram(`domain_prune_took{type="history"}`)
+	mxPruneTookIndex       = metrics.GetOrCreateHistogram(`domain_prune_took{type="index"}`)
+	mxPruneInProgress      = metrics.GetOrCreateGauge("domain_pruning_progress")
+	mxCollationSize        = metrics.GetOrCreateGauge("domain_collation_size")
+	mxCollationSizeHist    = metrics.GetOrCreateGauge("domain_collation_hist_size")
+	mxPruneSizeDomain      = metrics.GetOrCreateCounter(`domain_prune_size{type="domain"}`)
+	mxPruneSizeHistory     = metrics.GetOrCreateCounter(`domain_prune_size{type="history"}`)
+	mxPruneSizeIndex       = metrics.GetOrCreateCounter(`domain_prune_size{type="index"}`)
+	mxBuildTook            = metrics.GetOrCreateSummary("domain_build_files_took")
+	mxStepTook             = metrics.GetOrCreateSummary("domain_step_took")
+	mxFlushTook            = metrics.GetOrCreateSummary("domain_flush_took")
+	mxCommitmentRunning    = metrics.GetOrCreateGauge("domain_running_commitment")
+	mxCommitmentTook       = metrics.GetOrCreateSummary("domain_commitment_took")
+)

From 0deb029068a66a237c1d7ee97d07f5f7ea3f1c28 Mon Sep 17 00:00:00 2001
From: Dmytro
Date: Tue, 14 May 2024 13:12:26 +0100
Subject: [PATCH 43/48] diagnostics: downloaded file details (#10331)

If a snapshot file is fully downloaded, the diagnostics command displays
a summary of the download process: size, total download time and average
download rate.

Example:
![Screenshot 2024-05-14 at 11 38 35](https://github.com/ledgerwatch/erigon/assets/29065143/135821d2-c207-4262-9617-74ea8188859c)
---
 cmd/diag/downloader/diag_downloader.go | 92 +++++++++++++++++---------
 1 file changed, 61 insertions(+), 31 deletions(-)

diff --git a/cmd/diag/downloader/diag_downloader.go b/cmd/diag/downloader/diag_downloader.go
index ab5aaf496bc..de17ffffda9 100644
--- a/cmd/diag/downloader/diag_downloader.go
+++ b/cmd/diag/downloader/diag_downloader.go
@@ -143,35 +143,51 @@ func printFile(cliCtx *cli.Context) error {
 	snapDownload := data.SnapshotDownload
 
 	if file, ok := snapDownload.SegmentsDownloading[cliCtx.String(FileNameFlag.Name)]; ok {
-		fileRow := getFileRow(file)
-		filePeers := getPeersRows(file.Peers)
-		fileWebseeds := getPeersRows(file.Webseeds)
-
-		switch cliCtx.String(flags.OutputFlag.Name) {
-		case "json":
-			util.RenderJson(fileRow)
-			util.RenderJson(filePeers)
-			util.RenderJson(fileWebseeds)
-		case "text":
-			//Print file status
-			util.RenderTableWithHeader(
-				"file download info:",
-				table.Row{"File", "Progress", "Total", "Downloaded", "Peers", "Peers Download Rate", "Webseeds", "Webseeds Download Rate", "Time Left", "Active"},
-				[]table.Row{fileRow},
-			)
-
-			//Print peers and webseeds status
-			util.RenderTableWithHeader(
-				"",
-				table.Row{"Peer", "Download Rate"},
-				filePeers,
-			)
-
-			util.RenderTableWithHeader(
-				"",
-				table.Row{"Webseed", "Download Rate"},
-				fileWebseeds,
-			)
+
+		if file.DownloadedBytes >= file.TotalBytes {
+			fileRow := getDownloadedFileRow(file)
+			switch cliCtx.String(flags.OutputFlag.Name) {
+			case "json":
+				util.RenderJson(fileRow)
+			case "text":
+				//Print file status
+				util.RenderTableWithHeader(
+					"File download info:",
+					table.Row{"File", "Size", "Average Download Rate", "Time Taken"},
+					[]table.Row{fileRow},
+				)
+			}
+		} else {
+			fileRow := getFileRow(file)
+			filePeers := 
getPeersRows(file.Peers)
+			fileWebseeds := getPeersRows(file.Webseeds)
+
+			switch cliCtx.String(flags.OutputFlag.Name) {
+			case "json":
+				util.RenderJson(fileRow)
+				util.RenderJson(filePeers)
+				util.RenderJson(fileWebseeds)
+			case "text":
+				//Print file status
+				util.RenderTableWithHeader(
+					"file download info:",
+					table.Row{"File", "Progress", "Total", "Downloaded", "Peers", "Peers Download Rate", "Webseeds", "Webseeds Download Rate", "Time Left", "Active"},
+					[]table.Row{fileRow},
+				)
+
+				//Print peers and webseeds status
+				util.RenderTableWithHeader(
+					"",
+					table.Row{"Peer", "Download Rate"},
+					filePeers,
+				)
+
+				util.RenderTableWithHeader(
+					"",
+					table.Row{"Webseed", "Download Rate"},
+					fileWebseeds,
+				)
+			}
 		}
 	} else {
 		txt := text.Colors{text.FgWhite, text.BgRed}
@@ -181,6 +197,20 @@ func printFile(cliCtx *cli.Context) error {
 	return nil
 }
 
+func getDownloadedFileRow(file diagnostics.SegmentDownloadStatistics) table.Row {
+	averageDownloadRate := common.ByteCount(file.DownloadedStats.AverageRate) + "/s"
+	totalDownloadTimeString := time.Duration(file.DownloadedStats.TimeTook) * time.Second
+
+	row := table.Row{
+		file.Name,
+		common.ByteCount(file.TotalBytes),
+		averageDownloadRate,
+		totalDownloadTimeString.String(),
+	}
+
+	return row
+}
+
 func getSnapshotStatusRow(snapDownload diagnostics.SnapshotDownloadStatistics) table.Row {
 	status := "Downloading"
 	if snapDownload.DownloadFinished {
@@ -192,7 +222,7 @@ func getSnapshotStatusRow(snapDownload diagnostics.SnapshotDownloadStatistics) t
 	remainingBytes := snapDownload.Total - snapDownload.Downloaded
 	downloadTimeLeft := util.CalculateTime(remainingBytes, snapDownload.DownloadRate)
 
-	totalDownloadTimeString := time.Duration(snapDownload.TotalTime * float64(time.Second)).String()
+	totalDownloadTimeString := time.Duration(snapDownload.TotalTime) * time.Second
 
 	rowObj := table.Row{
 		status, // Status
@@ -200,7 +230,7 @@ func getSnapshotStatusRow(snapDownload diagnostics.SnapshotDownloadStatistics) t
 		common.ByteCount(snapDownload.Downloaded), // Downloaded
 		common.ByteCount(snapDownload.Total),      // Total
 		downloadTimeLeft,                          // Time Left
-		totalDownloadTimeString,                   // Total Time
+		totalDownloadTimeString.String(),          // Total Time
 		common.ByteCount(snapDownload.DownloadRate) + "/s", // Download Rate
 		common.ByteCount(snapDownload.UploadRate) + "/s",   // Upload Rate
 		snapDownload.Peers, // Peers

From 412e92f082bdd444e82aa6b835986e7307aca895 Mon Sep 17 00:00:00 2001
From: Dmytro
Date: Tue, 14 May 2024 15:07:21 +0100
Subject: [PATCH 44/48] diagnostics: avoid printing empty tables (#10335)

---
 cmd/diag/util/util.go | 22 +++++++++++++++-------
 1 file changed, 15 insertions(+), 7 deletions(-)

diff --git a/cmd/diag/util/util.go b/cmd/diag/util/util.go
index 92e2a25d0e1..277f7259886 100644
--- a/cmd/diag/util/util.go
+++ b/cmd/diag/util/util.go
@@ -67,18 +67,26 @@ func RenderTableWithHeader(title string, header table.Row, rows []table.Row) {
 	if title != "" {
 		txt := text.Colors{text.FgBlue, text.Bold}
 		fmt.Println(txt.Sprint(title))
-	}
 
-	t := table.NewWriter()
-	t.SetOutputMirror(os.Stdout)
+		if len(rows) == 0 {
+			txt := text.Colors{text.FgRed, text.Bold}
+			fmt.Println(txt.Sprint("No data to show"))
+		}
+	}
 
-	t.AppendHeader(header)
 	if len(rows) > 0 {
-		t.AppendRows(rows)
+		t := table.NewWriter()
+		t.SetOutputMirror(os.Stdout)
+
+		t.AppendHeader(header)
+		if len(rows) > 0 {
+			t.AppendRows(rows)
+		}
+
+		t.AppendSeparator()
+		t.Render()
 	}
 
-	t.AppendSeparator()
-	t.Render()
 	fmt.Print("\n")
 }
 

From 3fd018d225d1281322676fe960e64d985ad85857 Mon Sep 17 00:00:00 2001
From: 
Dmytro
Date: Tue, 14 May 2024 15:07:57 +0100
Subject: [PATCH 45/48] diagnostics: fixed downloaded > 100 (#10336)

Fixed an issue where a file could show as more than 100% downloaded
---
 cmd/diag/downloader/diag_downloader.go | 20 +++++++++++++++-----
 1 file changed, 15 insertions(+), 5 deletions(-)

diff --git a/cmd/diag/downloader/diag_downloader.go b/cmd/diag/downloader/diag_downloader.go
index de17ffffda9..bb213d26def 100644
--- a/cmd/diag/downloader/diag_downloader.go
+++ b/cmd/diag/downloader/diag_downloader.go
@@ -217,7 +217,7 @@ func getSnapshotStatusRow(snapDownload diagnostics.SnapshotDownloadStatistics) t
 		status = "Finished"
 	}
 
-	downloadedPercent := float32(snapDownload.Downloaded) / float32(snapDownload.Total/100)
+	downloadedPercent := getPercentDownloaded(snapDownload.Downloaded, snapDownload.Total)
 	remainingBytes := snapDownload.Total - snapDownload.Downloaded
 	downloadTimeLeft := util.CalculateTime(remainingBytes, snapDownload.DownloadRate)
 
@@ -225,8 +225,8 @@
 	totalDownloadTimeString := time.Duration(snapDownload.TotalTime) * time.Second
 
 	rowObj := table.Row{
-		status, // Status
-		fmt.Sprintf("%.2f%%", downloadedPercent), // Progress
+		status,            // Status
+		downloadedPercent, // Progress
 		common.ByteCount(snapDownload.Downloaded), // Downloaded
 		common.ByteCount(snapDownload.Total),      // Total
@@ -247,7 +247,7 @@ func getFileRow(file diagnostics.SegmentDownloadStatistics) table.Row {
 	peersDownloadRate := getFileDownloadRate(file.Peers)
 	webseedsDownloadRate := getFileDownloadRate(file.Webseeds)
 	totalDownloadRate := peersDownloadRate + webseedsDownloadRate
-	downloadedPercent := float32(file.DownloadedBytes) / float32(file.TotalBytes/100)
+	downloadedPercent := getPercentDownloaded(file.DownloadedBytes, file.TotalBytes)
 	remainingBytes := file.TotalBytes - file.DownloadedBytes
 	downloadTimeLeft := util.CalculateTime(remainingBytes, totalDownloadRate)
 	isActive := "false"
@@ -257,7 +257,7 @@ func getFileRow(file diagnostics.SegmentDownloadStatistics) table.Row {
 
 	row := table.Row{
 		file.Name,
-		fmt.Sprintf("%.2f%%", downloadedPercent),
+		downloadedPercent,
 		common.ByteCount(file.TotalBytes),
 		common.ByteCount(file.DownloadedBytes),
 		len(file.Peers),
@@ -373,3 +373,13 @@ func filterQueued(rows []table.Row) []table.Row {
 
 	return filtered
 }
+
+func getPercentDownloaded(downloaded, total uint64) string {
+	percent := float32(downloaded) / float32(total/100)
+
+	if percent > 100 {
+		percent = 100
+	}
+
+	return fmt.Sprintf("%.2f%%", percent)
+}

From 68a7e41679a287b9e3b908bda9631152f819ee5a Mon Sep 17 00:00:00 2001
From: battlmonstr
Date: Tue, 14 May 2024 16:31:57 +0200
Subject: [PATCH 46/48] polygon/heimdall: RangeIndex (#10297)

---
 eth/backend.go                       |   1 +
 polygon/heimdall/checkpoint.go       |   4 ++
 polygon/heimdall/entity.go           |   1 +
 polygon/heimdall/entity_store.go     | 102 ++++++++++++++++++++++++---
 polygon/heimdall/milestone.go        |   4 ++
 polygon/heimdall/range_index.go      |  93 ++++++++++++++++++++++++
 polygon/heimdall/range_index_test.go |  95 +++++++++++++++++++++++++
 polygon/heimdall/scraper.go          |  36 +++++++---
 polygon/heimdall/span.go             |   4 ++
 polygon/sync/service.go              |   2 +
 10 files changed, 325 insertions(+), 17 deletions(-)
 create mode 100644 polygon/heimdall/range_index.go
 create mode 100644 polygon/heimdall/range_index_test.go

diff --git a/eth/backend.go b/eth/backend.go
index 1d3ebecb2d7..89ccb228606 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -982,6 +982,7 @@ func New(ctx context.Context, stack *node.Node, config 
*ethconfig.Config, logger backend.polygonSyncService = polygonsync.NewService( logger, chainConfig, + tmpdir, sentryClient, p2pConfig.MaxPeers, statusDataProvider, diff --git a/polygon/heimdall/checkpoint.go b/polygon/heimdall/checkpoint.go index 88e595a5e5e..37ba2baa999 100644 --- a/polygon/heimdall/checkpoint.go +++ b/polygon/heimdall/checkpoint.go @@ -20,6 +20,10 @@ type Checkpoint struct { Fields WaypointFields } +func (c Checkpoint) RawId() uint64 { + return uint64(c.Id) +} + func (c Checkpoint) StartBlock() *big.Int { return c.Fields.StartBlock } diff --git a/polygon/heimdall/entity.go b/polygon/heimdall/entity.go index 316ea09189a..b6dcfb38e8f 100644 --- a/polygon/heimdall/entity.go +++ b/polygon/heimdall/entity.go @@ -1,5 +1,6 @@ package heimdall type Entity interface { + RawId() uint64 BlockNumRange() ClosedRange } diff --git a/polygon/heimdall/entity_store.go b/polygon/heimdall/entity_store.go index 5024b49e8dd..1064dfe025d 100644 --- a/polygon/heimdall/entity_store.go +++ b/polygon/heimdall/entity_store.go @@ -4,13 +4,19 @@ import ( "context" "encoding/binary" "encoding/json" + "sync" "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon-lib/kv/iter" ) type entityStore interface { + Prepare(ctx context.Context) error + Close() GetLastEntityId(ctx context.Context) (uint64, bool, error) + GetEntity(ctx context.Context, id uint64) (Entity, error) PutEntity(ctx context.Context, id uint64, entity Entity) error + FindByBlockNum(ctx context.Context, blockNum uint64) (Entity, error) } type entityStoreImpl struct { @@ -20,6 +26,9 @@ type entityStoreImpl struct { makeEntity func() Entity getLastEntityId func(ctx context.Context, tx kv.Tx) (uint64, bool, error) loadEntityBytes func(ctx context.Context, tx kv.Getter, id uint64) ([]byte, error) + + blockNumToIdIndex *RangeIndex + prepareOnce sync.Once } func newEntityStore( @@ -28,6 +37,7 @@ func newEntityStore( makeEntity func() Entity, getLastEntityId func(ctx context.Context, tx kv.Tx) (uint64, bool, error), loadEntityBytes func(ctx context.Context, tx kv.Getter, id uint64) ([]byte, error), + blockNumToIdIndex *RangeIndex, ) entityStore { return &entityStoreImpl{ tx: tx, @@ -36,35 +46,111 @@ func newEntityStore( makeEntity: makeEntity, getLastEntityId: getLastEntityId, loadEntityBytes: loadEntityBytes, + + blockNumToIdIndex: blockNumToIdIndex, } } +func (s *entityStoreImpl) Prepare(ctx context.Context) error { + var err error + s.prepareOnce.Do(func() { + iteratorFactory := func() (iter.KV, error) { return s.tx.Range(s.table, nil, nil) } + err = buildBlockNumToIdIndex(ctx, s.blockNumToIdIndex, iteratorFactory, s.entityUnmarshalJSON) + }) + return err +} + +func (s *entityStoreImpl) Close() { + s.blockNumToIdIndex.Close() +} + func (s *entityStoreImpl) GetLastEntityId(ctx context.Context) (uint64, bool, error) { return s.getLastEntityId(ctx, s.tx) } +func entityStoreKey(id uint64) [8]byte { + var key [8]byte + binary.BigEndian.PutUint64(key[:], id) + return key +} + +func (s *entityStoreImpl) entityUnmarshalJSON(jsonBytes []byte) (Entity, error) { + entity := s.makeEntity() + if err := json.Unmarshal(jsonBytes, entity); err != nil { + return nil, err + } + return entity, nil +} + func (s *entityStoreImpl) GetEntity(ctx context.Context, id uint64) (Entity, error) { jsonBytes, err := s.loadEntityBytes(ctx, s.tx, id) if err != nil { return nil, err } + // not found + if jsonBytes == nil { + return nil, nil + } - entity := s.makeEntity() - if err := json.Unmarshal(jsonBytes, entity); err != nil { + return 
s.entityUnmarshalJSON(jsonBytes) +} + +func (s *entityStoreImpl) PutEntity(ctx context.Context, id uint64, entity Entity) error { + jsonBytes, err := json.Marshal(entity) + if err != nil { + return err + } + + key := entityStoreKey(id) + err = s.tx.Put(s.table, key[:], jsonBytes) + if err != nil { + return err + } + + // update blockNumToIdIndex + return s.blockNumToIdIndex.Put(ctx, entity.BlockNumRange(), id) +} + +func (s *entityStoreImpl) FindByBlockNum(ctx context.Context, blockNum uint64) (Entity, error) { + id, err := s.blockNumToIdIndex.Lookup(ctx, blockNum) + if err != nil { return nil, err } + // not found + if id == 0 { + return nil, nil + } - return entity, nil + return s.GetEntity(ctx, id) } -func (s *entityStoreImpl) PutEntity(_ context.Context, id uint64, entity Entity) error { - jsonBytes, err := json.Marshal(entity) +func buildBlockNumToIdIndex( + ctx context.Context, + index *RangeIndex, + iteratorFactory func() (iter.KV, error), + entityUnmarshalJSON func([]byte) (Entity, error), +) error { + it, err := iteratorFactory() if err != nil { return err } + defer it.Close() + + for it.HasNext() { + _, jsonBytes, err := it.Next() + if err != nil { + return err + } - var idBytes [8]byte - binary.BigEndian.PutUint64(idBytes[:], id) + entity, err := entityUnmarshalJSON(jsonBytes) + if err != nil { + return err + } + + if err = index.Put(ctx, entity.BlockNumRange(), entity.RawId()); err != nil { + return err + } + } - return s.tx.Put(s.table, idBytes[:], jsonBytes) + return nil } diff --git a/polygon/heimdall/milestone.go b/polygon/heimdall/milestone.go index 7ffc246d33b..3d74dac7fcc 100644 --- a/polygon/heimdall/milestone.go +++ b/polygon/heimdall/milestone.go @@ -20,6 +20,10 @@ type Milestone struct { Fields WaypointFields } +func (m Milestone) RawId() uint64 { + return uint64(m.Id) +} + func (m Milestone) StartBlock() *big.Int { return m.Fields.StartBlock } diff --git a/polygon/heimdall/range_index.go b/polygon/heimdall/range_index.go new file mode 100644 index 00000000000..7919dae7043 --- /dev/null +++ b/polygon/heimdall/range_index.go @@ -0,0 +1,93 @@ +package heimdall + +import ( + "context" + "encoding/binary" + + "github.com/c2h5oh/datasize" + "github.com/ledgerwatch/log/v3" + + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon-lib/kv/mdbx" +) + +type RangeIndex struct { + db kv.RwDB +} + +const rangeIndexTableName = "Index" + +func NewRangeIndex(ctx context.Context, tmpDir string, logger log.Logger) (*RangeIndex, error) { + db, err := mdbx.NewMDBX(logger). + InMem(tmpDir). + WithTableCfg(func(_ kv.TableCfg) kv.TableCfg { return kv.TableCfg{rangeIndexTableName: {}} }). + MapSize(1 * datasize.GB). + Open(ctx) + if err != nil { + return nil, err + } + + return &RangeIndex{db}, nil +} + +func (i *RangeIndex) Close() { + i.db.Close() +} + +func rangeIndexKey(blockNum uint64) [8]byte { + var key [8]byte + binary.BigEndian.PutUint64(key[:], blockNum) + return key +} + +func rangeIndexValue(id uint64) [8]byte { + var value [8]byte + binary.BigEndian.PutUint64(value[:], id) + return value +} + +func rangeIndexValueParse(value []byte) uint64 { + return binary.BigEndian.Uint64(value) +} + +// Put a mapping from a range to an id. 
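+// The entry is keyed by the range's End block (big-endian), so a later Lookup
+// can resolve any blockNum inside the range with a single cursor.Seek: the
+// first key >= blockNum marks the end of the covering range. This assumes the
+// stored ranges are contiguous and non-overlapping, and that ids start at 1,
+// so a zero id from Lookup means "not found".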
+func (i *RangeIndex) Put(ctx context.Context, r ClosedRange, id uint64) error { + tx, err := i.db.BeginRw(ctx) + if err != nil { + return err + } + defer tx.Rollback() + + key := rangeIndexKey(r.End) + value := rangeIndexValue(id) + if err = tx.Put(rangeIndexTableName, key[:], value[:]); err != nil { + return err + } + return tx.Commit() +} + +// Lookup an id of a range by a blockNum within that range. +func (i *RangeIndex) Lookup(ctx context.Context, blockNum uint64) (uint64, error) { + var id uint64 + err := i.db.View(ctx, func(tx kv.Tx) error { + cursor, err := tx.Cursor(rangeIndexTableName) + if err != nil { + return err + } + defer cursor.Close() + + key := rangeIndexKey(blockNum) + _, value, err := cursor.Seek(key[:]) + if err != nil { + return err + } + // not found + if value == nil { + return nil + } + + id = rangeIndexValueParse(value) + return nil + }) + return id, err +} diff --git a/polygon/heimdall/range_index_test.go b/polygon/heimdall/range_index_test.go new file mode 100644 index 00000000000..f9094f67671 --- /dev/null +++ b/polygon/heimdall/range_index_test.go @@ -0,0 +1,95 @@ +package heimdall + +import ( + "context" + "testing" + + "github.com/ledgerwatch/log/v3" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +type rangeIndexTest struct { + index *RangeIndex + ctx context.Context + logger log.Logger +} + +func newRangeIndexTest(t *testing.T) rangeIndexTest { + tmpDir := t.TempDir() + ctx := context.Background() + logger := log.New() + index, err := NewRangeIndex(ctx, tmpDir, logger) + require.NoError(t, err) + + t.Cleanup(index.Close) + + return rangeIndexTest{ + index: index, + ctx: ctx, + logger: logger, + } +} + +func TestRangeIndexEmpty(t *testing.T) { + test := newRangeIndexTest(t) + actualId, err := test.index.Lookup(test.ctx, 1000) + require.NoError(t, err) + assert.Equal(t, uint64(0), actualId) +} + +func TestRangeIndex(t *testing.T) { + test := newRangeIndexTest(t) + ctx := test.ctx + + ranges := []ClosedRange{ + {100, 200 - 1}, + {200, 500 - 1}, + {500, 1000 - 1}, + {1000, 1200 - 1}, + {1200, 1500 - 1}, + } + + for i, r := range ranges { + require.NoError(t, test.index.Put(ctx, r, uint64(i+1))) + } + + examples := map[uint64]uint64{ + 100: 1, + 101: 1, + 102: 1, + 150: 1, + 199: 1, + 200: 2, + 201: 2, + 202: 2, + 300: 2, + 498: 2, + 499: 2, + 500: 3, + 501: 3, + 502: 3, + 900: 3, + 998: 3, + 999: 3, + 1000: 4, + 1001: 4, + 1002: 4, + 1100: 4, + 1199: 4, + 1200: 5, + 1201: 5, + 1400: 5, + 1499: 5, + 1500: 0, + 1501: 0, + 2000: 0, + 5000: 0, + } + + for blockNum, expectedId := range examples { + actualId, err := test.index.Lookup(ctx, blockNum) + require.NoError(t, err) + assert.Equal(t, expectedId, actualId) + } +} diff --git a/polygon/heimdall/scraper.go b/polygon/heimdall/scraper.go index 14c40c9acba..53838f42d87 100644 --- a/polygon/heimdall/scraper.go +++ b/polygon/heimdall/scraper.go @@ -28,12 +28,14 @@ type Scraper struct { milestoneSyncEvent *polygoncommon.EventNotifier spanSyncEvent *polygoncommon.EventNotifier + tmpDir string logger log.Logger } func NewScraperTODO( client HeimdallClient, pollDelay time.Duration, + tmpDir string, logger log.Logger, ) *Scraper { return NewScraper( @@ -41,6 +43,7 @@ func NewScraperTODO( func() reader { /* TODO */ return nil }, client, pollDelay, + tmpDir, logger, ) } @@ -51,6 +54,7 @@ func NewScraper( client HeimdallClient, pollDelay time.Duration, + tmpDir string, logger log.Logger, ) *Scraper { return &Scraper{ @@ -68,6 +72,7 @@ func NewScraper( milestoneSyncEvent: 
polygoncommon.NewEventNotifier(), spanSyncEvent: polygoncommon.NewEventNotifier(), + tmpDir: tmpDir, logger: logger, } } @@ -79,6 +84,11 @@ func (s *Scraper) syncEntity( callback func([]Entity), syncEvent *polygoncommon.EventNotifier, ) error { + defer store.Close() + if err := store.Prepare(ctx); err != nil { + return err + } + for ctx.Err() == nil { lastKnownId, hasLastKnownId, err := store.GetLastEntityId(ctx) if err != nil { @@ -123,19 +133,19 @@ func (s *Scraper) syncEntity( return ctx.Err() } -func newCheckpointStore(tx kv.RwTx, reader services.BorCheckpointReader) entityStore { +func newCheckpointStore(tx kv.RwTx, reader services.BorCheckpointReader, blockNumToIdIndexFactory func() *RangeIndex) entityStore { makeEntity := func() Entity { return new(Checkpoint) } - return newEntityStore(tx, kv.BorCheckpoints, makeEntity, reader.LastCheckpointId, reader.Checkpoint) + return newEntityStore(tx, kv.BorCheckpoints, makeEntity, reader.LastCheckpointId, reader.Checkpoint, blockNumToIdIndexFactory()) } -func newMilestoneStore(tx kv.RwTx, reader services.BorMilestoneReader) entityStore { +func newMilestoneStore(tx kv.RwTx, reader services.BorMilestoneReader, blockNumToIdIndexFactory func() *RangeIndex) entityStore { makeEntity := func() Entity { return new(Milestone) } - return newEntityStore(tx, kv.BorMilestones, makeEntity, reader.LastMilestoneId, reader.Milestone) + return newEntityStore(tx, kv.BorMilestones, makeEntity, reader.LastMilestoneId, reader.Milestone, blockNumToIdIndexFactory()) } -func newSpanStore(tx kv.RwTx, reader services.BorSpanReader) entityStore { +func newSpanStore(tx kv.RwTx, reader services.BorSpanReader, blockNumToIdIndexFactory func() *RangeIndex) entityStore { makeEntity := func() Entity { return new(Span) } - return newEntityStore(tx, kv.BorSpans, makeEntity, reader.LastSpanId, reader.Span) + return newEntityStore(tx, kv.BorSpans, makeEntity, reader.LastSpanId, reader.Span, blockNumToIdIndexFactory()) } func newCheckpointFetcher(client HeimdallClient, logger log.Logger) entityFetcher { @@ -233,13 +243,21 @@ func (s *Scraper) Run(parentCtx context.Context) error { return nil } + blockNumToIdIndexFactory := func() *RangeIndex { + index, err := NewRangeIndex(parentCtx, s.tmpDir, s.logger) + if err != nil { + panic(err) + } + return index + } + group, ctx := errgroup.WithContext(parentCtx) // sync checkpoints group.Go(func() error { return s.syncEntity( ctx, - newCheckpointStore(tx, reader), + newCheckpointStore(tx, reader, blockNumToIdIndexFactory), newCheckpointFetcher(s.client, s.logger), func(entities []Entity) { s.checkpointObservers.Notify(libcommon.SliceMap(entities, downcastCheckpointEntity)) @@ -252,7 +270,7 @@ func (s *Scraper) Run(parentCtx context.Context) error { group.Go(func() error { return s.syncEntity( ctx, - newMilestoneStore(tx, reader), + newMilestoneStore(tx, reader, blockNumToIdIndexFactory), newMilestoneFetcher(s.client, s.logger), func(entities []Entity) { s.milestoneObservers.Notify(libcommon.SliceMap(entities, downcastMilestoneEntity)) @@ -265,7 +283,7 @@ func (s *Scraper) Run(parentCtx context.Context) error { group.Go(func() error { return s.syncEntity( ctx, - newSpanStore(tx, reader), + newSpanStore(tx, reader, blockNumToIdIndexFactory), newSpanFetcher(s.client, s.logger), func(entities []Entity) { s.spanObservers.Notify(libcommon.SliceMap(entities, downcastSpanEntity)) diff --git a/polygon/heimdall/span.go b/polygon/heimdall/span.go index 297e8aa75c9..6083395f4aa 100644 --- a/polygon/heimdall/span.go +++ b/polygon/heimdall/span.go @@ 
-15,6 +15,10 @@ type Span struct { ChainID string `json:"bor_chain_id,omitempty" yaml:"bor_chain_id"` } +func (s *Span) RawId() uint64 { + return uint64(s.Id) +} + func (s *Span) BlockNumRange() ClosedRange { return ClosedRange{ Start: s.StartBlock, diff --git a/polygon/sync/service.go b/polygon/sync/service.go index 6210ad1bcea..4ff910c0fb9 100644 --- a/polygon/sync/service.go +++ b/polygon/sync/service.go @@ -36,6 +36,7 @@ type service struct { func NewService( logger log.Logger, chainConfig *chain.Config, + tmpDir string, sentryClient direct.SentryClient, maxPeers int, statusDataProvider *sentry.StatusDataProvider, @@ -53,6 +54,7 @@ func NewService( heimdallScraper := heimdall.NewScraperTODO( heimdallClient, 1*time.Second, + tmpDir, logger, ) blockDownloader := NewBlockDownloader( From 0eadfc37a996f56f66ee969b6d4086f3160a34d5 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Tue, 14 May 2024 22:09:33 +0700 Subject: [PATCH 47/48] evm: to use temporal db. remove `historyV3` flag from db (#10305) for https://github.com/ledgerwatch/erigon/issues/10298 --------- Co-authored-by: awskii --- .../services/polygon/proofgenerator_test.go | 8 +- cmd/evm/internal/t8ntool/execution.go | 5 +- cmd/evm/internal/t8ntool/transition.go | 9 +- cmd/evm/runner.go | 7 +- cmd/evm/staterunner.go | 23 +- cmd/hack/hack.go | 10 +- cmd/hack/tool/fromdb/tool.go | 14 - cmd/integration/commands/reset_state.go | 7 +- cmd/integration/commands/root.go | 22 +- cmd/integration/commands/stages.go | 529 ++++++++---------- cmd/integration/commands/state_stages.go | 15 +- cmd/rpcdaemon/cli/config.go | 22 +- cmd/state/commands/check_change_sets.go | 295 ---------- cmd/state/commands/opcode_tracer.go | 11 +- cmd/state/commands/state_root.go | 12 +- cmd/state/verify/verify_txlookup.go | 10 +- cmd/txpool/main.go | 2 - core/genesis_test.go | 6 +- core/genesis_write.go | 32 +- core/rawdb/blockio/block_writer.go | 29 +- core/rawdb/rawdbreset/reset_stages.go | 57 +- core/state/plain_readonly.go | 5 - core/state/state_test.go | 11 +- core/test/domains_restart_test.go | 7 - core/vm/gas_table_test.go | 6 +- erigon-lib/common/dbg/dbg_env.go | 15 +- erigon-lib/kv/kvcache/cache_test.go | 6 +- erigon-lib/kv/kvcache/dummy.go | 2 +- erigon-lib/kv/kvcfg/accessors_config.go | 4 - erigon-lib/kv/temporal/kv_temporal.go | 4 - .../temporaltest/kv_temporal_testdb.go | 15 +- erigon-lib/txpool/pool.go | 2 +- erigon-lib/txpool/pool_fuzz_test.go | 2 +- erigon-lib/txpool/pool_test.go | 18 +- eth/backend.go | 36 +- eth/ethconfig/config.go | 4 - eth/stagedsync/default_stages.go | 23 +- eth/stagedsync/exec3.go | 34 -- eth/stagedsync/stage_bodies.go | 6 +- eth/stagedsync/stage_call_traces_test.go | 7 +- eth/stagedsync/stage_execute.go | 173 +----- eth/stagedsync/stage_hashstate.go | 44 +- eth/stagedsync/stage_hashstate_test.go | 23 +- eth/stagedsync/stage_headers.go | 40 +- eth/stagedsync/stage_mining_exec.go | 34 +- eth/stagedsync/stage_snapshots.go | 89 ++- eth/stagedsync/stage_trie3_test.go | 8 +- migrations/commitment.go | 4 +- p2p/sentry/sentry_grpc_server_test.go | 6 +- .../sentry_multi_client.go | 85 ++- tests/state_test.go | 2 +- tests/state_test_util.go | 8 +- turbo/app/snapshots_cmd.go | 7 +- .../engine_helpers/fork_validator.go | 32 +- turbo/execution/eth1/ethereum_execution.go | 8 +- turbo/execution/eth1/forkchoice.go | 34 +- turbo/jsonrpc/debug_api.go | 150 ++--- turbo/jsonrpc/erigon_block.go | 70 +-- turbo/jsonrpc/eth_accounts.go | 10 +- turbo/jsonrpc/eth_api.go | 16 - turbo/jsonrpc/eth_block.go | 5 +- turbo/jsonrpc/eth_call.go | 149 +++-- 
turbo/jsonrpc/eth_callMany.go | 2 +- turbo/jsonrpc/eth_call_test.go | 4 +- turbo/jsonrpc/eth_receipts.go | 92 +-- turbo/jsonrpc/otterscan_api.go | 147 +---- turbo/jsonrpc/otterscan_contract_creator.go | 270 +++------ turbo/jsonrpc/otterscan_generic_tracer.go | 95 +--- turbo/jsonrpc/otterscan_has_code.go | 2 +- turbo/jsonrpc/otterscan_search_trace.go | 2 +- ...terscan_transaction_by_sender_and_nonce.go | 275 +++------ turbo/jsonrpc/overlay_api.go | 4 +- turbo/jsonrpc/parity_api.go | 62 +- turbo/jsonrpc/trace_adhoc.go | 4 +- turbo/jsonrpc/trace_filtering.go | 223 +------- turbo/jsonrpc/tracing.go | 10 +- turbo/rpchelper/helper.go | 49 +- turbo/snapshotsync/snapshotsync.go | 7 +- turbo/stages/genesis_test.go | 2 +- turbo/stages/mock/mock_sentry.go | 54 +- turbo/stages/stageloop.go | 64 +-- turbo/transactions/tracing.go | 76 +-- 82 files changed, 975 insertions(+), 2798 deletions(-) delete mode 100644 cmd/state/commands/check_change_sets.go diff --git a/cmd/devnet/services/polygon/proofgenerator_test.go b/cmd/devnet/services/polygon/proofgenerator_test.go index 92f992535ce..0a0e5b57de5 100644 --- a/cmd/devnet/services/polygon/proofgenerator_test.go +++ b/cmd/devnet/services/polygon/proofgenerator_test.go @@ -19,7 +19,6 @@ import ( libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/hexutility" "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon-lib/kv/kvcfg" "github.com/ledgerwatch/erigon-lib/kv/memdb" "github.com/ledgerwatch/erigon/accounts/abi/bind" "github.com/ledgerwatch/erigon/cmd/devnet/blocks" @@ -145,12 +144,7 @@ func (rg *requestGenerator) GetTransactionReceipt(ctx context.Context, hash libc } defer tx.Rollback() - historyV3, err := kvcfg.HistoryV3.Enabled(tx) - if err != nil { - panic(err) - } - - _, _, _, ibs, _, err := transactions.ComputeTxEnv(ctx, engine, block, chainConfig, reader, tx, 0, historyV3) + _, _, _, ibs, _, err := transactions.ComputeTxEnv(ctx, engine, block, chainConfig, reader, tx, 0) if err != nil { return nil, err diff --git a/cmd/evm/internal/t8ntool/execution.go b/cmd/evm/internal/t8ntool/execution.go index 011d3b484c5..e04944ea1e1 100644 --- a/cmd/evm/internal/t8ntool/execution.go +++ b/cmd/evm/internal/t8ntool/execution.go @@ -24,8 +24,6 @@ import ( "github.com/ledgerwatch/erigon-lib/chain" libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon-lib/kv/kvcfg" - "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/math" "github.com/ledgerwatch/erigon/consensus/ethash" @@ -78,8 +76,7 @@ type stEnvMarshaling struct { func MakePreState(chainRules *chain.Rules, tx kv.RwTx, accounts types.GenesisAlloc) (state.StateReader, *state.PlainStateWriter) { var blockNr uint64 = 0 - histV3, _ := kvcfg.HistoryV3.Enabled(tx) - stateReader, stateWriter := rpchelper.NewLatestStateReader(tx, histV3), state.NewPlainStateWriter(tx, tx, blockNr) + stateReader, stateWriter := rpchelper.NewLatestStateReader(tx), state.NewPlainStateWriter(tx, tx, blockNr) statedb := state.New(stateReader) //ibs for addr, a := range accounts { statedb.SetCode(addr, a.Code) diff --git a/cmd/evm/internal/t8ntool/transition.go b/cmd/evm/internal/t8ntool/transition.go index 095c626807e..f89020cfc95 100644 --- a/cmd/evm/internal/t8ntool/transition.go +++ b/cmd/evm/internal/t8ntool/transition.go @@ -39,7 +39,6 @@ import ( "github.com/ledgerwatch/erigon-lib/common/hexutility" "github.com/ledgerwatch/erigon-lib/common/length" 
"github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon-lib/kv/kvcfg" "github.com/ledgerwatch/erigon/common/math" "github.com/ledgerwatch/erigon/consensus/ethash" "github.com/ledgerwatch/erigon/consensus/merge" @@ -294,7 +293,7 @@ func Main(ctx *cli.Context) error { return h } - _, db, _ := temporaltest.NewTestDB(nil, datadir.New("")) + db, _ := temporaltest.NewTestDB(nil, datadir.New("")) defer db.Close() tx, err := db.BeginRw(context.Background()) @@ -331,11 +330,7 @@ func Main(ctx *cli.Context) error { body, _ := rlp.EncodeToBytes(txs) collector := make(Alloc) - historyV3, err := kvcfg.HistoryV3.Enabled(tx) - if err != nil { - return err - } - dumper := state.NewDumper(tx, prestate.Env.Number, historyV3) + dumper := state.NewDumper(tx, prestate.Env.Number, true) dumper.DumpToCollector(collector, false, false, libcommon.Address{}, 0) return dispatchOutput(ctx, baseDir, result, collector, body) } diff --git a/cmd/evm/runner.go b/cmd/evm/runner.go index 6f88d47e85e..86e9659adc1 100644 --- a/cmd/evm/runner.go +++ b/cmd/evm/runner.go @@ -34,7 +34,6 @@ import ( libcommon "github.com/ledgerwatch/erigon-lib/common" common2 "github.com/ledgerwatch/erigon-lib/common/dbg" "github.com/ledgerwatch/erigon-lib/common/hexutility" - "github.com/ledgerwatch/erigon-lib/kv/kvcfg" "github.com/ledgerwatch/erigon-lib/kv/memdb" "github.com/ledgerwatch/erigon/cmd/utils/flags" "github.com/ledgerwatch/erigon/core/types" @@ -301,11 +300,7 @@ func runCmd(ctx *cli.Context) error { fmt.Println("Could not commit state: ", err) os.Exit(1) } - historyV3, err := kvcfg.HistoryV3.Enabled(tx) - if err != nil { - return err - } - fmt.Println(string(state.NewDumper(tx, 0, historyV3).DefaultDump())) + fmt.Println(string(state.NewDumper(tx, 0, true).DefaultDump())) } if memProfilePath := ctx.String(MemProfileFlag.Name); memProfilePath != "" { diff --git a/cmd/evm/staterunner.go b/cmd/evm/staterunner.go index d3ecda3cd6f..67cabd4c0b2 100644 --- a/cmd/evm/staterunner.go +++ b/cmd/evm/staterunner.go @@ -27,7 +27,11 @@ import ( "github.com/c2h5oh/datasize" mdbx2 "github.com/erigontech/mdbx-go/mdbx" libcommon "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon-lib/common/datadir" + "github.com/ledgerwatch/erigon-lib/config3" "github.com/ledgerwatch/erigon-lib/kv/mdbx" + "github.com/ledgerwatch/erigon-lib/kv/temporal" + libstate "github.com/ledgerwatch/erigon-lib/state" "github.com/ledgerwatch/log/v3" "github.com/urfave/cli/v2" @@ -122,16 +126,29 @@ func runStateTest(fname string, cfg vm.Config, jsonOut bool) error { func aggregateResultsFromStateTests( stateTests map[string]tests.StateTest, cfg vm.Config, jsonOut bool) ([]StatetestResult, error) { + dirs := datadir.New(filepath.Join(os.TempDir(), "erigon-statetest")) //this DB is shared. means: // - faster sequential tests: don't need create/delete db // - less parallelism: multiple processes can open same DB but only 1 can create rw-transaction (other will wait when 1-st finish) - db := mdbx.NewMDBX(log.New()). - Path(filepath.Join(os.TempDir(), "erigon-statetest")). + _db := mdbx.NewMDBX(log.New()). + Path(dirs.Chaindata). Flags(func(u uint) uint { - return u | mdbx2.UtterlyNoSync | mdbx2.NoMetaSync | mdbx2.LifoReclaim | mdbx2.NoMemInit + return u | mdbx2.UtterlyNoSync | mdbx2.NoMetaSync | mdbx2.NoMemInit | mdbx2.WriteMap }). GrowthStep(1 * datasize.MB). 
MustOpen() + defer _db.Close() + + agg, err := libstate.NewAggregator(context.Background(), dirs, config3.HistoryV3AggregationStep, _db, log.New()) + if err != nil { + return nil, err + } + defer agg.Close() + + db, err := temporal.New(_db, agg) + if err != nil { + return nil, err + } defer db.Close() tx, txErr := db.BeginRw(context.Background()) diff --git a/cmd/hack/hack.go b/cmd/hack/hack.go index 44c84d87161..80fbf94dac5 100644 --- a/cmd/hack/hack.go +++ b/cmd/hack/hack.go @@ -29,7 +29,6 @@ import ( "github.com/ledgerwatch/erigon-lib/common/hexutility" "github.com/ledgerwatch/erigon-lib/common/length" "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon-lib/kv/kvcfg" "github.com/ledgerwatch/erigon-lib/kv/mdbx" "github.com/ledgerwatch/erigon-lib/kv/temporal/historyv2" "github.com/ledgerwatch/erigon-lib/recsplit" @@ -132,15 +131,8 @@ func printCurrentBlockNumber(chaindata string) { } func blocksIO(db kv.RoDB) (services.FullBlockReader, *blockio.BlockWriter) { - var histV3 bool - if err := db.View(context.Background(), func(tx kv.Tx) error { - histV3, _ = kvcfg.HistoryV3.Enabled(tx) - return nil - }); err != nil { - panic(err) - } br := freezeblocks.NewBlockReader(freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, "", 0, log.New()), nil /* BorSnapshots */) - bw := blockio.NewBlockWriter(histV3) + bw := blockio.NewBlockWriter() return br, bw } diff --git a/cmd/hack/tool/fromdb/tool.go b/cmd/hack/tool/fromdb/tool.go index 8bcff3561ca..01852ee79f0 100644 --- a/cmd/hack/tool/fromdb/tool.go +++ b/cmd/hack/tool/fromdb/tool.go @@ -5,7 +5,6 @@ import ( "github.com/ledgerwatch/erigon-lib/chain" "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon-lib/kv/kvcfg" "github.com/ledgerwatch/erigon/cmd/hack/tool" "github.com/ledgerwatch/erigon/ethdb/prune" ) @@ -35,16 +34,3 @@ func PruneMode(db kv.RoDB) (pm prune.Mode) { } return } -func HistV3(db kv.RoDB) (enabled bool) { - if err := db.View(context.Background(), func(tx kv.Tx) error { - var err error - enabled, err = kvcfg.HistoryV3.Enabled(tx) - if err != nil { - return err - } - return nil - }); err != nil { - panic(err) - } - return -} diff --git a/cmd/integration/commands/reset_state.go b/cmd/integration/commands/reset_state.go index 35ece38fe28..de6486b8cde 100644 --- a/cmd/integration/commands/reset_state.go +++ b/cmd/integration/commands/reset_state.go @@ -13,7 +13,6 @@ import ( "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon-lib/kv/kvcfg" "github.com/ledgerwatch/erigon-lib/kv/rawdbv3" "github.com/ledgerwatch/erigon-lib/state" "github.com/ledgerwatch/erigon/turbo/snapshotsync/freezeblocks" @@ -123,14 +122,10 @@ func printStages(tx kv.Tx, snapshots *freezeblocks.RoSnapshots, borSn *freezeblo fmt.Fprintf(w, "prune distance: %s\n\n", pm.String()) fmt.Fprintf(w, "blocks.v2: %t, segments=%d, indices=%d\n", snapshots.Cfg().Enabled, snapshots.SegmentsMax(), snapshots.IndicesMax()) fmt.Fprintf(w, "blocks.bor.v2: segments=%d, indices=%d\n\n", borSn.SegmentsMax(), borSn.IndicesMax()) - h3, err := kvcfg.HistoryV3.Enabled(tx) - if err != nil { - return err - } _, lastBlockInHistSnap, _ := rawdbv3.TxNums.FindBlockNum(tx, agg.EndTxNumMinimax()) _lb, _lt, _ := rawdbv3.TxNums.Last(tx) - fmt.Fprintf(w, "history.v3: %t, idx steps: %.02f, lastBlockInSnap=%d, TxNums_Index(%d,%d), filesAmount: %d\n\n", h3, rawdbhelpers.IdxStepsCountV3(tx), lastBlockInHistSnap, _lb, _lt, agg.FilesAmount()) + fmt.Fprintf(w, "state.history: idx steps: %.02f, 
lastBlockInSnap=%d, TxNums_Index(%d,%d), filesAmount: %d\n\n", rawdbhelpers.IdxStepsCountV3(tx), lastBlockInHistSnap, _lb, _lt, agg.FilesAmount()) s1, err := tx.ReadSequence(kv.EthTx) if err != nil { return err diff --git a/cmd/integration/commands/root.go b/cmd/integration/commands/root.go index fcb9a932357..5432ba3ea92 100644 --- a/cmd/integration/commands/root.go +++ b/cmd/integration/commands/root.go @@ -13,7 +13,6 @@ import ( "golang.org/x/sync/semaphore" "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon-lib/kv/kvcfg" kv2 "github.com/ledgerwatch/erigon-lib/kv/mdbx" "github.com/ledgerwatch/erigon/cmd/utils" @@ -92,25 +91,12 @@ func openDB(opts kv2.MdbxOpts, applyMigrations bool, logger log.Logger) (kv.RwDB } if opts.GetLabel() == kv.ChainDB { - var h3 bool - var err error - if err := db.View(context.Background(), func(tx kv.Tx) error { - h3, err = kvcfg.HistoryV3.Enabled(tx) - if err != nil { - return err - } - return nil - }); err != nil { + _, _, agg := allSnapshots(context.Background(), db, logger) + tdb, err := temporal.New(db, agg) + if err != nil { return nil, err } - if h3 { - _, _, agg := allSnapshots(context.Background(), db, logger) - tdb, err := temporal.New(db, agg) - if err != nil { - return nil, err - } - db = tdb - } + db = tdb } return db, nil diff --git a/cmd/integration/commands/stages.go b/cmd/integration/commands/stages.go index 2c9505d9b76..925849f2ea0 100644 --- a/cmd/integration/commands/stages.go +++ b/cmd/integration/commands/stages.go @@ -27,7 +27,6 @@ import ( "github.com/ledgerwatch/erigon-lib/common/dbg" "github.com/ledgerwatch/erigon-lib/config3" "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon-lib/kv/kvcfg" "github.com/ledgerwatch/erigon-lib/kv/rawdbv3" libstate "github.com/ledgerwatch/erigon-lib/state" "github.com/ledgerwatch/erigon-lib/wrap" @@ -566,28 +565,6 @@ var cmdSetSnap = &cobra.Command{ }, } -var cmdForceSetHistoryV3 = &cobra.Command{ - Use: "force_set_history_v3", - Short: "Override existing --history.v3 flag value (if you know what you are doing)", - Run: func(cmd *cobra.Command, args []string) { - logger := debug.SetupCobra(cmd, "integration") - db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, logger) - if err != nil { - logger.Error("Opening DB", "error", err) - return - } - defer db.Close() - if err := db.Update(context.Background(), func(tx kv.RwTx) error { - return kvcfg.HistoryV3.ForceWrite(tx, _forceSetHistoryV3) - }); err != nil { - if !errors.Is(err, context.Canceled) { - logger.Error(err.Error()) - } - return - } - }, -} - func init() { withConfig(cmdPrintStages) withDataDir(cmdPrintStages) @@ -766,12 +743,6 @@ func init() { must(cmdSetSnap.MarkFlagRequired("snapshots")) rootCmd.AddCommand(cmdSetSnap) - withConfig(cmdForceSetHistoryV3) - withDataDir2(cmdForceSetHistoryV3) - cmdForceSetHistoryV3.Flags().BoolVar(&_forceSetHistoryV3, "history.v3", false, "") - must(cmdForceSetHistoryV3.MarkFlagRequired("history.v3")) - rootCmd.AddCommand(cmdForceSetHistoryV3) - withConfig(cmdSetPrune) withDataDir(cmdSetPrune) withChain(cmdSetPrune) @@ -799,7 +770,7 @@ func stageSnapshots(db kv.RwDB, ctx context.Context, logger log.Logger) error { br, bw := blocksIO(db, logger) _, _, _, _, _ = newSync(ctx, db, nil /* miningConfig */, logger) - chainConfig, _, _ := fromdb.ChainConfig(db), kvcfg.HistoryV3.FromDB(db), fromdb.PruneMode(db) + chainConfig, _ := fromdb.ChainConfig(db), fromdb.PruneMode(db) return db.Update(ctx, func(tx kv.RwTx) error { if reset { @@ -854,7 +825,7 @@ func stageHeaders(db kv.RwDB, 
ctx context.Context, logger log.Logger) error { defer agg.Close() br, bw := blocksIO(db, logger) _, _, _, _, _ = newSync(ctx, db, nil /* miningConfig */, logger) - chainConfig, _, _ := fromdb.ChainConfig(db), kvcfg.HistoryV3.FromDB(db), fromdb.PruneMode(db) + chainConfig, _ := fromdb.ChainConfig(db), fromdb.PruneMode(db) if integritySlow { if err := db.View(ctx, func(tx kv.Tx) error { @@ -1020,7 +991,7 @@ func stageBodies(db kv.RwDB, ctx context.Context, logger log.Logger) error { defer sn.Close() defer borSn.Close() defer agg.Close() - chainConfig, historyV3 := fromdb.ChainConfig(db), kvcfg.HistoryV3.FromDB(db) + chainConfig := fromdb.ChainConfig(db) _, _, sync, _, _ := newSync(ctx, db, nil /* miningConfig */, logger) br, bw := blocksIO(db, logger) @@ -1033,7 +1004,7 @@ func stageBodies(db kv.RwDB, ctx context.Context, logger log.Logger) error { } u := sync.NewUnwindState(stages.Bodies, s.BlockNumber-unwind, s.BlockNumber) - cfg := stagedsync.StageBodiesCfg(db, nil, nil, nil, nil, 0, *chainConfig, br, historyV3, bw, nil) + cfg := stagedsync.StageBodiesCfg(db, nil, nil, nil, nil, 0, *chainConfig, br, bw, nil) if err := stagedsync.UnwindBodiesStage(u, tx, cfg, ctx); err != nil { return err } @@ -1180,7 +1151,7 @@ func stageExec(db kv.RwDB, ctx context.Context, logger log.Logger) error { s := stage(sync, nil, db, stages.Execution) logger.Info("Stage", "name", s.ID, "progress", s.BlockNumber) - chainConfig, historyV3, pm := fromdb.ChainConfig(db), kvcfg.HistoryV3.FromDB(db), fromdb.PruneMode(db) + chainConfig, pm := fromdb.ChainConfig(db), fromdb.PruneMode(db) if pruneTo > 0 { pm.History = prune.Distance(s.BlockNumber - pruneTo) pm.Receipts = prune.Distance(s.BlockNumber - pruneTo) @@ -1196,9 +1167,9 @@ func stageExec(db kv.RwDB, ctx context.Context, logger log.Logger) error { br, _ := blocksIO(db, logger) cfg := stagedsync.StageExecuteBlocksCfg(db, pm, batchSize, nil, chainConfig, engine, vmConfig, nil, /*stateStream=*/ false, - /*badBlockHalt=*/ true, historyV3, dirs, br, nil, genesis, syncCfg, agg, nil) + /*badBlockHalt=*/ true, dirs, br, nil, genesis, syncCfg, agg, nil) - if unwind > 0 && historyV3 { + if unwind > 0 { if err := db.View(ctx, func(tx kv.Tx) error { blockNumWithCommitment, ok, err := tx.(libstate.HasAggTx).AggTx().(*libstate.AggregatorRoTx).CanUnwindBeforeBlockNum(s.BlockNumber-unwind, tx) if err != nil { @@ -1289,7 +1260,7 @@ func stageCustomTrace(db kv.RwDB, ctx context.Context, logger log.Logger) error s := stage(sync, nil, db, stages.CustomTrace) logger.Info("Stage", "name", s.ID, "progress", s.BlockNumber) - chainConfig, historyV3, pm := fromdb.ChainConfig(db), kvcfg.HistoryV3.FromDB(db), fromdb.PruneMode(db) + chainConfig, pm := fromdb.ChainConfig(db), fromdb.PruneMode(db) if pruneTo > 0 { pm.History = prune.Distance(s.BlockNumber - pruneTo) pm.Receipts = prune.Distance(s.BlockNumber - pruneTo) @@ -1305,7 +1276,7 @@ func stageCustomTrace(db kv.RwDB, ctx context.Context, logger log.Logger) error br, _ := blocksIO(db, logger) cfg := stagedsync.StageCustomTraceCfg(db, pm, dirs, br, chainConfig, engine, genesis, &syncCfg) - if unwind > 0 && historyV3 { + if unwind > 0 { if err := db.View(ctx, func(tx kv.Tx) error { blockNumWithCommitment, ok, err := tx.(libstate.HasAggTx).AggTx().(*libstate.AggregatorRoTx).CanUnwindBeforeBlockNum(s.BlockNumber-unwind, tx) if err != nil { @@ -1362,7 +1333,7 @@ func stageCustomTrace(db kv.RwDB, ctx context.Context, logger log.Logger) error } func stageTrie(db kv.RwDB, ctx context.Context, logger log.Logger) error { - dirs, pm, historyV3 := 
datadir.New(datadirCli), fromdb.PruneMode(db), kvcfg.HistoryV3.FromDB(db) + dirs, pm := datadir.New(datadirCli), fromdb.PruneMode(db) sn, borSn, agg := allSnapshots(ctx, db, logger) defer sn.Close() defer borSn.Close() @@ -1395,6 +1366,7 @@ func stageTrie(db kv.RwDB, ctx context.Context, logger log.Logger) error { logger.Info("StageExec", "progress", execStage.BlockNumber) logger.Info("StageTrie", "progress", s.BlockNumber) br, _ := blocksIO(db, logger) + historyV3 := true cfg := stagedsync.StageTrieCfg(db, true /* checkRoot */, true /* saveHashesToDb */, false /* badBlockHalt */, dirs.Tmp, br, nil /* hd */, historyV3, agg) if unwind > 0 { u := sync.NewUnwindState(stages.IntermediateHashes, s.BlockNumber-unwind, s.BlockNumber) @@ -1420,7 +1392,7 @@ func stageTrie(db kv.RwDB, ctx context.Context, logger log.Logger) error { } func stagePatriciaTrie(db kv.RwDB, ctx context.Context, logger log.Logger) error { - dirs, pm, historyV3 := datadir.New(datadirCli), fromdb.PruneMode(db), kvcfg.HistoryV3.FromDB(db) + dirs, pm := datadir.New(datadirCli), fromdb.PruneMode(db) _ = pm sn, _, agg := allSnapshots(ctx, db, logger) defer sn.Close() @@ -1439,11 +1411,8 @@ func stagePatriciaTrie(db kv.RwDB, ctx context.Context, logger log.Logger) error } defer tx.Rollback() - if enabled, _ := kvcfg.HistoryV3.Enabled(tx); !enabled { - panic("this method for v3 only") - } - br, _ := blocksIO(db, logger) + historyV3 := true cfg := stagedsync.StageTrieCfg(db, true /* checkRoot */, true /* saveHashesToDb */, false /* badBlockHalt */, dirs.Tmp, br, nil /* hd */, historyV3, agg) if _, err := stagedsync.RebuildPatriciaTrieBasedOnFiles(tx, cfg, ctx, logger); err != nil { @@ -1453,257 +1422,254 @@ func stagePatriciaTrie(db kv.RwDB, ctx context.Context, logger log.Logger) error } func stageHashState(db kv.RwDB, ctx context.Context, logger log.Logger) error { - dirs, pm, historyV3 := datadir.New(datadirCli), fromdb.PruneMode(db), kvcfg.HistoryV3.FromDB(db) - sn, borSn, agg := allSnapshots(ctx, db, logger) - defer sn.Close() - defer borSn.Close() - defer agg.Close() - _, _, sync, _, _ := newSync(ctx, db, nil /* miningConfig */, logger) - must(sync.SetCurrentStage(stages.HashState)) - - if warmup { - return reset2.Warmup(ctx, db, log.LvlInfo, stages.HashState) - } - if reset { - return reset2.Reset(ctx, db, stages.HashState) - } - - tx, err := db.BeginRw(ctx) - if err != nil { - return err - } - defer tx.Rollback() - - s := stage(sync, tx, nil, stages.HashState) - if pruneTo > 0 { - pm.History = prune.Distance(s.BlockNumber - pruneTo) - pm.Receipts = prune.Distance(s.BlockNumber - pruneTo) - pm.CallTraces = prune.Distance(s.BlockNumber - pruneTo) - pm.TxIndex = prune.Distance(s.BlockNumber - pruneTo) - } - - logger.Info("Stage", "name", s.ID, "progress", s.BlockNumber) - - cfg := stagedsync.StageHashStateCfg(db, dirs, historyV3) - if unwind > 0 { - u := sync.NewUnwindState(stages.HashState, s.BlockNumber-unwind, s.BlockNumber) - err = stagedsync.UnwindHashStateStage(u, s, tx, cfg, ctx, logger) - if err != nil { - return err - } - } else if pruneTo > 0 { - p, err := sync.PruneStageState(stages.HashState, s.BlockNumber, tx, nil) - if err != nil { - return err - } - err = stagedsync.PruneHashStateStage(p, tx, cfg, ctx) - if err != nil { - return err - } - } else { - err = stagedsync.SpawnHashStateStage(s, tx, cfg, ctx, logger) - if err != nil { - return err - } - } - return tx.Commit() + return fmt.Errorf("this stage is disabled in --history.v3=true") + //dirs, pm := datadir.New(datadirCli), fromdb.PruneMode(db) + //sn, borSn,
agg := allSnapshots(ctx, db, logger) + //defer sn.Close() + //defer borSn.Close() + //defer agg.Close() + //_, _, sync, _, _ := newSync(ctx, db, nil /* miningConfig */, logger) + //must(sync.SetCurrentStage(stages.HashState)) + // + //if warmup { + // return reset2.Warmup(ctx, db, log.LvlInfo, stages.HashState) + //} + //if reset { + // return reset2.Reset(ctx, db, stages.HashState) + //} + // + //tx, err := db.BeginRw(ctx) + //if err != nil { + // return err + //} + //defer tx.Rollback() + // + //s := stage(sync, tx, nil, stages.HashState) + //if pruneTo > 0 { + // pm.History = prune.Distance(s.BlockNumber - pruneTo) + // pm.Receipts = prune.Distance(s.BlockNumber - pruneTo) + // pm.CallTraces = prune.Distance(s.BlockNumber - pruneTo) + // pm.TxIndex = prune.Distance(s.BlockNumber - pruneTo) + //} + // + //logger.Info("Stage", "name", s.ID, "progress", s.BlockNumber) + // + //cfg := stagedsync.StageHashStateCfg(db, dirs, historyV3) + //if unwind > 0 { + // u := sync.NewUnwindState(stages.HashState, s.BlockNumber-unwind, s.BlockNumber) + // err = stagedsync.UnwindHashStateStage(u, s, tx, cfg, ctx, logger) + // if err != nil { + // return err + // } + //} else if pruneTo > 0 { + // p, err := sync.PruneStageState(stages.HashState, s.BlockNumber, tx, nil) + // if err != nil { + // return err + // } + // err = stagedsync.PruneHashStateStage(p, tx, cfg, ctx) + // if err != nil { + // return err + // } + //} else { + // err = stagedsync.SpawnHashStateStage(s, tx, cfg, ctx, logger) + // if err != nil { + // return err + // } + //} + //return tx.Commit() } func stageLogIndex(db kv.RwDB, ctx context.Context, logger log.Logger) error { - dirs, pm, historyV3, chainConfig := datadir.New(datadirCli), fromdb.PruneMode(db), kvcfg.HistoryV3.FromDB(db), fromdb.ChainConfig(db) - if historyV3 { - return fmt.Errorf("this stage is disable in --history.v3=true") - } - _, _, sync, _, _ := newSync(ctx, db, nil /* miningConfig */, logger) - must(sync.SetCurrentStage(stages.LogIndex)) - if warmup { - return reset2.Warmup(ctx, db, log.LvlInfo, stages.LogIndex) - } - if reset { - return reset2.Reset(ctx, db, stages.LogIndex) - } - if resetPruneAt { - return reset2.ResetPruneAt(ctx, db, stages.LogIndex) - } - tx, err := db.BeginRw(ctx) - if err != nil { - return err - } - defer tx.Rollback() - - execAt := progress(tx, stages.Execution) - s := stage(sync, tx, nil, stages.LogIndex) - if pruneTo > 0 { - pm.History = prune.Distance(s.BlockNumber - pruneTo) - pm.Receipts = prune.Distance(s.BlockNumber - pruneTo) - pm.CallTraces = prune.Distance(s.BlockNumber - pruneTo) - pm.TxIndex = prune.Distance(s.BlockNumber - pruneTo) - } - - logger.Info("Stage exec", "progress", execAt) - logger.Info("Stage", "name", s.ID, "progress", s.BlockNumber) - - cfg := stagedsync.StageLogIndexCfg(db, pm, dirs.Tmp, chainConfig.DepositContract) - if unwind > 0 { - u := sync.NewUnwindState(stages.LogIndex, s.BlockNumber-unwind, s.BlockNumber) - err = stagedsync.UnwindLogIndex(u, s, tx, cfg, ctx) - if err != nil { - return err - } - } else if pruneTo > 0 { - p, err := sync.PruneStageState(stages.LogIndex, s.BlockNumber, nil, db) - if err != nil { - return err - } - err = stagedsync.PruneLogIndex(p, tx, cfg, ctx, logger) - if err != nil { - return err - } - } else { - if err := stagedsync.SpawnLogIndex(s, tx, cfg, ctx, block, logger); err != nil { - return err - } - } - return tx.Commit() + return fmt.Errorf("this stage is disabled in --history.v3=true") + //dirs, pm, chainConfig := datadir.New(datadirCli), fromdb.PruneMode(db),
fromdb.ChainConfig(db) + //_, _, sync, _, _ := newSync(ctx, db, nil /* miningConfig */, logger) + //must(sync.SetCurrentStage(stages.LogIndex)) + //if warmup { + // return reset2.Warmup(ctx, db, log.LvlInfo, stages.LogIndex) + //} + //if reset { + // return reset2.Reset(ctx, db, stages.LogIndex) + //} + //if resetPruneAt { + // return reset2.ResetPruneAt(ctx, db, stages.LogIndex) + //} + //tx, err := db.BeginRw(ctx) + //if err != nil { + // return err + //} + //defer tx.Rollback() + // + //execAt := progress(tx, stages.Execution) + //s := stage(sync, tx, nil, stages.LogIndex) + //if pruneTo > 0 { + // pm.History = prune.Distance(s.BlockNumber - pruneTo) + // pm.Receipts = prune.Distance(s.BlockNumber - pruneTo) + // pm.CallTraces = prune.Distance(s.BlockNumber - pruneTo) + // pm.TxIndex = prune.Distance(s.BlockNumber - pruneTo) + //} + // + //logger.Info("Stage exec", "progress", execAt) + //logger.Info("Stage", "name", s.ID, "progress", s.BlockNumber) + // + //cfg := stagedsync.StageLogIndexCfg(db, pm, dirs.Tmp, chainConfig.DepositContract) + //if unwind > 0 { + // u := sync.NewUnwindState(stages.LogIndex, s.BlockNumber-unwind, s.BlockNumber) + // err = stagedsync.UnwindLogIndex(u, s, tx, cfg, ctx) + // if err != nil { + // return err + // } + //} else if pruneTo > 0 { + // p, err := sync.PruneStageState(stages.LogIndex, s.BlockNumber, nil, db) + // if err != nil { + // return err + // } + // err = stagedsync.PruneLogIndex(p, tx, cfg, ctx, logger) + // if err != nil { + // return err + // } + //} else { + // if err := stagedsync.SpawnLogIndex(s, tx, cfg, ctx, block, logger); err != nil { + // return err + // } + //} + //return tx.Commit() } func stageCallTraces(db kv.RwDB, ctx context.Context, logger log.Logger) error { - dirs, pm, historyV3 := datadir.New(datadirCli), fromdb.PruneMode(db), kvcfg.HistoryV3.FromDB(db) - if historyV3 { - return fmt.Errorf("this stage is disable in --history.v3=true") - } - _, _, sync, _, _ := newSync(ctx, db, nil /* miningConfig */, logger) - must(sync.SetCurrentStage(stages.CallTraces)) - - if warmup { - return reset2.Warmup(ctx, db, log.LvlInfo, stages.CallTraces) - } - if reset { - return reset2.Reset(ctx, db, stages.CallTraces) - } - - tx, err := db.BeginRw(ctx) - if err != nil { - return err - } - defer tx.Rollback() - var batchSize datasize.ByteSize - must(batchSize.UnmarshalText([]byte(batchSizeStr))) + return fmt.Errorf("this stage is disabled in --history.v3=true") + /* + dirs, pm := datadir.New(datadirCli), fromdb.PruneMode(db) + _, _, sync, _, _ := newSync(ctx, db, nil , logger) + must(sync.SetCurrentStage(stages.CallTraces)) - execStage := progress(tx, stages.Execution) - s := stage(sync, tx, nil, stages.CallTraces) - if pruneTo > 0 { - pm.History = prune.Distance(s.BlockNumber - pruneTo) - pm.Receipts = prune.Distance(s.BlockNumber - pruneTo) - pm.CallTraces = prune.Distance(s.BlockNumber - pruneTo) - pm.TxIndex = prune.Distance(s.BlockNumber - pruneTo) - } - logger.Info("ID exec", "progress", execStage) - if block != 0 { - s.BlockNumber = block - logger.Info("Overriding initial state", "block", block) - } - logger.Info("ID call traces", "progress", s.BlockNumber) - - cfg := stagedsync.StageCallTracesCfg(db, pm, block, dirs.Tmp) + if warmup { + return reset2.Warmup(ctx, db, log.LvlInfo, stages.CallTraces) + } + if reset { + return reset2.Reset(ctx, db, stages.CallTraces) + } - if unwind > 0 { - u := sync.NewUnwindState(stages.CallTraces, s.BlockNumber-unwind, s.BlockNumber) - err = stagedsync.UnwindCallTraces(u, s, tx, cfg, ctx, logger) + tx,
err := db.BeginRw(ctx) if err != nil { return err } - } else if pruneTo > 0 { - p, err := sync.PruneStageState(stages.CallTraces, s.BlockNumber, tx, nil) - if err != nil { - return err + defer tx.Rollback() + var batchSize datasize.ByteSize + must(batchSize.UnmarshalText([]byte(batchSizeStr))) + + execStage := progress(tx, stages.Execution) + s := stage(sync, tx, nil, stages.CallTraces) + if pruneTo > 0 { + pm.History = prune.Distance(s.BlockNumber - pruneTo) + pm.Receipts = prune.Distance(s.BlockNumber - pruneTo) + pm.CallTraces = prune.Distance(s.BlockNumber - pruneTo) + pm.TxIndex = prune.Distance(s.BlockNumber - pruneTo) } - err = stagedsync.PruneCallTraces(p, tx, cfg, ctx, logger) - if err != nil { - return err + logger.Info("ID exec", "progress", execStage) + if block != 0 { + s.BlockNumber = block + logger.Info("Overriding initial state", "block", block) } - } else { - if err := stagedsync.SpawnCallTraces(s, tx, cfg, ctx, logger); err != nil { - return err + logger.Info("ID call traces", "progress", s.BlockNumber) + + cfg := stagedsync.StageCallTracesCfg(db, pm, block, dirs.Tmp) + + if unwind > 0 { + u := sync.NewUnwindState(stages.CallTraces, s.BlockNumber-unwind, s.BlockNumber) + err = stagedsync.UnwindCallTraces(u, s, tx, cfg, ctx, logger) + if err != nil { + return err + } + } else if pruneTo > 0 { + p, err := sync.PruneStageState(stages.CallTraces, s.BlockNumber, tx, nil) + if err != nil { + return err + } + err = stagedsync.PruneCallTraces(p, tx, cfg, ctx, logger) + if err != nil { + return err + } + } else { + if err := stagedsync.SpawnCallTraces(s, tx, cfg, ctx, logger); err != nil { + return err + } } - } - return tx.Commit() + return tx.Commit() + */ } func stageHistory(db kv.RwDB, ctx context.Context, logger log.Logger) error { - dirs, pm, historyV3 := datadir.New(datadirCli), fromdb.PruneMode(db), kvcfg.HistoryV3.FromDB(db) - if historyV3 { - return fmt.Errorf("this stage is disable in --history.v3=true") - } - sn, borSn, agg := allSnapshots(ctx, db, logger) - defer sn.Close() - defer borSn.Close() - defer agg.Close() - _, _, sync, _, _ := newSync(ctx, db, nil /* miningConfig */, logger) - must(sync.SetCurrentStage(stages.AccountHistoryIndex)) - - if warmup { - return reset2.Warmup(ctx, db, log.LvlInfo, stages.AccountHistoryIndex, stages.StorageHistoryIndex) - } - if reset { - return reset2.Reset(ctx, db, stages.AccountHistoryIndex, stages.StorageHistoryIndex) - } - tx, err := db.BeginRw(ctx) - if err != nil { - return err - } - defer tx.Rollback() - - execStage := progress(tx, stages.Execution) - stageStorage := stage(sync, tx, nil, stages.StorageHistoryIndex) - stageAcc := stage(sync, tx, nil, stages.AccountHistoryIndex) - if pruneTo > 0 { - pm.History = prune.Distance(stageAcc.BlockNumber - pruneTo) - pm.Receipts = prune.Distance(stageAcc.BlockNumber - pruneTo) - pm.CallTraces = prune.Distance(stageAcc.BlockNumber - pruneTo) - pm.TxIndex = prune.Distance(stageAcc.BlockNumber - pruneTo) - } - logger.Info("ID exec", "progress", execStage) - logger.Info("ID acc history", "progress", stageAcc.BlockNumber) - logger.Info("ID storage history", "progress", stageStorage.BlockNumber) - - cfg := stagedsync.StageHistoryCfg(db, pm, dirs.Tmp) - if unwind > 0 { //nolint:staticcheck - u := sync.NewUnwindState(stages.StorageHistoryIndex, stageStorage.BlockNumber-unwind, stageStorage.BlockNumber) - if err := stagedsync.UnwindStorageHistoryIndex(u, stageStorage, tx, cfg, ctx); err != nil { - return err - } - u = sync.NewUnwindState(stages.AccountHistoryIndex, 
stageAcc.BlockNumber-unwind, stageAcc.BlockNumber) - if err := stagedsync.UnwindAccountHistoryIndex(u, stageAcc, tx, cfg, ctx); err != nil { - return err - } - } else if pruneTo > 0 { - pa, err := sync.PruneStageState(stages.AccountHistoryIndex, stageAcc.BlockNumber, tx, db) - if err != nil { - return err - } - err = stagedsync.PruneAccountHistoryIndex(pa, tx, cfg, ctx, logger) - if err != nil { - return err - } - ps, err := sync.PruneStageState(stages.StorageHistoryIndex, stageStorage.BlockNumber, tx, db) - if err != nil { - return err - } - err = stagedsync.PruneStorageHistoryIndex(ps, tx, cfg, ctx, logger) - if err != nil { - return err - } - _ = printStages(tx, sn, borSn, agg) - } else { - if err := stagedsync.SpawnAccountHistoryIndex(stageAcc, tx, cfg, ctx, logger); err != nil { - return err - } - if err := stagedsync.SpawnStorageHistoryIndex(stageStorage, tx, cfg, ctx, logger); err != nil { - return err - } - } - return tx.Commit() + return fmt.Errorf("this stage is disabled in --history.v3=true") + //dirs, pm := datadir.New(datadirCli), fromdb.PruneMode(db) + //sn, borSn, agg := allSnapshots(ctx, db, logger) + //defer sn.Close() + //defer borSn.Close() + //defer agg.Close() + //_, _, sync, _, _ := newSync(ctx, db, nil /* miningConfig */, logger) + //must(sync.SetCurrentStage(stages.AccountHistoryIndex)) + // + //if warmup { + // return reset2.Warmup(ctx, db, log.LvlInfo, stages.AccountHistoryIndex, stages.StorageHistoryIndex) + //} + //if reset { + // return reset2.Reset(ctx, db, stages.AccountHistoryIndex, stages.StorageHistoryIndex) + //} + //tx, err := db.BeginRw(ctx) + //if err != nil { + // return err + //} + //defer tx.Rollback() + // + //execStage := progress(tx, stages.Execution) + //stageStorage := stage(sync, tx, nil, stages.StorageHistoryIndex) + //stageAcc := stage(sync, tx, nil, stages.AccountHistoryIndex) + //if pruneTo > 0 { + // pm.History = prune.Distance(stageAcc.BlockNumber - pruneTo) + // pm.Receipts = prune.Distance(stageAcc.BlockNumber - pruneTo) + // pm.CallTraces = prune.Distance(stageAcc.BlockNumber - pruneTo) + // pm.TxIndex = prune.Distance(stageAcc.BlockNumber - pruneTo) + //} + //logger.Info("ID exec", "progress", execStage) + //logger.Info("ID acc history", "progress", stageAcc.BlockNumber) + //logger.Info("ID storage history", "progress", stageStorage.BlockNumber) + // + //cfg := stagedsync.StageHistoryCfg(db, pm, dirs.Tmp) + //if unwind > 0 { //nolint:staticcheck + // u := sync.NewUnwindState(stages.StorageHistoryIndex, stageStorage.BlockNumber-unwind, stageStorage.BlockNumber) + // if err := stagedsync.UnwindStorageHistoryIndex(u, stageStorage, tx, cfg, ctx); err != nil { + // return err + // } + // u = sync.NewUnwindState(stages.AccountHistoryIndex, stageAcc.BlockNumber-unwind, stageAcc.BlockNumber) + // if err := stagedsync.UnwindAccountHistoryIndex(u, stageAcc, tx, cfg, ctx); err != nil { + // return err + // } + //} else if pruneTo > 0 { + // pa, err := sync.PruneStageState(stages.AccountHistoryIndex, stageAcc.BlockNumber, tx, db) + // if err != nil { + // return err + // } + // err = stagedsync.PruneAccountHistoryIndex(pa, tx, cfg, ctx, logger) + // if err != nil { + // return err + // } + // ps, err := sync.PruneStageState(stages.StorageHistoryIndex, stageStorage.BlockNumber, tx, db) + // if err != nil { + // return err + // } + // err = stagedsync.PruneStorageHistoryIndex(ps, tx, cfg, ctx, logger) + // if err != nil { + // return err + // } + // _ = printStages(tx, sn, borSn, agg) + //} else { + // if err :=
stagedsync.SpawnAccountHistoryIndex(stageAcc, tx, cfg, ctx, logger); err != nil { + // return err + // } + // if err := stagedsync.SpawnStorageHistoryIndex(stageStorage, tx, cfg, ctx, logger); err != nil { + // return err + // } + //} + //return tx.Commit() } func stageTxLookup(db kv.RwDB, ctx context.Context, logger log.Logger) error { @@ -1856,9 +1822,8 @@ var _blockWriterSingleton *blockio.BlockWriter func blocksIO(db kv.RoDB, logger log.Logger) (services.FullBlockReader, *blockio.BlockWriter) { openBlockReaderOnce.Do(func() { sn, borSn, _ := allSnapshots(context.Background(), db, logger) - histV3 := kvcfg.HistoryV3.FromDB(db) _blockReaderSingleton = freezeblocks.NewBlockReader(sn, borSn) - _blockWriterSingleton = blockio.NewBlockWriter(histV3) + _blockWriterSingleton = blockio.NewBlockWriter() }) return _blockReaderSingleton, _blockWriterSingleton } @@ -1866,7 +1831,7 @@ func blocksIO(db kv.RoDB, logger log.Logger) (services.FullBlockReader, *blockio const blockBufferSize = 128 func newSync(ctx context.Context, db kv.RwDB, miningConfig *params.MiningConfig, logger log.Logger) (consensus.Engine, *vm.Config, *stagedsync.Sync, *stagedsync.Sync, stagedsync.MiningState) { - dirs, historyV3, pm := datadir.New(datadirCli), kvcfg.HistoryV3.FromDB(db), fromdb.PruneMode(db) + dirs, pm := datadir.New(datadirCli), fromdb.PruneMode(db) vmConfig := &vm.Config{} @@ -1883,7 +1848,6 @@ func newSync(ctx context.Context, db kv.RwDB, miningConfig *params.MiningConfig, must(batchSize.UnmarshalText([]byte(batchSizeStr))) cfg := ethconfig.Defaults - cfg.HistoryV3 = historyV3 cfg.Prune = pm cfg.BatchSize = batchSize cfg.DeprecatedTxPool.Disable = true @@ -1968,7 +1932,6 @@ func newSync(ctx context.Context, db kv.RwDB, miningConfig *params.MiningConfig, notifications.Accumulator, cfg.StateStream, /*stateStream=*/ false, - cfg.HistoryV3, dirs, blockReader, sentryControlServer.Hd, diff --git a/cmd/integration/commands/state_stages.go b/cmd/integration/commands/state_stages.go index cdf88f5da27..9be136fa832 100644 --- a/cmd/integration/commands/state_stages.go +++ b/cmd/integration/commands/state_stages.go @@ -23,7 +23,6 @@ import ( "github.com/ledgerwatch/erigon-lib/common/hexutility" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/bitmapdb" - "github.com/ledgerwatch/erigon-lib/kv/kvcfg" "github.com/ledgerwatch/erigon-lib/kv/temporal/historyv2" "github.com/ledgerwatch/erigon/cmd/hack/tool/fromdb" @@ -181,7 +180,7 @@ func syncBySmallSteps(db kv.RwDB, miningConfig params.MiningConfig, ctx context. defer borSn.Close() defer agg.Close() engine, vmConfig, stateStages, miningStages, miner := newSync(ctx, db, &miningConfig, logger1) - chainConfig, historyV3, pm := fromdb.ChainConfig(db), kvcfg.HistoryV3.FromDB(db), fromdb.PruneMode(db) + chainConfig, pm := fromdb.ChainConfig(db), fromdb.PruneMode(db) tx, err := db.BeginRw(ctx) if err != nil { @@ -224,7 +223,7 @@ func syncBySmallSteps(db kv.RwDB, miningConfig params.MiningConfig, ctx context. 
syncCfg.ReconWorkerCount = int(reconWorkers) br, _ := blocksIO(db, logger1) - execCfg := stagedsync.StageExecuteBlocksCfg(db, pm, batchSize, changeSetHook, chainConfig, engine, vmConfig, changesAcc, false, true, historyV3, dirs, + execCfg := stagedsync.StageExecuteBlocksCfg(db, pm, batchSize, changeSetHook, chainConfig, engine, vmConfig, changesAcc, false, true, dirs, br, nil, genesis, syncCfg, agg, nil) execUntilFunc := func(execToBlock uint64) func(firstCycle bool, badBlockUnwind bool, stageState *stagedsync.StageState, unwinder stagedsync.Unwinder, txc wrap.TxContainer, logger log.Logger) error { @@ -461,7 +460,6 @@ func loopIh(db kv.RwDB, ctx context.Context, unwind uint64, logger log.Logger) e defer agg.Close() _, _, sync, _, _ := newSync(ctx, db, nil /* miningConfig */, logger) dirs := datadir.New(datadirCli) - historyV3 := kvcfg.HistoryV3.FromDB(db) tx, err := db.BeginRw(ctx) if err != nil { @@ -476,12 +474,13 @@ func loopIh(db kv.RwDB, ctx context.Context, unwind uint64, logger log.Logger) e to := execStage.BlockNumber - unwind _ = sync.SetCurrentStage(stages.HashState) u := &stagedsync.UnwindState{ID: stages.HashState, UnwindPoint: to} - if err = stagedsync.UnwindHashStateStage(u, stage(sync, tx, nil, stages.HashState), tx, stagedsync.StageHashStateCfg(db, dirs, historyV3), ctx, logger); err != nil { + if err = stagedsync.UnwindHashStateStage(u, stage(sync, tx, nil, stages.HashState), tx, stagedsync.StageHashStateCfg(db, dirs), ctx, logger); err != nil { return err } _ = sync.SetCurrentStage(stages.IntermediateHashes) u = &stagedsync.UnwindState{ID: stages.IntermediateHashes, UnwindPoint: to} br, _ := blocksIO(db, logger) + historyV3 := true if err = stagedsync.UnwindIntermediateHashesStage(u, stage(sync, tx, nil, stages.IntermediateHashes), tx, stagedsync.StageTrieCfg(db, true, true, false, dirs.Tmp, br, nil, historyV3, agg), ctx, logger); err != nil { return err @@ -549,10 +548,6 @@ func loopExec(db kv.RwDB, ctx context.Context, unwind uint64, logger log.Logger) sync.EnableStages(stages.Execution) var batchSize datasize.ByteSize must(batchSize.UnmarshalText([]byte(batchSizeStr))) - historyV3, err := kvcfg.HistoryV3.Enabled(tx) - if err != nil { - return err - } from := progress(tx, stages.Execution) to := from + unwind @@ -565,7 +560,7 @@ func loopExec(db kv.RwDB, ctx context.Context, unwind uint64, logger log.Logger) br, _ := blocksIO(db, logger) cfg := stagedsync.StageExecuteBlocksCfg(db, pm, batchSize, nil, chainConfig, engine, vmConfig, nil, /*stateStream=*/ false, - /*badBlockHalt=*/ true, historyV3, dirs, br, nil, genesis, syncCfg, agg, nil) + /*badBlockHalt=*/ true, dirs, br, nil, genesis, syncCfg, agg, nil) // set block limit of execute stage sync.MockExecFunc(stages.Execution, func(firstCycle bool, badBlockUnwind bool, stageState *stagedsync.StageState, unwinder stagedsync.Unwinder, txc wrap.TxContainer, logger log.Logger) error { diff --git a/cmd/rpcdaemon/cli/config.go b/cmd/rpcdaemon/cli/config.go index 026c6686715..08a2eeacb5b 100644 --- a/cmd/rpcdaemon/cli/config.go +++ b/cmd/rpcdaemon/cli/config.go @@ -33,7 +33,6 @@ import ( txpool "github.com/ledgerwatch/erigon-lib/gointerfaces/txpoolproto" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/kvcache" - "github.com/ledgerwatch/erigon-lib/kv/kvcfg" kv2 "github.com/ledgerwatch/erigon-lib/kv/mdbx" "github.com/ledgerwatch/erigon-lib/kv/rawdbv3" "github.com/ledgerwatch/erigon-lib/kv/remotedb" @@ -279,7 +278,7 @@ func EmbeddedServices(ctx context.Context, // ... 
adding back in place to see about the above statement stateCache = kvcache.New(stateCacheCfg) } else { - stateCache = kvcache.NewDummy(stateCacheCfg.StateV3) + stateCache = kvcache.NewDummy() } subscribeToStateChangesLoop(ctx, stateDiffClient, stateCache) @@ -435,20 +434,11 @@ func RemoteServices(ctx context.Context, cfg *httpcfg.HttpCfg, logger log.Logger onNewSnapshot() blockReader = freezeblocks.NewBlockReader(allSnapshots, allBorSnapshots) - var histV3Enabled bool - _ = db.View(ctx, func(tx kv.Tx) error { - histV3Enabled, _ = kvcfg.HistoryV3.Enabled(tx) - return nil - }) - cfg.StateCache.StateV3 = histV3Enabled - if histV3Enabled { - logger.Info("HistoryV3", "enable", histV3Enabled) - db, err = temporal.New(rwKv, agg) - if err != nil { - return nil, nil, nil, nil, nil, nil, nil, nil, nil, err - } + db, err = temporal.New(rwKv, agg) + if err != nil { + return nil, nil, nil, nil, nil, nil, nil, nil, nil, err } - stateCache = kvcache.NewDummy(cfg.StateCache.StateV3) + stateCache = kvcache.NewDummy() } // If DB can't be configured - used PrivateApiAddr as remote DB if db == nil { @@ -459,7 +449,7 @@ func RemoteServices(ctx context.Context, cfg *httpcfg.HttpCfg, logger log.Logger if cfg.StateCache.CacheSize > 0 { stateCache = kvcache.New(cfg.StateCache) } else { - stateCache = kvcache.NewDummy(cfg.StateCache.StateV3) + stateCache = kvcache.NewDummy() } logger.Info("if you run RPCDaemon on same machine with Erigon add --datadir option") } diff --git a/cmd/state/commands/check_change_sets.go b/cmd/state/commands/check_change_sets.go deleted file mode 100644 index aaecc9fb067..00000000000 --- a/cmd/state/commands/check_change_sets.go +++ /dev/null @@ -1,295 +0,0 @@ -package commands - -import ( - "bytes" - "context" - "fmt" - "os" - "os/signal" - "sort" - "syscall" - "time" - - "github.com/ledgerwatch/log/v3" - "github.com/spf13/cobra" - - chain2 "github.com/ledgerwatch/erigon-lib/chain" - libcommon "github.com/ledgerwatch/erigon-lib/common" - "github.com/ledgerwatch/erigon-lib/common/datadir" - "github.com/ledgerwatch/erigon-lib/common/hexutility" - "github.com/ledgerwatch/erigon-lib/kv" - kv2 "github.com/ledgerwatch/erigon-lib/kv/mdbx" - "github.com/ledgerwatch/erigon-lib/kv/temporal/historyv2" - - "github.com/ledgerwatch/erigon/consensus" - "github.com/ledgerwatch/erigon/core/state" - "github.com/ledgerwatch/erigon/core/systemcontracts" - "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/core/vm" - "github.com/ledgerwatch/erigon/eth/ethconfig" - "github.com/ledgerwatch/erigon/eth/ethconsensusconfig" - "github.com/ledgerwatch/erigon/eth/stagedsync/stages" - "github.com/ledgerwatch/erigon/node/nodecfg" - "github.com/ledgerwatch/erigon/params" - "github.com/ledgerwatch/erigon/turbo/debug" - "github.com/ledgerwatch/erigon/turbo/services" - "github.com/ledgerwatch/erigon/turbo/snapshotsync/freezeblocks" -) - -var ( - historyfile string - nocheck bool -) - -func init() { - withBlock(checkChangeSetsCmd) - withDataDir(checkChangeSetsCmd) - checkChangeSetsCmd.Flags().StringVar(&historyfile, "historyfile", "", "path to the file where the changesets and history are expected to be. 
If omitted, the same as /erion/chaindata") - checkChangeSetsCmd.Flags().BoolVar(&nocheck, "nocheck", false, "set to turn off the changeset checking and only execute transaction (for performance testing)") - rootCmd.AddCommand(checkChangeSetsCmd) -} - -var checkChangeSetsCmd = &cobra.Command{ - Use: "checkChangeSets", - Short: "Re-executes historical transactions in read-only mode and checks that their outputs match the database ChangeSets", - RunE: func(cmd *cobra.Command, args []string) error { - logger := debug.SetupCobra(cmd, "check_change_sets") - return CheckChangeSets(cmd.Context(), genesis, block, chaindata, historyfile, nocheck, logger) - }, -} - -// CheckChangeSets re-executes historical transactions in read-only mode -// and checks that their outputs match the database ChangeSets. -func CheckChangeSets(ctx context.Context, genesis *types.Genesis, blockNum uint64, chaindata string, historyfile string, nocheck bool, logger log.Logger) error { - if len(historyfile) == 0 { - historyfile = chaindata - } - - startTime := time.Now() - sigs := make(chan os.Signal, 1) - interruptCh := make(chan bool, 1) - signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM) - - go func() { - <-sigs - interruptCh <- true - }() - - db, err := kv2.NewMDBX(logger).Path(chaindata).Open(ctx) - if err != nil { - return err - } - dirs := datadir.New(datadirCli) - allSnapshots := freezeblocks.NewRoSnapshots(ethconfig.NewSnapCfg(true, false, true), dirs.Snap, 0, logger) - defer allSnapshots.Close() - if err := allSnapshots.ReopenFolder(); err != nil { - return fmt.Errorf("reopen snapshot segments: %w", err) - } - allBorSnapshots := freezeblocks.NewBorRoSnapshots(ethconfig.Defaults.Snapshot, dirs.Snap, 0, logger) - blockReader := freezeblocks.NewBlockReader(allSnapshots, allBorSnapshots) - - chainDb := db - defer chainDb.Close() - historyDb := chainDb - if chaindata != historyfile { - historyDb = kv2.MustOpen(historyfile) - } - historyTx, err1 := historyDb.BeginRo(ctx) - if err1 != nil { - return err1 - } - defer historyTx.Rollback() - chainConfig := genesis.Config - vmConfig := vm.Config{} - - noOpWriter := state.NewNoopWriter() - - interrupt := false - rwtx, err := chainDb.BeginRw(ctx) - if err != nil { - return err - } - defer rwtx.Rollback() - - execAt, err1 := stages.GetStageProgress(rwtx, stages.Execution) - if err1 != nil { - return err1 - } - historyAt, err1 := stages.GetStageProgress(rwtx, stages.StorageHistoryIndex) - if err1 != nil { - return err1 - } - - commitEvery := time.NewTicker(30 * time.Second) - defer commitEvery.Stop() - - engine := initConsensusEngine(ctx, chainConfig, blockReader, logger) - - for !interrupt { - - if blockNum > execAt { - log.Warn(fmt.Sprintf("Force stop: because trying to check blockNumber=%d higher than Exec stage=%d", blockNum, execAt)) - break - } - if blockNum > historyAt { - log.Warn(fmt.Sprintf("Force stop: because trying to check blockNumber=%d higher than History stage=%d", blockNum, historyAt)) - break - } - - blockHash, err := blockReader.CanonicalHash(ctx, historyTx, blockNum) - if err != nil { - return err - } - var b *types.Block - b, _, err = blockReader.BlockWithSenders(ctx, historyTx, blockHash, blockNum) - if err != nil { - return err - } - if b == nil { - break - } - reader := state.NewPlainState(historyTx, blockNum, systemcontracts.SystemContractCodeLookup[chainConfig.ChainName]) - //reader.SetTrace(blockNum == uint64(block)) - intraBlockState := state.New(reader) - csw := state.NewChangeSetWriterPlain(nil /* db */, blockNum) - var blockWriter 
state.StateWriter - if nocheck { - blockWriter = noOpWriter - } else { - blockWriter = csw - } - - getHeader := func(hash libcommon.Hash, number uint64) *types.Header { - h, e := blockReader.Header(ctx, rwtx, hash, number) - if e != nil { - panic(e) - } - return h - } - receipts, err1 := runBlock(engine, intraBlockState, noOpWriter, blockWriter, chainConfig, getHeader, b, vmConfig, blockNum == block, logger) - if err1 != nil { - return err1 - } - if chainConfig.IsByzantium(blockNum) { - receiptSha := types.DeriveSha(receipts) - if receiptSha != b.ReceiptHash() { - return fmt.Errorf("mismatched receipt headers for block %d", blockNum) - } - } - - if !nocheck { - accountChanges, err := csw.GetAccountChanges() - if err != nil { - return err - } - sort.Sort(accountChanges) - i := 0 - match := true - err = historyv2.ForPrefix(historyTx, kv.AccountChangeSet, hexutility.EncodeTs(blockNum), func(blockN uint64, k, v []byte) error { - if i >= len(accountChanges.Changes) { - if len(v) != 0 { - fmt.Printf("Unexpected account changes in block %d\n", blockNum) - fmt.Printf("In the database: ======================\n") - fmt.Printf("%d: 0x%x: %x\n", i, k, v) - match = false - } - i++ - return nil - } - c := accountChanges.Changes[i] - if bytes.Equal(c.Key, k) && bytes.Equal(c.Value, v) { - i++ - return nil - } - if len(v) == 0 { - return nil - } - - match = false - fmt.Printf("Unexpected account changes in block %d\n", blockNum) - fmt.Printf("In the database: ======================\n") - fmt.Printf("%d: 0x%x: %x\n", i, k, v) - fmt.Printf("Expected: ==========================\n") - fmt.Printf("%d: 0x%x %x\n", i, c.Key, c.Value) - i++ - return nil - }) - if err != nil { - return err - } - - if !match { - return fmt.Errorf("check change set failed") - } - - i = 0 - expectedStorageChanges, err := csw.GetStorageChanges() - if err != nil { - return err - } - if expectedStorageChanges == nil { - expectedStorageChanges = historyv2.NewChangeSet() - } - sort.Sort(expectedStorageChanges) - match = true - err = historyv2.ForPrefix(historyTx, kv.StorageChangeSet, hexutility.EncodeTs(blockNum), func(blockN uint64, k, v []byte) error { - if i >= len(expectedStorageChanges.Changes) { - fmt.Printf("Unexpected storage changes in block %d\nIn the database: ======================\n", blockNum) - fmt.Printf("0x%x: %x\n", k, v) - match = false - i++ - return nil - } - c := expectedStorageChanges.Changes[i] - i++ - if bytes.Equal(c.Key, k) && bytes.Equal(c.Value, v) { - return nil - } - match = false - fmt.Printf("Unexpected storage changes in block %d\nIn the database: ======================\n", blockNum) - fmt.Printf("0x%x: %x\n", k, v) - fmt.Printf("Expected: ==========================\n") - fmt.Printf("0x%x %x\n", c.Key, c.Value) - i++ - return nil - }) - if err != nil { - return err - } - if !match { - return fmt.Errorf("check change set failed") - } - } - - blockNum++ - if blockNum%1000 == 0 { - logger.Info("Checked", "blocks", blockNum) - } - - // Check for interrupts - select { - case interrupt = <-interruptCh: - fmt.Println("interrupted, please wait for cleanup...") - default: - } - } - logger.Info("Checked", "blocks", blockNum, "next time specify --block", blockNum, "duration", time.Since(startTime)) - return nil -} - -func initConsensusEngine(ctx context.Context, cc *chain2.Config, blockReader services.FullBlockReader, logger log.Logger) (engine consensus.Engine) { - config := ethconfig.Defaults - - var consensusConfig interface{} - - if cc.Clique != nil { - consensusConfig = params.CliqueSnapshot - } else if cc.Aura 
!= nil { - consensusConfig = &config.Aura - } else if cc.Bor != nil { - consensusConfig = cc.Bor - } else { - consensusConfig = &config.Ethash - } - return ethconsensusconfig.CreateConsensusEngine(ctx, &nodecfg.Config{Dirs: datadir.New(datadirCli)}, cc, consensusConfig, config.Miner.Notify, config.Miner.Noverify, nil /* heimdallClient */, config.WithoutHeimdall, blockReader, true /* readonly */, logger) -} diff --git a/cmd/state/commands/opcode_tracer.go b/cmd/state/commands/opcode_tracer.go index 3f19b3bbe80..f2876e989bf 100644 --- a/cmd/state/commands/opcode_tracer.go +++ b/cmd/state/commands/opcode_tracer.go @@ -21,7 +21,6 @@ import ( chain2 "github.com/ledgerwatch/erigon-lib/chain" libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon-lib/kv/kvcfg" "github.com/ledgerwatch/erigon-lib/kv/mdbx" "github.com/ledgerwatch/erigon/common/debug" @@ -422,14 +421,6 @@ func OpcodeTracer(genesis *types.Genesis, blockNum uint64, chaindata string, num } defer historyTx.Rollback() - var historyV3 bool - chainDb.View(context.Background(), func(tx kv.Tx) (err error) { - historyV3, err = kvcfg.HistoryV3.Enabled(tx) - if err != nil { - return err - } - return nil - }) dirs := datadir2.New(filepath.Dir(chainDb.(*mdbx.MdbxKV).Path())) blockReader := freezeblocks.NewBlockReader(freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, dirs.Snap, 0, log.New()), nil /* BorSnapshots */) @@ -588,7 +579,7 @@ func OpcodeTracer(genesis *types.Genesis, blockNum uint64, chaindata string, num ot.fsumWriter = bufio.NewWriter(fsum) } - dbstate, err := rpchelper.CreateHistoryStateReader(historyTx, block.NumberU64(), 0, historyV3, chainConfig.ChainName) + dbstate, err := rpchelper.CreateHistoryStateReader(historyTx, block.NumberU64(), 0, chainConfig.ChainName) if err != nil { return err } diff --git a/cmd/state/commands/state_root.go b/cmd/state/commands/state_root.go index bb39e75da35..d3281879167 100644 --- a/cmd/state/commands/state_root.go +++ b/cmd/state/commands/state_root.go @@ -13,7 +13,6 @@ import ( libcommon "github.com/ledgerwatch/erigon-lib/common" datadir2 "github.com/ledgerwatch/erigon-lib/common/datadir" "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon-lib/kv/kvcfg" kv2 "github.com/ledgerwatch/erigon-lib/kv/mdbx" "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/core/rawdb/blockio" @@ -48,16 +47,9 @@ var stateRootCmd = &cobra.Command{ } func blocksIO(db kv.RoDB) (services.FullBlockReader, *blockio.BlockWriter) { - var histV3 bool - if err := db.View(context.Background(), func(tx kv.Tx) error { - histV3, _ = kvcfg.HistoryV3.Enabled(tx) - return nil - }); err != nil { - panic(err) - } dirs := datadir2.New(filepath.Dir(db.(*kv2.MdbxKV).Path())) br := freezeblocks.NewBlockReader(freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, dirs.Snap, 0, log.New()), nil /* BorSnapshots */) - bw := blockio.NewBlockWriter(histV3) + bw := blockio.NewBlockWriter() return br, bw } @@ -162,7 +154,7 @@ func StateRoot(ctx context.Context, genesis *types.Genesis, blockNum uint64, dat if err = rwTx.ClearBucket(kv.HashedStorage); err != nil { return err } - if err = stagedsync.PromoteHashedStateCleanly("hashedstate", rwTx, stagedsync.StageHashStateCfg(nil, dirs, false), ctx, logger); err != nil { + if err = stagedsync.PromoteHashedStateCleanly("hashedstate", rwTx, stagedsync.StageHashStateCfg(nil, dirs), ctx, logger); err != nil { return err } var root libcommon.Hash diff --git 
a/cmd/state/verify/verify_txlookup.go b/cmd/state/verify/verify_txlookup.go index be2d380407b..bc0f57cb9b9 100644 --- a/cmd/state/verify/verify_txlookup.go +++ b/cmd/state/verify/verify_txlookup.go @@ -13,7 +13,6 @@ import ( libcommon "github.com/ledgerwatch/erigon-lib/common" datadir2 "github.com/ledgerwatch/erigon-lib/common/datadir" "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon-lib/kv/kvcfg" "github.com/ledgerwatch/erigon-lib/kv/mdbx" "github.com/ledgerwatch/erigon/core/rawdb/blockio" "github.com/ledgerwatch/erigon/eth/ethconfig" @@ -23,16 +22,9 @@ import ( ) func blocksIO(db kv.RoDB) (services.FullBlockReader, *blockio.BlockWriter) { - var histV3 bool - if err := db.View(context.Background(), func(tx kv.Tx) error { - histV3, _ = kvcfg.HistoryV3.Enabled(tx) - return nil - }); err != nil { - panic(err) - } dirs := datadir2.New(filepath.Dir(db.(*mdbx.MdbxKV).Path())) br := freezeblocks.NewBlockReader(freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, dirs.Snap, 0, log.New()), nil /* BorSnapshots */) - bw := blockio.NewBlockWriter(histV3) + bw := blockio.NewBlockWriter() return br, bw } diff --git a/cmd/txpool/main.go b/cmd/txpool/main.go index 2003a4ce082..60cca0b676e 100644 --- a/cmd/txpool/main.go +++ b/cmd/txpool/main.go @@ -16,7 +16,6 @@ import ( remote "github.com/ledgerwatch/erigon-lib/gointerfaces/remoteproto" proto_sentry "github.com/ledgerwatch/erigon-lib/gointerfaces/sentryproto" "github.com/ledgerwatch/erigon-lib/kv/kvcache" - "github.com/ledgerwatch/erigon-lib/kv/kvcfg" "github.com/ledgerwatch/erigon-lib/kv/remotedb" "github.com/ledgerwatch/erigon-lib/kv/remotedbserver" "github.com/ledgerwatch/erigon-lib/txpool" @@ -158,7 +157,6 @@ func doTxpool(ctx context.Context, logger log.Logger) error { cacheConfig := kvcache.DefaultCoherentConfig cacheConfig.MetricsLabel = "txpool" - cacheConfig.StateV3 = kvcfg.HistoryV3.FromDB(coreDB) //TODO: cache to txpool db cfg.TracedSenders = make([]string, len(traceSenders)) for i, senderHex := range traceSenders { diff --git a/core/genesis_test.go b/core/genesis_test.go index d29536c0226..d45eb444d46 100644 --- a/core/genesis_test.go +++ b/core/genesis_test.go @@ -28,7 +28,7 @@ import ( func TestGenesisBlockHashes(t *testing.T) { t.Parallel() logger := log.New() - _, db, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir())) + db, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir())) check := func(network string) { genesis := core.GenesisBlockByChainName(network) tx, err := db.BeginRw(context.Background()) @@ -88,7 +88,7 @@ func TestGenesisBlockRoots(t *testing.T) { func TestCommitGenesisIdempotency(t *testing.T) { t.Parallel() logger := log.New() - _, db, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir())) + db, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir())) tx, err := db.BeginRw(context.Background()) require.NoError(t, err) defer tx.Rollback() @@ -133,7 +133,7 @@ func TestAllocConstructor(t *testing.T) { defer tx.Rollback() //TODO: support historyV3 - reader, err := rpchelper.CreateHistoryStateReader(tx, 1, 0, m.HistoryV3, genSpec.Config.ChainName) + reader, err := rpchelper.CreateHistoryStateReader(tx, 1, 0, genSpec.Config.ChainName) require.NoError(err) state := state.New(reader) balance := state.GetBalance(address) diff --git a/core/genesis_write.go b/core/genesis_write.go index 915718f63e9..9520adf4b4c 100644 --- a/core/genesis_write.go +++ b/core/genesis_write.go @@ -20,7 +20,6 @@ import ( "context" "crypto/ecdsa" "embed" - "encoding/binary" "encoding/json" "fmt" "math/big" 
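(Illustrative sketch, not part of the diff: with this series temporaltest.NewTestDB no longer returns the leading histV3 flag, since HistoryV3 is always on. A minimal test using the new two-value shape, assuming the same imports as the genesis tests above; TestHistoryV3AlwaysOn is a hypothetical name.)

	func TestHistoryV3AlwaysOn(t *testing.T) {
		t.Parallel()
		// New form: (db, agg). The old call was: _, db, _ := temporaltest.NewTestDB(t, dirs)
		db, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir()))
		tx, err := db.BeginRw(context.Background())
		require.NoError(t, err)
		defer tx.Rollback()
	}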
@@ -36,7 +35,6 @@ import ( libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/hexutil" "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon-lib/kv/kvcfg" "github.com/ledgerwatch/erigon-lib/kv/mdbx" "github.com/ledgerwatch/erigon-lib/kv/rawdbv3" "github.com/ledgerwatch/erigon/common" @@ -186,27 +184,9 @@ func WriteGenesisState(g *types.Genesis, tx kv.RwTx, tmpDir string, logger log.L if err != nil { return nil, nil, err } - histV3, err := kvcfg.HistoryV3.Enabled(tx) - if err != nil { - panic(err) - } var stateWriter state.StateWriter - if histV3 { - stateWriter = state.NewNoopWriter() - } else { - for addr, account := range g.Alloc { - if len(account.Code) > 0 || len(account.Storage) > 0 { - // Special case for weird tests - inaccessible storage - var b [8]byte - binary.BigEndian.PutUint64(b[:], state.FirstContractIncarnation) - if err := tx.Put(kv.IncarnationMap, addr[:], b[:]); err != nil { - return nil, nil, err - } - } - } - stateWriter = state.NewPlainStateWriter(tx, tx, 0) - } + stateWriter = state.NewNoopWriter() if block.Number().Sign() != 0 { return nil, statedb, fmt.Errorf("can't commit genesis block with number > 0") @@ -215,16 +195,6 @@ func WriteGenesisState(g *types.Genesis, tx kv.RwTx, tmpDir string, logger log.L return nil, statedb, fmt.Errorf("cannot write state: %w", err) } - if !histV3 { - if csw, ok := stateWriter.(state.WriterWithChangeSets); ok { - if err := csw.WriteChangeSets(); err != nil { - return nil, statedb, fmt.Errorf("cannot write change sets: %w", err) - } - if err := csw.WriteHistory(); err != nil { - return nil, statedb, fmt.Errorf("cannot write history: %w", err) - } - } - } return block, statedb, nil } diff --git a/core/rawdb/blockio/block_writer.go b/core/rawdb/blockio/block_writer.go index 4de3d4820ab..21151f0299c 100644 --- a/core/rawdb/blockio/block_writer.go +++ b/core/rawdb/blockio/block_writer.go @@ -25,15 +25,10 @@ import ( // BlockReader can read blocks from db and snapshots type BlockWriter struct { - historyV3 bool - - // adding Auto-Increment BlockID - // allow store non-canonical Txs/Senders - txsV3 bool } -func NewBlockWriter(historyV3 bool) *BlockWriter { - return &BlockWriter{historyV3: historyV3, txsV3: true} +func NewBlockWriter() *BlockWriter { + return &BlockWriter{} } func (w *BlockWriter) FillHeaderNumberIndex(logPrefix string, tx kv.RwTx, tmpDir string, from, to uint64, ctx context.Context, logger log.Logger) error { @@ -59,23 +54,19 @@ func (w *BlockWriter) FillHeaderNumberIndex(logPrefix string, tx kv.RwTx, tmpDir } func (w *BlockWriter) MakeBodiesCanonical(tx kv.RwTx, from uint64) error { - if w.historyV3 { - if err := rawdb.AppendCanonicalTxNums(tx, from); err != nil { - var e1 rawdbv3.ErrTxNumsAppendWithGap - if ok := errors.As(err, &e1); ok { - // try again starting from latest available block - return rawdb.AppendCanonicalTxNums(tx, e1.LastBlock()+1) - } - return err + if err := rawdb.AppendCanonicalTxNums(tx, from); err != nil { + var e1 rawdbv3.ErrTxNumsAppendWithGap + if ok := errors.As(err, &e1); ok { + // try again starting from latest available block + return rawdb.AppendCanonicalTxNums(tx, e1.LastBlock()+1) } + return err } return nil } func (w *BlockWriter) MakeBodiesNonCanonical(tx kv.RwTx, from uint64) error { - if w.historyV3 { - if err := rawdbv3.TxNums.Truncate(tx, from); err != nil { - return err - } + if err := rawdbv3.TxNums.Truncate(tx, from); err != nil { + return err } return nil } diff --git a/core/rawdb/rawdbreset/reset_stages.go 
b/core/rawdb/rawdbreset/reset_stages.go index 622a5e95b0a..1bd985b2c90 100644 --- a/core/rawdb/rawdbreset/reset_stages.go +++ b/core/rawdb/rawdbreset/reset_stages.go @@ -8,10 +8,8 @@ import ( "github.com/ledgerwatch/erigon-lib/common/datadir" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/backup" - "github.com/ledgerwatch/erigon-lib/kv/kvcfg" "github.com/ledgerwatch/erigon-lib/kv/temporal" "github.com/ledgerwatch/erigon-lib/state" - "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/rawdb/blockio" "github.com/ledgerwatch/erigon/eth/stagedsync" @@ -121,25 +119,18 @@ func WarmupExec(ctx context.Context, db kv.RwDB) (err error) { for _, tbl := range stateBuckets { backup.WarmupTable(ctx, db, tbl, log.LvlInfo, backup.ReadAheadThreads) } - historyV3 := kvcfg.HistoryV3.FromDB(db) - if historyV3 { //hist v2 is too big, if you have so much ram, just use `cat mdbx.dat > /dev/null` to warmup - for _, tbl := range stateHistoryV3Buckets { - backup.WarmupTable(ctx, db, tbl, log.LvlInfo, backup.ReadAheadThreads) - } + for _, tbl := range stateHistoryV3Buckets { + backup.WarmupTable(ctx, db, tbl, log.LvlInfo, backup.ReadAheadThreads) } return } func ResetExec(ctx context.Context, db kv.RwDB, chain string, tmpDir string, logger log.Logger) (err error) { - historyV3 := kvcfg.HistoryV3.FromDB(db) - cleanupList := make([]string, 0) - if historyV3 { - cleanupList = append(cleanupList, stateBuckets...) - cleanupList = append(cleanupList, stateHistoryBuckets...) - cleanupList = append(cleanupList, stateHistoryV3Buckets...) - cleanupList = append(cleanupList, stateV3Buckets...) - } + cleanupList = append(cleanupList, stateBuckets...) + cleanupList = append(cleanupList, stateHistoryBuckets...) + cleanupList = append(cleanupList, stateHistoryV3Buckets...) + cleanupList = append(cleanupList, stateV3Buckets...) 
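	// (Editorial annotation, not in the commit: with HistoryV3 now the only
	// supported mode, the cleanup list is built unconditionally, so plain-state,
	// legacy history, v3 history and v3 domain buckets are all cleared on reset.)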
return db.Update(ctx, func(tx kv.RwTx) error { if err := clearStageProgress(tx, stages.Execution, stages.HashState, stages.IntermediateHashes); err != nil { @@ -149,30 +140,22 @@ func ResetExec(ctx context.Context, db kv.RwDB, chain string, tmpDir string, log if err := backup.ClearTables(ctx, db, tx, cleanupList...); err != nil { return nil } - if !historyV3 { - _ = stages.SaveStageProgress(tx, stages.Execution, 0) - genesis := core.GenesisBlockByChainName(chain) - if _, _, err := core.WriteGenesisState(genesis, tx, tmpDir, logger); err != nil { - return err - } - } else { - v3db := db.(*temporal.DB) - agg := v3db.Agg() - aggTx := agg.BeginFilesRo() - defer aggTx.Close() - doms, err := state.NewSharedDomains(tx, logger) - if err != nil { - return err - } - defer doms.Close() + v3db := db.(*temporal.DB) + agg := v3db.Agg() + aggTx := agg.BeginFilesRo() + defer aggTx.Close() + doms, err := state.NewSharedDomains(tx, logger) + if err != nil { + return err + } + defer doms.Close() - _ = stages.SaveStageProgress(tx, stages.Execution, doms.BlockNum()) - mxs := agg.EndTxNumMinimax() / agg.StepSize() - if mxs > 0 { - mxs-- - } - log.Info("[reset] exec", "toBlock", doms.BlockNum(), "toTxNum", doms.TxNum(), "maxStepInFiles", mxs) + _ = stages.SaveStageProgress(tx, stages.Execution, doms.BlockNum()) + mxs := agg.EndTxNumMinimax() / agg.StepSize() + if mxs > 0 { + mxs-- } + log.Info("[reset] exec", "toBlock", doms.BlockNum(), "toTxNum", doms.TxNum(), "maxStepInFiles", mxs) return nil }) diff --git a/core/state/plain_readonly.go b/core/state/plain_readonly.go index 6f77efce1bc..b002ae61197 100644 --- a/core/state/plain_readonly.go +++ b/core/state/plain_readonly.go @@ -29,7 +29,6 @@ import ( libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/length" "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon-lib/kv/kvcfg" "github.com/ledgerwatch/log/v3" "github.com/ledgerwatch/erigon/core/state/historyv2read" @@ -58,10 +57,6 @@ type PlainState struct { } func NewPlainState(tx kv.Tx, blockNr uint64, systemContractLookup map[libcommon.Address][]libcommon.CodeRecord) *PlainState { - histV3, _ := kvcfg.HistoryV3.Enabled(tx) - if histV3 { - panic("Please use HistoryStateReaderV3 with HistoryV3") - } ps := &PlainState{ tx: tx, blockNr: blockNr, diff --git a/core/state/state_test.go b/core/state/state_test.go index f0f2242ab25..4ae22af0a42 100644 --- a/core/state/state_test.go +++ b/core/state/state_test.go @@ -25,7 +25,6 @@ import ( "github.com/ledgerwatch/erigon-lib/chain" "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon-lib/kv/kvcfg" "github.com/ledgerwatch/erigon-lib/kv/memdb" checker "gopkg.in/check.v1" @@ -73,10 +72,7 @@ func (s *StateSuite) TestDump(c *checker.C) { } defer tx.Rollback() - historyV3, err := kvcfg.HistoryV3.Enabled(tx) - if err != nil { - panic(err) - } + historyV3 := false //TODO: https://github.com/ledgerwatch/erigon/issues/10323 got := string(NewDumper(tx, 1, historyV3).DefaultDump()) want := `{ "root": "71edff0130dd2385947095001c73d9e28d862fc286fca2b922ca6f6f3cddfdd2", @@ -369,10 +365,7 @@ func TestDump(t *testing.T) { } // check that dump contains the state objects that are in trie - historyV3, err := kvcfg.HistoryV3.Enabled(tx) - if err != nil { - panic(err) - } + historyV3 := false got := string(NewDumper(tx, 2, historyV3).DefaultDump()) want := `{ "root": "0000000000000000000000000000000000000000000000000000000000000000", diff --git 
a/core/test/domains_restart_test.go b/core/test/domains_restart_test.go index 70021f94232..ac383b8685f 100644 --- a/core/test/domains_restart_test.go +++ b/core/test/domains_restart_test.go @@ -22,7 +22,6 @@ import ( "github.com/ledgerwatch/erigon-lib/common/datadir" "github.com/ledgerwatch/erigon-lib/common/length" "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon-lib/kv/kvcfg" "github.com/ledgerwatch/erigon-lib/kv/mdbx" "github.com/ledgerwatch/erigon-lib/kv/rawdbv3" "github.com/ledgerwatch/erigon-lib/kv/temporal" @@ -60,12 +59,6 @@ func testDbAndAggregatorv3(t *testing.T, fpath string, aggStep uint64) (kv.RwDB, agg.DisableFsync() require.NoError(t, err) - // v3 setup - err = db.Update(context.Background(), func(tx kv.RwTx) error { - return kvcfg.HistoryV3.ForceWrite(tx, true) - }) - require.NoError(t, err) - tdb, err := temporal.New(db, agg) require.NoError(t, err) db = tdb diff --git a/core/vm/gas_table_test.go b/core/vm/gas_table_test.go index b0c0a8850c9..83d9088e472 100644 --- a/core/vm/gas_table_test.go +++ b/core/vm/gas_table_test.go @@ -146,7 +146,7 @@ var createGasTests = []struct { func TestCreateGas(t *testing.T) { t.Parallel() - _, db, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir())) + db, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir())) for i, tt := range createGasTests { address := libcommon.BytesToAddress([]byte("contract")) @@ -164,8 +164,8 @@ func TestCreateGas(t *testing.T) { defer domains.Close() txc.Doms = domains - stateReader = rpchelper.NewLatestStateReader(tx, true) - stateWriter = rpchelper.NewLatestStateWriter(txc, 0, true) + stateReader = rpchelper.NewLatestStateReader(tx) + stateWriter = rpchelper.NewLatestStateWriter(txc, 0) s := state.New(stateReader) s.CreateAccount(address, true) diff --git a/erigon-lib/common/dbg/dbg_env.go b/erigon-lib/common/dbg/dbg_env.go index 41b83c0d442..7096a122208 100644 --- a/erigon-lib/common/dbg/dbg_env.go +++ b/erigon-lib/common/dbg/dbg_env.go @@ -1,18 +1,18 @@ package dbg import ( - "fmt" "os" "strconv" "time" "github.com/c2h5oh/datasize" + "github.com/ledgerwatch/log/v3" ) func EnvString(envVarName string, defaultVal string) string { v, _ := os.LookupEnv(envVarName) if v != "" { - fmt.Printf("[dbg] env %s=%s\n", envVarName, v) + log.Info("[dbg] env", envVarName, v) return v } return defaultVal @@ -20,11 +20,11 @@ func EnvString(envVarName string, defaultVal string) string { func EnvBool(envVarName string, defaultVal bool) bool { v, _ := os.LookupEnv(envVarName) if v == "true" { - fmt.Printf("[dbg] env %s=%t\n", envVarName, true) + log.Info("[dbg] env", envVarName, true) return true } if v == "false" { - fmt.Printf("[dbg] env %s=%t\n", envVarName, false) + log.Info("[dbg] env", envVarName, false) return false } return defaultVal @@ -36,7 +36,7 @@ func EnvInt(envVarName string, defaultVal int) int { if err != nil { panic(err) } - fmt.Printf("[dbg] env %s=%d\n", envVarName, i) + log.Info("[dbg] env", envVarName, i) return i } return defaultVal @@ -48,7 +48,7 @@ func EnvDataSize(envVarName string, defaultVal datasize.ByteSize) datasize.ByteS if err != nil { panic(err) } - fmt.Printf("[dbg] env %s=%s\n", envVarName, val) + log.Info("[dbg] env", envVarName, val) return val } return defaultVal @@ -57,8 +57,7 @@ func EnvDataSize(envVarName string, defaultVal datasize.ByteSize) datasize.ByteS func EnvDuration(envVarName string, defaultVal time.Duration) time.Duration { v, _ := os.LookupEnv(envVarName) if v != "" { - fmt.Printf("[dbg] env %s=%s\n", envVarName, v) - + log.Info("[dbg] env", 
diff --git a/erigon-lib/kv/kvcache/cache_test.go b/erigon-lib/kv/kvcache/cache_test.go
index d4fb9a79368..8055aa1e68a 100644
--- a/erigon-lib/kv/kvcache/cache_test.go
+++ b/erigon-lib/kv/kvcache/cache_test.go
@@ -107,7 +107,7 @@ func TestEviction(t *testing.T) {
 	c := New(cfg)
 	dirs := datadir.New(t.TempDir())
-	_, db, _ := temporaltest.NewTestDB(t, dirs)
+	db, _ := temporaltest.NewTestDB(t, dirs)
 	k1, k2 := [20]byte{1}, [20]byte{2}
 
 	var id uint64
@@ -167,7 +167,7 @@ func TestAPI(t *testing.T) {
 	require := require.New(t)
 	c := New(DefaultCoherentConfig)
 	k1, k2 := [20]byte{1}, [20]byte{2}
-	_, db, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir()))
+	db, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir()))
 	get := func(key [20]byte, expectTxnID uint64) (res [1]chan []byte) {
 		wg := sync.WaitGroup{}
 		for i := 0; i < len(res); i++ {
@@ -357,7 +357,7 @@ func TestCode(t *testing.T) {
 	t.Skip("TODO: use state reader/writer instead of Put()")
 	require, ctx := require.New(t), context.Background()
 	c := New(DefaultCoherentConfig)
-	_, db, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir()))
+	db, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir()))
 	k1, k2 := [20]byte{1}, [20]byte{2}
 
 	_ = db.Update(ctx, func(tx kv.RwTx) error {
diff --git a/erigon-lib/kv/kvcache/dummy.go b/erigon-lib/kv/kvcache/dummy.go
index bb5b311c70f..2ca48855c60 100644
--- a/erigon-lib/kv/kvcache/dummy.go
+++ b/erigon-lib/kv/kvcache/dummy.go
@@ -30,7 +30,7 @@ type DummyCache struct {
 var _ Cache = (*DummyCache)(nil)     // compile-time interface check
 var _ CacheView = (*DummyView)(nil)  // compile-time interface check
 
-func NewDummy(stateV3 bool) *DummyCache { return &DummyCache{stateV3: stateV3} }
+func NewDummy() *DummyCache { return &DummyCache{stateV3: true} }
 func (c *DummyCache) View(_ context.Context, tx kv.Tx) (CacheView, error) {
 	return &DummyView{cache: c, tx: tx}, nil
 }
diff --git a/erigon-lib/kv/kvcfg/accessors_config.go b/erigon-lib/kv/kvcfg/accessors_config.go
index 5c68771e45d..5300277f317 100644
--- a/erigon-lib/kv/kvcfg/accessors_config.go
+++ b/erigon-lib/kv/kvcfg/accessors_config.go
@@ -24,10 +24,6 @@ import (
 
 type ConfigKey []byte
 
-var (
-	HistoryV3 = ConfigKey("history.v3")
-)
-
 func (k ConfigKey) Enabled(tx kv.Tx) (bool, error) { return kv.GetBool(tx, kv.DatabaseInfo, k) }
 func (k ConfigKey) FromDB(db kv.RoDB) (enabled bool) {
 	if err := db.View(context.Background(), func(tx kv.Tx) error {
diff --git a/erigon-lib/kv/temporal/kv_temporal.go b/erigon-lib/kv/temporal/kv_temporal.go
index 2b7d3f4ba16..67f026d1b56 100644
--- a/erigon-lib/kv/temporal/kv_temporal.go
+++ b/erigon-lib/kv/temporal/kv_temporal.go
@@ -6,7 +6,6 @@ import (
 	"github.com/ledgerwatch/erigon-lib/common"
 	"github.com/ledgerwatch/erigon-lib/kv"
 	"github.com/ledgerwatch/erigon-lib/kv/iter"
-	"github.com/ledgerwatch/erigon-lib/kv/kvcfg"
 	"github.com/ledgerwatch/erigon-lib/kv/mdbx"
 	"github.com/ledgerwatch/erigon-lib/kv/order"
 	"github.com/ledgerwatch/erigon-lib/state"
@@ -51,9 +50,6 @@ type DB struct {
 }
 
 func New(db kv.RwDB, agg *state.Aggregator) (*DB, error) {
-	if !kvcfg.HistoryV3.FromDB(db) {
-		panic("not supported")
-	}
 	return &DB{RwDB: db, agg: agg}, nil
 }
 func (db *DB) Agg() *state.Aggregator { return db.agg }
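Note: with the config-key check deleted from temporal.New above, wrapping a chain DB is now unconditional. A sketch of the wrapping step as the call sites in this patch perform it (the helper name is mine; temporal.DB embeds kv.RwDB, so the wrapped handle can replace the plain one):

package example

import (
	"github.com/ledgerwatch/erigon-lib/kv"
	"github.com/ledgerwatch/erigon-lib/kv/temporal"
	"github.com/ledgerwatch/erigon-lib/state"
)

// wrapTemporal mirrors the pattern in eth/backend.go and the test helper
// below: build the aggregator first, then wrap the plain RwDB.
func wrapTemporal(db kv.RwDB, agg *state.Aggregator) (kv.RwDB, error) {
	tdb, err := temporal.New(db, agg) // no longer panics when history.v3 is unset
	if err != nil {
		return nil, err
	}
	return tdb, nil
}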
diff --git a/erigon-lib/kv/temporal/temporaltest/kv_temporal_testdb.go b/erigon-lib/kv/temporal/temporaltest/kv_temporal_testdb.go
index 4d5c9852086..eb49c434a2d 100644
--- a/erigon-lib/kv/temporal/temporaltest/kv_temporal_testdb.go
+++ b/erigon-lib/kv/temporal/temporaltest/kv_temporal_testdb.go
@@ -7,7 +7,6 @@ import (
 	"github.com/ledgerwatch/erigon-lib/common/datadir"
 	"github.com/ledgerwatch/erigon-lib/config3"
 	"github.com/ledgerwatch/erigon-lib/kv"
-	"github.com/ledgerwatch/erigon-lib/kv/kvcfg"
 	"github.com/ledgerwatch/erigon-lib/kv/memdb"
 	"github.com/ledgerwatch/erigon-lib/kv/temporal"
 	"github.com/ledgerwatch/erigon-lib/state"
@@ -15,11 +14,10 @@ import (
 )
 
 // nolint:thelper
-func NewTestDB(tb testing.TB, dirs datadir.Dirs) (histV3 bool, db kv.RwDB, agg *state.Aggregator) {
+func NewTestDB(tb testing.TB, dirs datadir.Dirs) (db kv.RwDB, agg *state.Aggregator) {
 	if tb != nil {
 		tb.Helper()
 	}
-	historyV3 := true
 	logger := log.New()
 
 	if tb != nil {
@@ -27,15 +25,8 @@ func NewTestDB(tb testing.TB, dirs datadir.Dirs) (histV3 bool, db kv.RwDB, agg *
 	} else {
 		db = memdb.New(dirs.DataDir)
 	}
-	var err error
-	err = db.UpdateNosync(context.Background(), func(tx kv.RwTx) error {
-		_, _ = kvcfg.HistoryV3.WriteOnce(tx, historyV3)
-		return nil
-	})
-	if err != nil {
-		panic(err)
-	}
 
+	var err error
 	agg, err = state.NewAggregator(context.Background(), dirs, config3.HistoryV3AggregationStep, db, logger)
 	if err != nil {
 		panic(err)
@@ -48,5 +39,5 @@ func NewTestDB(tb testing.TB, dirs datadir.Dirs) (histV3 bool, db kv.RwDB, agg *
 	if err != nil {
 		panic(err)
 	}
-	return true, db, agg
+	return db, agg
 }
diff --git a/erigon-lib/txpool/pool.go b/erigon-lib/txpool/pool.go
index 5fcdad752c6..1b4f38eb9e0 100644
--- a/erigon-lib/txpool/pool.go
+++ b/erigon-lib/txpool/pool.go
@@ -50,7 +50,7 @@ import (
 	"github.com/ledgerwatch/erigon-lib/gointerfaces"
 	"github.com/ledgerwatch/erigon-lib/gointerfaces/grpcutil"
 	remote "github.com/ledgerwatch/erigon-lib/gointerfaces/remoteproto"
-	txpoolproto "github.com/ledgerwatch/erigon-lib/gointerfaces/txpoolproto"
+	"github.com/ledgerwatch/erigon-lib/gointerfaces/txpoolproto"
 	"github.com/ledgerwatch/erigon-lib/kv"
 	"github.com/ledgerwatch/erigon-lib/kv/kvcache"
 	"github.com/ledgerwatch/erigon-lib/kv/mdbx"
diff --git a/erigon-lib/txpool/pool_fuzz_test.go b/erigon-lib/txpool/pool_fuzz_test.go
index 0c7c23256d5..c8079abb2f4 100644
--- a/erigon-lib/txpool/pool_fuzz_test.go
+++ b/erigon-lib/txpool/pool_fuzz_test.go
@@ -313,7 +313,7 @@ func FuzzOnNewBlocks(f *testing.F) {
 	var prevHashes types.Hashes
 
 	ch := make(chan types.Announcements, 100)
-	_, coreDB, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir()))
+	coreDB, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir()))
 	db := memdb.NewTestPoolDB(t)
 
 	cfg := txpoolcfg.DefaultConfig
diff --git a/erigon-lib/txpool/pool_test.go b/erigon-lib/txpool/pool_test.go
index 2b13971c6f3..f84a992ba16 100644
--- a/erigon-lib/txpool/pool_test.go
+++ b/erigon-lib/txpool/pool_test.go
@@ -51,7 +51,7 @@ func TestNonceFromAddress(t *testing.T) {
 	assert, require := assert.New(t), require.New(t)
 	ch := make(chan types.Announcements, 100)
 
-	_, coreDB, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir()))
+	coreDB, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir()))
 	db := memdb.NewTestPoolDB(t)
 
 	cfg := txpoolcfg.DefaultConfig
@@ -173,7 +173,7 @@ func TestReplaceWithHigherFee(t *testing.T) {
 	t.Skip("TODO")
 	assert, require := assert.New(t), require.New(t)
 	ch := make(chan types.Announcements, 100)
-	_, coreDB, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir()))
+	coreDB, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir()))
 	db := memdb.NewTestPoolDB(t)
 
 	cfg := txpoolcfg.DefaultConfig
@@ -292,7 +292,7 @@ func TestReverseNonces(t *testing.T) {
 	t.Skip("TODO")
 	assert, require := assert.New(t), require.New(t)
 	ch := make(chan types.Announcements, 100)
-	_, coreDB, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir()))
+	coreDB, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir()))
 	db := memdb.NewTestPoolDB(t)
 
 	cfg := txpoolcfg.DefaultConfig
@@ -421,7 +421,7 @@ func TestTxPoke(t *testing.T) {
 	t.Skip("TODO")
 	assert, require := assert.New(t), require.New(t)
 	ch := make(chan types.Announcements, 100)
-	_, coreDB, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir()))
+	coreDB, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir()))
 	db := memdb.NewTestPoolDB(t)
 
 	cfg := txpoolcfg.DefaultConfig
@@ -682,7 +682,7 @@ func TestShanghaiValidateTx(t *testing.T) {
 	for name, test := range tests {
 		t.Run(name, func(t *testing.T) {
 			ch := make(chan types.Announcements, 100)
-			_, coreDB, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir()))
+			coreDB, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir()))
 
 			cfg := txpoolcfg.DefaultConfig
@@ -736,7 +736,7 @@ func TestBlobTxReplacement(t *testing.T) {
 	t.Skip("TODO")
 	assert, require := assert.New(t), require.New(t)
 	ch := make(chan types.Announcements, 5)
-	_, coreDB, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir()))
+	coreDB, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir()))
 	db := memdb.NewTestPoolDB(t)
 	cfg := txpoolcfg.DefaultConfig
@@ -951,7 +951,7 @@ func TestDropRemoteAtNoGossip(t *testing.T) {
 	t.Skip("TODO")
 	assert, require := assert.New(t), require.New(t)
 	ch := make(chan types.Announcements, 100)
-	_, coreDB, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir()))
+	coreDB, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir()))
 	db := memdb.NewTestPoolDB(t)
 
 	cfg := txpoolcfg.DefaultConfig
@@ -1060,7 +1060,7 @@ func TestBlobSlots(t *testing.T) {
 	t.Skip("TODO")
 	assert, require := assert.New(t), require.New(t)
 	ch := make(chan types.Announcements, 5)
-	_, coreDB, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir()))
+	coreDB, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir()))
 	db := memdb.NewTestPoolDB(t)
 	cfg := txpoolcfg.DefaultConfig
@@ -1140,7 +1140,7 @@ func TestGasLimitChanged(t *testing.T) {
 	assert, require := assert.New(t), require.New(t)
 	ch := make(chan types.Announcements, 100)
 
-	_, coreDB, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir()))
+	coreDB, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir()))
 	db := memdb.NewTestPoolDB(t)
 	cfg := txpoolcfg.DefaultConfig
 	sendersCache := kvcache.New(kvcache.DefaultCoherentConfig)
diff --git a/eth/backend.go b/eth/backend.go
index 89ccb228606..256db206c3e 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -65,11 +65,10 @@ import (
 	remote "github.com/ledgerwatch/erigon-lib/gointerfaces/remoteproto"
 	rpcsentinel "github.com/ledgerwatch/erigon-lib/gointerfaces/sentinelproto"
 	protosentry "github.com/ledgerwatch/erigon-lib/gointerfaces/sentryproto"
-	txpoolproto "github.com/ledgerwatch/erigon-lib/gointerfaces/txpoolproto"
+	"github.com/ledgerwatch/erigon-lib/gointerfaces/txpoolproto"
 	prototypes "github.com/ledgerwatch/erigon-lib/gointerfaces/typesproto"
 	"github.com/ledgerwatch/erigon-lib/kv"
 	"github.com/ledgerwatch/erigon-lib/kv/kvcache"
-	"github.com/ledgerwatch/erigon-lib/kv/kvcfg"
 	"github.com/ledgerwatch/erigon-lib/kv/remotedbserver"
 	"github.com/ledgerwatch/erigon-lib/kv/temporal"
 	libstate "github.com/ledgerwatch/erigon-lib/state"
@@ -262,8 +261,7 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger
 			return err
 		}
 
-		config.HistoryV3, err = kvcfg.HistoryV3.WriteOnce(tx, config.HistoryV3)
-		return err
+		return nil
 	}); err != nil {
 		return nil, err
 	}
@@ -341,19 +339,17 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger
 
 	// Check if we have an already initialized chain and fall back to
 	// that if so. Otherwise we need to generate a new genesis spec.
-	blockReader, blockWriter, allSnapshots, allBorSnapshots, agg, err := setUpBlockReader(ctx, chainKv, config.Dirs, config, config.HistoryV3, chainConfig.Bor != nil, logger)
+	blockReader, blockWriter, allSnapshots, allBorSnapshots, agg, err := setUpBlockReader(ctx, chainKv, config.Dirs, config, chainConfig.Bor != nil, logger)
 	if err != nil {
 		return nil, err
 	}
 	backend.agg, backend.blockSnapshots, backend.blockReader, backend.blockWriter = agg, allSnapshots, blockReader, blockWriter
 
-	if config.HistoryV3 {
-		backend.chainDB, err = temporal.New(backend.chainDB, agg)
-		if err != nil {
-			return nil, err
-		}
-		chainKv = backend.chainDB //nolint
+	backend.chainDB, err = temporal.New(backend.chainDB, agg)
+	if err != nil {
+		return nil, err
 	}
+	chainKv = backend.chainDB //nolint
 
 	if err := backend.setUpSnapDownloader(ctx, config.Downloader); err != nil {
 		return nil, err
@@ -558,16 +554,12 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger
 			dirs, notifications, blockReader, blockWriter, backend.agg, backend.silkworm, terseLogger)
 		chainReader := consensuschain.NewReader(chainConfig, txc.Tx, blockReader, logger)
 		// We start the mining step
-		if err := stages2.StateStep(ctx, chainReader, backend.engine, txc, stateSync, header, body, unwindPoint, headersChain, bodiesChain, config.HistoryV3); err != nil {
+		if err := stages2.StateStep(ctx, chainReader, backend.engine, txc, stateSync, header, body, unwindPoint, headersChain, bodiesChain); err != nil {
 			logger.Warn("Could not validate block", "err", err)
 			return err
 		}
 		var progress uint64
-		if config.HistoryV3 {
-			progress, err = stages.GetStageProgress(txc.Tx, stages.Execution)
-		} else {
-			progress, err = stages.GetStageProgress(txc.Tx, stages.IntermediateHashes)
-		}
+		progress, err = stages.GetStageProgress(txc.Tx, stages.Execution)
 		if err != nil {
 			return err
 		}
@@ -637,7 +629,7 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger
 	backend.newTxs = make(chan libtypes.Announcements, 1024) //defer close(newTxs)
 	backend.txPoolDB, backend.txPool, backend.txPoolFetch, backend.txPoolSend, backend.txPoolGrpcServer, err = txpooluitl.AllComponents(
-		ctx, config.TxPool, kvcache.NewDummy(config.HistoryV3), backend.newTxs, chainKv, backend.sentriesClient.Sentries(), stateDiffClient, misc.Eip1559FeeCalculator, logger,
+		ctx, config.TxPool, kvcache.NewDummy(), backend.newTxs, chainKv, backend.sentriesClient.Sentries(), stateDiffClient, misc.Eip1559FeeCalculator, logger,
 	)
 	if err != nil {
 		return nil, err
@@ -679,7 +671,6 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger
 		backend.notifications.Accumulator,
 		config.StateStream,
 		/*stateStream=*/ false,
-		config.HistoryV3,
 		dirs,
 		blockReader,
 		backend.sentriesClient.Hd,
@@ -719,7 +710,6 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger
 		backend.notifications.Accumulator,
 		config.StateStream,
 		/*stateStream=*/ false,
-		config.HistoryV3,
 		dirs,
 		blockReader,
 		backend.sentriesClient.Hd,
@@ -895,7 +885,7 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger
 	checkStateRoot := true
 	pipelineStages := stages2.NewPipelineStages(ctx, backend.chainDB, config, p2pConfig, backend.sentriesClient, backend.notifications, backend.downloaderClient, blockReader, blockRetire, backend.agg, backend.silkworm, backend.forkValidator, logger, checkStateRoot)
 	backend.pipelineStagedSync = stagedsync.New(config.Sync, pipelineStages, stagedsync.PipelineUnwindOrder, stagedsync.PipelinePruneOrder, logger)
-	backend.eth1ExecutionServer = eth1.NewEthereumExecutionModule(blockReader, backend.chainDB, backend.pipelineStagedSync, backend.forkValidator, chainConfig, assembleBlockPOS, hook, backend.notifications.Accumulator, backend.notifications.StateChangesConsumer, logger, backend.engine, config.HistoryV3, config.Sync, ctx)
+	backend.eth1ExecutionServer = eth1.NewEthereumExecutionModule(blockReader, backend.chainDB, backend.pipelineStagedSync, backend.forkValidator, chainConfig, assembleBlockPOS, hook, backend.notifications.Accumulator, backend.notifications.StateChangesConsumer, logger, backend.engine, config.Sync, ctx)
 	executionRpc := direct.NewExecutionClientDirect(backend.eth1ExecutionServer)
 
 	var executionEngine executionclient.ExecutionEngine
@@ -1434,7 +1424,7 @@ func (s *Ethereum) setUpSnapDownloader(ctx context.Context, downloaderCfg *downl
 	return err
 }
 
-func setUpBlockReader(ctx context.Context, db kv.RwDB, dirs datadir.Dirs, snConfig *ethconfig.Config, histV3 bool, isBor bool, logger log.Logger) (services.FullBlockReader, *blockio.BlockWriter, *freezeblocks.RoSnapshots, *freezeblocks.BorRoSnapshots, *libstate.Aggregator, error) {
+func setUpBlockReader(ctx context.Context, db kv.RwDB, dirs datadir.Dirs, snConfig *ethconfig.Config, isBor bool, logger log.Logger) (services.FullBlockReader, *blockio.BlockWriter, *freezeblocks.RoSnapshots, *freezeblocks.BorRoSnapshots, *libstate.Aggregator, error) {
 	var minFrozenBlock uint64
 
 	if frozenLimit := snConfig.Sync.FrozenBlockLimit; frozenLimit != 0 {
@@ -1473,7 +1463,7 @@ func setUpBlockReader(ctx context.Context, db kv.RwDB, dirs datadir.Dirs, snConf
 	}
 
 	blockReader := freezeblocks.NewBlockReader(allSnapshots, allBorSnapshots)
-	blockWriter := blockio.NewBlockWriter(histV3)
+	blockWriter := blockio.NewBlockWriter()
 
 	return blockReader, blockWriter, allSnapshots, allBorSnapshots, agg, nil
 }
diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go
index ed96688de37..4550d65aaa6 100644
--- a/eth/ethconfig/config.go
+++ b/eth/ethconfig/config.go
@@ -72,7 +72,6 @@ var LightClientGPO = gaspricecfg.Config{
 var Defaults = Config{
 	Sync: Sync{
 		UseSnapshots:     true,
-		HistoryV3:        true,
 		ExecWorkerCount:  estimate.ReconstituteState.WorkersHalf(), //only half of CPU, other half will spend for snapshots build/merge/prune
 		ReconWorkerCount: estimate.ReconstituteState.Workers(),
 		BodyCacheLimit:   256 * 1024 * 1024,
@@ -271,9 +270,6 @@ type Config struct {
 type Sync struct {
 	UseSnapshots bool
 
-	// New DB and Snapshots format of history allows: parallel blocks execution, get state as of given transaction without executing whole block.",
-	HistoryV3 bool
-
 	// LoopThrottle sets a minimum time between staged loop iterations
 	LoopThrottle    time.Duration
 	ExecWorkerCount int
diff --git a/eth/stagedsync/default_stages.go b/eth/stagedsync/default_stages.go
index 3b17c8110d2..9db7778f312 100644
--- a/eth/stagedsync/default_stages.go
+++ b/eth/stagedsync/default_stages.go
@@ -6,7 +6,6 @@ import (
 	"github.com/ledgerwatch/log/v3"
 
 	"github.com/ledgerwatch/erigon-lib/common/dbg"
-	"github.com/ledgerwatch/erigon-lib/config3"
 	"github.com/ledgerwatch/erigon-lib/kv"
 	"github.com/ledgerwatch/erigon-lib/wrap"
 	"github.com/ledgerwatch/erigon/eth/stagedsync/stages"
@@ -164,7 +163,7 @@ func DefaultStages(ctx context.Context,
 		{
 			ID:          stages.IntermediateHashes,
 			Description: "Generate intermediate hashes and computing state root",
-			Disabled:    bodies.historyV3 || config3.EnableHistoryV4InTest || dbg.StagesOnlyBlocks,
+			Disabled:    true,
 			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
 				if exec.chainConfig.IsOsaka(0) {
 					_, err := SpawnVerkleTrie(s, u, txc.Tx, trieCfg, ctx, logger)
@@ -187,7 +186,7 @@ func DefaultStages(ctx context.Context,
 			ID:                  stages.CallTraces,
 			Description:         "Generate call traces index",
 			DisabledDescription: "Work In Progress",
-			Disabled:            bodies.historyV3 || dbg.StagesOnlyBlocks,
+			Disabled:            true,
 			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
 				return SpawnCallTraces(s, txc.Tx, callTraces, ctx, logger)
 			},
@@ -201,7 +200,7 @@ func DefaultStages(ctx context.Context,
 		{
 			ID:          stages.AccountHistoryIndex,
 			Description: "Generate account history index",
-			Disabled:    bodies.historyV3 || dbg.StagesOnlyBlocks,
+			Disabled:    true,
 			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
 				return SpawnAccountHistoryIndex(s, txc.Tx, history, ctx, logger)
 			},
@@ -215,7 +214,7 @@ func DefaultStages(ctx context.Context,
 		{
 			ID:          stages.StorageHistoryIndex,
 			Description: "Generate storage history index",
-			Disabled:    bodies.historyV3 || dbg.StagesOnlyBlocks,
+			Disabled:    true,
 			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
 				return SpawnStorageHistoryIndex(s, txc.Tx, history, ctx, logger)
 			},
@@ -229,7 +228,7 @@ func DefaultStages(ctx context.Context,
 		{
 			ID:          stages.LogIndex,
 			Description: "Generate receipt logs index",
-			Disabled:    bodies.historyV3 || dbg.StagesOnlyBlocks,
+			Disabled:    true,
 			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
 				return SpawnLogIndex(s, txc.Tx, logIndex, ctx, 0, logger)
 			},
@@ -539,7 +538,7 @@ func UploaderPipelineStages(ctx context.Context, snapshots SnapshotsCfg, headers
 		{
 			ID:          stages.HashState,
 			Description: "Hash the key in the state",
-			Disabled:    exec.historyV3 && config3.EnableHistoryV4InTest,
+			Disabled:    true,
 			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
 				return SpawnHashStateStage(s, txc.Tx, hashState, ctx, logger)
 			},
@@ -553,7 +552,7 @@ func UploaderPipelineStages(ctx context.Context, snapshots SnapshotsCfg, headers
 		{
 			ID:          stages.IntermediateHashes,
 			Description: "Generate intermediate hashes and computing state root",
-			Disabled:    exec.historyV3 && config3.EnableHistoryV4InTest,
+			Disabled:    true,
 			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
 				if exec.chainConfig.IsOsaka(0) {
 					_, err := SpawnVerkleTrie(s, u, txc.Tx, trieCfg, ctx, logger)
@@ -576,7 +575,7 @@ func UploaderPipelineStages(ctx context.Context, snapshots SnapshotsCfg, headers
 			ID:                  stages.CallTraces,
 			Description:         "Generate call traces index",
 			DisabledDescription: "Work In Progress",
-			Disabled:            exec.historyV3,
+			Disabled:            true,
 			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
 				return SpawnCallTraces(s, txc.Tx, callTraces, ctx, logger)
 			},
@@ -590,7 +589,7 @@ func UploaderPipelineStages(ctx context.Context, snapshots SnapshotsCfg, headers
 		{
 			ID:          stages.AccountHistoryIndex,
 			Description: "Generate account history index",
-			Disabled:    exec.historyV3,
+			Disabled:    true,
 			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
 				return SpawnAccountHistoryIndex(s, txc.Tx, history, ctx, logger)
 			},
@@ -714,7 +713,7 @@ func StateStages(ctx context.Context, headers HeadersCfg, bodies BodiesCfg, bloc
 		{
 			ID:          stages.HashState,
 			Description: "Hash the key in the state",
-			Disabled:    bodies.historyV3,
+			Disabled:    true,
 			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
 				return SpawnHashStateStage(s, txc.Tx, hashState, ctx, logger)
 			},
@@ -725,7 +724,7 @@ func StateStages(ctx context.Context, headers HeadersCfg, bodies BodiesCfg, bloc
 		{
 			ID:          stages.IntermediateHashes,
 			Description: "Generate intermediate hashes and computing state root",
-			Disabled:    bodies.historyV3,
+			Disabled:    true,
 			Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
 				_, err := SpawnIntermediateHashesStage(s, u, txc.Tx, trieCfg, ctx, logger)
 				return err
diff --git a/eth/stagedsync/exec3.go b/eth/stagedsync/exec3.go
index 9b7571062b1..b5fc938faa1 100644
--- a/eth/stagedsync/exec3.go
+++ b/eth/stagedsync/exec3.go
@@ -28,7 +28,6 @@ import (
 	"github.com/ledgerwatch/erigon-lib/common/dir"
 	"github.com/ledgerwatch/erigon-lib/etl"
 	"github.com/ledgerwatch/erigon-lib/kv"
-	"github.com/ledgerwatch/erigon-lib/kv/kvcfg"
 	kv2 "github.com/ledgerwatch/erigon-lib/kv/mdbx"
 	"github.com/ledgerwatch/erigon-lib/kv/rawdbv3"
 	"github.com/ledgerwatch/erigon-lib/metrics"
@@ -981,39 +980,6 @@ Loop:
 
 // nolint
 func dumpPlainStateDebug(tx kv.RwTx, doms *state2.SharedDomains) {
-	blockNum, err := stages.GetStageProgress(tx, stages.Execution)
-	if err != nil {
-		panic(err)
-	}
-	histV3, err := kvcfg.HistoryV3.Enabled(tx)
-	if err != nil {
-		panic(err)
-	}
-	fmt.Printf("[dbg] plain state: %d\n", blockNum)
-	defer fmt.Printf("[dbg] plain state end\n")
-
-	if !histV3 {
-		if err := tx.ForEach(kv.PlainState, nil, func(k, v []byte) error {
-			if len(k) == 20 {
-				a := accounts.NewAccount()
-				a.DecodeForStorage(v)
-				fmt.Printf("%x, %d, %d, %d, %x\n", k, &a.Balance, a.Nonce, a.Incarnation, a.CodeHash)
-			}
-			return nil
-		}); err != nil {
-			panic(err)
-		}
-		if err := tx.ForEach(kv.PlainState, nil, func(k, v []byte) error {
-			if len(k) > 20 {
-				fmt.Printf("%x, %x\n", k, v)
-			}
-			return nil
-		}); err != nil {
-			panic(err)
-		}
-		return
-	}
-
 	if doms != nil {
 		doms.Flush(context.Background(), tx)
 	}
diff --git a/eth/stagedsync/stage_bodies.go b/eth/stagedsync/stage_bodies.go
index f600e85086b..076d9cdde94 100644
--- a/eth/stagedsync/stage_bodies.go
+++ b/eth/stagedsync/stage_bodies.go
@@ -36,7 +36,6 @@ type BodiesCfg struct {
 	chanConfig     chain.Config
 	blockReader    services.FullBlockReader
 	blockWriter    *blockio.BlockWriter
-	historyV3      bool
 	loopBreakCheck func(int) bool
 }
 
@@ -45,13 +44,12 @@ func StageBodiesCfg(db kv.RwDB, bd *bodydownload.BodyDownload,
 	blockPropagator adapter.BlockPropagator, timeout int,
 	chanConfig chain.Config,
 	blockReader services.FullBlockReader,
-	historyV3 bool,
 	blockWriter *blockio.BlockWriter,
 	loopBreakCheck func(int) bool) BodiesCfg {
 	return BodiesCfg{
 		db: db, bd: bd, bodyReqSend: bodyReqSend, penalise: penalise, blockPropagator: blockPropagator,
 		timeout: timeout, chanConfig: chanConfig, blockReader: blockReader,
-		historyV3: historyV3, blockWriter: blockWriter, loopBreakCheck: loopBreakCheck}
+		blockWriter: blockWriter, loopBreakCheck: loopBreakCheck}
 }
 
 // BodiesForward progresses Bodies stage in the forward direction
@@ -247,7 +245,7 @@ func BodiesForward(
 			if err != nil {
 				return false, fmt.Errorf("WriteRawBodyIfNotExists: %w", err)
 			}
-			if cfg.historyV3 && ok {
+			if ok {
 				if err := rawdb.AppendCanonicalTxNums(tx, blockHeight); err != nil {
 					return false, err
 				}
diff --git a/eth/stagedsync/stage_call_traces_test.go b/eth/stagedsync/stage_call_traces_test.go
index 0de80a9a9e7..76333931d23 100644
--- a/eth/stagedsync/stage_call_traces_test.go
+++ b/eth/stagedsync/stage_call_traces_test.go
@@ -33,12 +33,11 @@ func genTestCallTraceSet(t *testing.T, tx kv.RwTx, to uint64) {
 }
 
 func TestCallTrace(t *testing.T) {
+	t.Skip("this stage is disabled in E3")
+
 	logger := log.New()
 	ctx, require := context.Background(), require.New(t)
-	histV3, db, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir()))
-	if histV3 {
-		t.Skip()
-	}
+	db, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir()))
 	tx, err := db.BeginRw(context.Background())
 	require.NoError(err)
 	defer tx.Rollback()
diff --git a/eth/stagedsync/stage_execute.go b/eth/stagedsync/stage_execute.go
index 7d6b1b2c8ec..6dc3fc44309 100644
--- a/eth/stagedsync/stage_execute.go
+++ b/eth/stagedsync/stage_execute.go
@@ -2,7 +2,6 @@ package stagedsync
 
 import (
 	"context"
-	"encoding/binary"
 	"errors"
 	"fmt"
 	"os"
@@ -20,21 +19,15 @@ import (
 	"github.com/ledgerwatch/erigon-lib/common/cmp"
 	"github.com/ledgerwatch/erigon-lib/common/datadir"
 	"github.com/ledgerwatch/erigon-lib/common/dbg"
-	"github.com/ledgerwatch/erigon-lib/common/hexutility"
-	"github.com/ledgerwatch/erigon-lib/common/length"
 	"github.com/ledgerwatch/erigon-lib/common/metrics"
 	"github.com/ledgerwatch/erigon-lib/diagnostics"
-	"github.com/ledgerwatch/erigon-lib/etl"
 	"github.com/ledgerwatch/erigon-lib/kv"
 	"github.com/ledgerwatch/erigon-lib/kv/dbutils"
 	"github.com/ledgerwatch/erigon-lib/kv/membatch"
 	"github.com/ledgerwatch/erigon-lib/kv/membatchwithdb"
 	"github.com/ledgerwatch/erigon-lib/kv/rawdbv3"
-	"github.com/ledgerwatch/erigon-lib/kv/temporal/historyv2"
 	libstate "github.com/ledgerwatch/erigon-lib/state"
 	"github.com/ledgerwatch/erigon-lib/wrap"
-	"github.com/ledgerwatch/erigon/common/changeset"
-	"github.com/ledgerwatch/erigon/common/math"
 	"github.com/ledgerwatch/erigon/consensus"
 	"github.com/ledgerwatch/erigon/core"
 	"github.com/ledgerwatch/erigon/core/rawdb"
@@ -107,7 +100,6 @@ func StageExecuteBlocksCfg(
 	stateStream bool,
 	badBlockHalt bool,
-	historyV3 bool,
 	dirs datadir.Dirs,
 	blockReader services.FullBlockReader,
 	hd headerDownloader,
@@ -131,7 +123,7 @@ func StageExecuteBlocksCfg(
 		blockReader: blockReader,
 		hd:          hd,
 		genesis:     genesis,
-		historyV3:   historyV3,
+		historyV3:   true,
 		syncCfg:     syncCfg,
 		agg:         agg,
 		silkworm:    silkworm,
@@ -829,10 +821,6 @@ func UnwindExecutionStage(u *UnwindState, s *StageState, txc wrap.TxContainer, c
 }
 
 func unwindExecutionStage(u *UnwindState, s *StageState, txc wrap.TxContainer, ctx context.Context, cfg ExecuteBlockCfg, initialCycle bool, logger log.Logger) error {
-	logPrefix := s.LogPrefix()
-	stateBucket := kv.PlainState
-	storageKeyLength := length.Addr + length.Incarnation + length.Hash
-
 	var accumulator *shards.Accumulator
 	if cfg.stateStream && s.BlockNumber-u.UnwindPoint < stateStreamLimit {
 		accumulator = cfg.accumulator
@@ -849,121 +837,7 @@ func unwindExecutionStage(u *UnwindState, s *StageState, txc wrap.TxContainer, c
 	}
 
 	//TODO: why we don't call accumulator.ChangeCode???
-	if cfg.historyV3 {
-		return unwindExec3(u, s, txc, ctx, accumulator, logger)
-	}
-
-	changes := etl.NewCollector(logPrefix, cfg.dirs.Tmp, etl.NewOldestEntryBuffer(etl.BufferOptimalSize), logger)
-	defer changes.Close()
-	errRewind := changeset.RewindData(txc.Tx, s.BlockNumber, u.UnwindPoint, changes, ctx.Done())
-	if errRewind != nil {
-		return fmt.Errorf("getting rewind data: %w", errRewind)
-	}
-
-	if err := changes.Load(txc.Tx, stateBucket, func(k, v []byte, table etl.CurrentTableReader, next etl.LoadNextFunc) error {
-		if len(k) == 20 {
-			if len(v) > 0 {
-				var acc accounts.Account
-				if err := acc.DecodeForStorage(v); err != nil {
-					return err
-				}
-
-				// Fetch the code hash
-				recoverCodeHashPlain(&acc, txc.Tx, k)
-				var address common.Address
-				copy(address[:], k)
-
-				// cleanup contract code bucket
-				original, err := state.NewPlainStateReader(txc.Tx).ReadAccountData(address)
-				if err != nil {
-					return fmt.Errorf("read account for %x: %w", address, err)
-				}
-				if original != nil {
-					// clean up all the code incarnations original incarnation and the new one
-					for incarnation := original.Incarnation; incarnation > acc.Incarnation && incarnation > 0; incarnation-- {
-						err = txc.Tx.Delete(kv.PlainContractCode, dbutils.PlainGenerateStoragePrefix(address[:], incarnation))
-						if err != nil {
-							return fmt.Errorf("writeAccountPlain for %x: %w", address, err)
-						}
-					}
-				}
-
-				newV := make([]byte, acc.EncodingLengthForStorage())
-				acc.EncodeForStorage(newV)
-				if accumulator != nil {
-					accumulator.ChangeAccount(address, acc.Incarnation, newV)
-				}
-				if err := next(k, k, newV); err != nil {
-					return err
-				}
-			} else {
-				if accumulator != nil {
-					var address common.Address
-					copy(address[:], k)
-					accumulator.DeleteAccount(address)
-				}
-				if err := next(k, k, nil); err != nil {
-					return err
-				}
-			}
-			return nil
-		}
-		if accumulator != nil {
-			var address common.Address
-			var incarnation uint64
-			var location common.Hash
-			copy(address[:], k[:length.Addr])
-			incarnation = binary.BigEndian.Uint64(k[length.Addr:])
-			copy(location[:], k[length.Addr+length.Incarnation:])
-			logger.Debug(fmt.Sprintf("un ch st: %x, %d, %x, %x\n", address, incarnation, location, common.Copy(v)))
-			accumulator.ChangeStorage(address, incarnation, location, common.Copy(v))
-		}
-		if len(v) > 0 {
-			if err := next(k, k[:storageKeyLength], v); err != nil {
-				return err
-			}
-		} else {
-			if err := next(k, k[:storageKeyLength], nil); err != nil {
-				return err
-			}
-		}
-		return nil
-
-	}, etl.TransformArgs{Quit: ctx.Done()}); err != nil {
-		return err
-	}
-
-	if err := historyv2.Truncate(txc.Tx, u.UnwindPoint+1); err != nil {
-		return err
-	}
-
-	if err := rawdb.TruncateReceipts(txc.Tx, u.UnwindPoint+1); err != nil {
-		return fmt.Errorf("truncate receipts: %w", err)
-	}
-	if err := rawdb.TruncateBorReceipts(txc.Tx, u.UnwindPoint+1); err != nil {
-		return fmt.Errorf("truncate bor receipts: %w", err)
-	}
-	if err := rawdb.DeleteNewerEpochs(txc.Tx, u.UnwindPoint+1); err != nil {
-		return fmt.Errorf("delete newer epochs: %w", err)
-	}
-
-	// Truncate CallTraceSet
-	keyStart := hexutility.EncodeTs(u.UnwindPoint + 1)
-	c, err := txc.Tx.RwCursorDupSort(kv.CallTraceSet)
-	if err != nil {
-		return err
-	}
-	defer c.Close()
-	for k, _, err := c.Seek(keyStart); k != nil; k, _, err = c.NextNoDup() {
-		if err != nil {
-			return err
-		}
-		if err = txc.Tx.Delete(kv.CallTraceSet, k); err != nil {
-			return err
-		}
-	}
-
-	return nil
+	return unwindExec3(u, s, txc, ctx, accumulator, logger)
 }
 
 func recoverCodeHashPlain(acc *accounts.Account, db kv.Tx, key []byte) {
@@ -977,7 +851,6 @@ func recoverCodeHashPlain(acc *accounts.Account, db kv.Tx, key []byte) {
 }
 
 func PruneExecutionStage(s *PruneState, tx kv.RwTx, cfg ExecuteBlockCfg, ctx context.Context, initialCycle bool) (err error) {
-	logPrefix := s.LogPrefix()
 	useExternalTx := tx != nil
 	if !useExternalTx {
 		tx, err = cfg.db.BeginRw(ctx)
@@ -990,42 +863,12 @@ func PruneExecutionStage(s *PruneState, tx kv.RwTx, cfg ExecuteBlockCfg, ctx con
 	logEvery := time.NewTicker(logInterval)
 	defer logEvery.Stop()
 
-	if cfg.historyV3 {
-		pruneTimeout := 3 * time.Second
-		if initialCycle {
-			pruneTimeout = 12 * time.Hour
-		}
-		if _, err = tx.(*temporal.Tx).AggTx().(*libstate.AggregatorRoTx).PruneSmallBatches(ctx, pruneTimeout, tx); err != nil { // prune part of retired data, before commit
-			return err
-		}
-	} else {
-		if cfg.prune.History.Enabled() {
-			if err = rawdb.PruneTableDupSort(tx, kv.AccountChangeSet, logPrefix, cfg.prune.History.PruneTo(s.ForwardProgress), logEvery, ctx); err != nil {
-				return err
-			}
-			if err = rawdb.PruneTableDupSort(tx, kv.StorageChangeSet, logPrefix, cfg.prune.History.PruneTo(s.ForwardProgress), logEvery, ctx); err != nil {
-				return err
-			}
-		}
-
-		if cfg.prune.Receipts.Enabled() {
-			if err = rawdb.PruneTable(tx, kv.Receipts, cfg.prune.Receipts.PruneTo(s.ForwardProgress), ctx, math.MaxInt32); err != nil {
-				return err
-			}
-			if err = rawdb.PruneTable(tx, kv.BorReceipts, cfg.prune.Receipts.PruneTo(s.ForwardProgress), ctx, math.MaxUint32); err != nil {
-				return err
-			}
-			// EDIT: Don't prune yet, let LogIndex stage take care of it
-			// LogIndex.Prune will read everything what not pruned here
-			// if err = rawdb.PruneTable(tx, kv.Log, cfg.prune.Receipts.PruneTo(s.ForwardProgress), ctx, math.MaxInt32); err != nil {
-			//	return err
-			// }
-		}
-		if cfg.prune.CallTraces.Enabled() {
-			if err = rawdb.PruneTableDupSort(tx, kv.CallTraceSet, logPrefix, cfg.prune.CallTraces.PruneTo(s.ForwardProgress), logEvery, ctx); err != nil {
-				return err
-			}
-		}
+	pruneTimeout := 3 * time.Second
+	if initialCycle {
+		pruneTimeout = 12 * time.Hour
+	}
+	if _, err = tx.(*temporal.Tx).AggTx().(*libstate.AggregatorRoTx).PruneSmallBatches(ctx, pruneTimeout, tx); err != nil { // prune part of retired data, before commit
+		return err
 	}
 
 	if err = s.Done(tx); err != nil {
diff --git a/eth/stagedsync/stage_hashstate.go b/eth/stagedsync/stage_hashstate.go
index 6eefc047807..091e74bfcab 100644
--- a/eth/stagedsync/stage_hashstate.go
+++ b/eth/stagedsync/stage_hashstate.go
@@ -33,15 +33,12 @@ import (
 type HashStateCfg struct {
 	db   kv.RwDB
 	dirs datadir.Dirs
-
-	historyV3 bool
 }
 
-func StageHashStateCfg(db kv.RwDB, dirs datadir.Dirs, historyV3 bool) HashStateCfg {
+func StageHashStateCfg(db kv.RwDB, dirs datadir.Dirs) HashStateCfg {
 	return HashStateCfg{
-		db:        db,
-		dirs:      dirs,
-		historyV3: historyV3,
+		db:   db,
+		dirs: dirs,
 	}
 }
 
@@ -126,25 +123,13 @@ func unwindHashStateStageImpl(logPrefix string, u *UnwindState, s *StageState, t
 	// Currently it does not require unwinding because it does not create any Intermediate Hash records
 	// and recomputes the state root from scratch
 	prom := NewPromoter(tx, cfg.dirs, ctx, logger)
-	if cfg.historyV3 {
-		if err := prom.UnwindOnHistoryV3(logPrefix, s.BlockNumber, u.UnwindPoint, false, true); err != nil {
-			return err
-		}
-		if err := prom.UnwindOnHistoryV3(logPrefix, s.BlockNumber, u.UnwindPoint, false, false); err != nil {
-			return err
-		}
-		if err := prom.UnwindOnHistoryV3(logPrefix, s.BlockNumber, u.UnwindPoint, true, false); err != nil {
-			return err
-		}
-		return nil
-	}
-	if err := prom.Unwind(logPrefix, s, u, false /* storage */, true /* codes */); err != nil {
+	if err := prom.UnwindOnHistoryV3(logPrefix, s.BlockNumber, u.UnwindPoint, false, true); err != nil {
 		return err
 	}
-	if err := prom.Unwind(logPrefix, s, u, false /* storage */, false /* codes */); err != nil {
+	if err := prom.UnwindOnHistoryV3(logPrefix, s.BlockNumber, u.UnwindPoint, false, false); err != nil {
 		return err
 	}
-	if err := prom.Unwind(logPrefix, s, u, true /* storage */, false /* codes */); err != nil {
+	if err := prom.UnwindOnHistoryV3(logPrefix, s.BlockNumber, u.UnwindPoint, true, false); err != nil {
 		return err
 	}
 	return nil
@@ -901,23 +886,10 @@ func (p *Promoter) Unwind(logPrefix string, s *StageState, u *UnwindState, stora
 
 func promoteHashedStateIncrementally(logPrefix string, from, to uint64, tx kv.RwTx, cfg HashStateCfg, ctx context.Context, logger log.Logger) error {
 	prom := NewPromoter(tx, cfg.dirs, ctx, logger)
-	if cfg.historyV3 {
-		if err := prom.PromoteOnHistoryV3(logPrefix, from, to, false); err != nil {
-			return err
-		}
-		if err := prom.PromoteOnHistoryV3(logPrefix, from, to, true); err != nil {
-			return err
-		}
-		return nil
-	}
-
-	if err := prom.Promote(logPrefix, from, to, false, true); err != nil {
-		return err
-	}
-	if err := prom.Promote(logPrefix, from, to, false, false); err != nil {
+	if err := prom.PromoteOnHistoryV3(logPrefix, from, to, false); err != nil {
 		return err
 	}
-	if err := prom.Promote(logPrefix, from, to, true, false); err != nil {
+	if err := prom.PromoteOnHistoryV3(logPrefix, from, to, true); err != nil {
 		return err
 	}
 	return nil
diff --git a/eth/stagedsync/stage_hashstate_test.go b/eth/stagedsync/stage_hashstate_test.go
index ec037d7c196..35eb5a412b4 100644
--- a/eth/stagedsync/stage_hashstate_test.go
+++ b/eth/stagedsync/stage_hashstate_test.go
@@ -21,14 +21,13 @@ func TestPromoteHashedStateClearState(t *testing.T) {
 	}
 	logger := log.New()
 	dirs := datadir.New(t.TempDir())
-	historyV3 := false
 	_, tx1 := memdb.NewTestTx(t)
 	db2, tx2 := memdb.NewTestTx(t)
 
 	generateBlocks(t, 1, 50, hashedWriterGen(tx1), changeCodeWithIncarnations)
 	generateBlocks(t, 1, 50, plainWriterGen(tx2), changeCodeWithIncarnations)
 
-	err := PromoteHashedStateCleanly("logPrefix", tx2, StageHashStateCfg(db2, dirs, historyV3), context.Background(), logger)
+	err := PromoteHashedStateCleanly("logPrefix", tx2, StageHashStateCfg(db2, dirs), context.Background(), logger)
 	if err != nil {
 		t.Errorf("error while promoting state: %v", err)
 	}
@@ -42,14 +41,13 @@ func TestPromoteHashedStateIncremental(t *testing.T) {
 	}
 	logger := log.New()
 	dirs := datadir.New(t.TempDir())
-	historyV3 := false
 	_, tx1 := memdb.NewTestTx(t)
 	db2, tx2 := memdb.NewTestTx(t)
 
 	generateBlocks(t, 1, 50, hashedWriterGen(tx1), changeCodeWithIncarnations)
 	generateBlocks(t, 1, 50, plainWriterGen(tx2), changeCodeWithIncarnations)
 
-	cfg := StageHashStateCfg(db2, dirs, historyV3)
+	cfg := StageHashStateCfg(db2, dirs)
 	err := PromoteHashedStateCleanly("logPrefix", tx2, cfg, context.Background(), logger)
 	if err != nil {
 		t.Errorf("error while promoting state: %v", err)
@@ -72,7 +70,6 @@ func TestPromoteHashedStateIncrementalMixed(t *testing.T) {
 	}
 	logger := log.New()
 	dirs := datadir.New(t.TempDir())
-	historyV3 := false
 
 	_, tx1 := memdb.NewTestTx(t)
 	db2, tx2 := memdb.NewTestTx(t)
@@ -80,7 +77,7 @@ func TestPromoteHashedStateIncrementalMixed(t *testing.T) {
 	generateBlocks(t, 1, 50, hashedWriterGen(tx2), changeCodeWithIncarnations)
 	generateBlocks(t, 51, 50, plainWriterGen(tx2), changeCodeWithIncarnations)
promoteHashedStateIncrementally("logPrefix", 50, 101, tx2, StageHashStateCfg(db2, dirs, historyV3), context.Background(), logger) + err := promoteHashedStateIncrementally("logPrefix", 50, 101, tx2, StageHashStateCfg(db2, dirs), context.Background(), logger) if err != nil { t.Errorf("error while promoting state: %v", err) } @@ -93,20 +90,19 @@ func TestUnwindHashed(t *testing.T) { } logger := log.New() dirs := datadir.New(t.TempDir()) - historyV3 := false _, tx1 := memdb.NewTestTx(t) db2, tx2 := memdb.NewTestTx(t) generateBlocks(t, 1, 50, hashedWriterGen(tx1), changeCodeWithIncarnations) generateBlocks(t, 1, 50, plainWriterGen(tx2), changeCodeWithIncarnations) - err := PromoteHashedStateCleanly("logPrefix", tx2, StageHashStateCfg(db2, dirs, historyV3), context.Background(), logger) + err := PromoteHashedStateCleanly("logPrefix", tx2, StageHashStateCfg(db2, dirs), context.Background(), logger) if err != nil { t.Errorf("error while promoting state: %v", err) } u := &UnwindState{UnwindPoint: 50} s := &StageState{BlockNumber: 100} - err = unwindHashStateStageImpl("logPrefix", u, s, tx2, StageHashStateCfg(db2, dirs, historyV3), context.Background(), logger) + err = unwindHashStateStageImpl("logPrefix", u, s, tx2, StageHashStateCfg(db2, dirs), context.Background(), logger) if err != nil { t.Errorf("error while unwind state: %v", err) } @@ -118,7 +114,6 @@ func TestPromoteIncrementallyShutdown(t *testing.T) { if config3.EnableHistoryV4InTest { t.Skip("e3: doesn't have this stage") } - historyV3 := false tt := []struct { name string @@ -140,7 +135,7 @@ func TestPromoteIncrementallyShutdown(t *testing.T) { } db, tx := memdb.NewTestTx(t) generateBlocks(t, 1, 10, plainWriterGen(tx), changeCodeWithIncarnations) - if err := promoteHashedStateIncrementally("logPrefix", 1, 10, tx, StageHashStateCfg(db, dirs, historyV3), ctx, log.New()); !errors.Is(err, tc.errExp) { + if err := promoteHashedStateIncrementally("logPrefix", 1, 10, tx, StageHashStateCfg(db, dirs), ctx, log.New()); !errors.Is(err, tc.errExp) { t.Errorf("error does not match expected error while shutdown promoteHashedStateIncrementally, got: %v, expected: %v", err, tc.errExp) } }) @@ -154,7 +149,6 @@ func TestPromoteHashedStateCleanlyShutdown(t *testing.T) { t.Skip("e3: doesn't have this stage") } logger := log.New() - historyV3 := false tt := []struct { name string @@ -180,7 +174,7 @@ func TestPromoteHashedStateCleanlyShutdown(t *testing.T) { generateBlocks(t, 1, 10, plainWriterGen(tx), changeCodeWithIncarnations) - if err := PromoteHashedStateCleanly("logPrefix", tx, StageHashStateCfg(db, dirs, historyV3), ctx, logger); !errors.Is(err, tc.errExp) { + if err := PromoteHashedStateCleanly("logPrefix", tx, StageHashStateCfg(db, dirs), ctx, logger); !errors.Is(err, tc.errExp) { t.Errorf("error does not match expected error while shutdown promoteHashedStateCleanly , got: %v, expected: %v", err, tc.errExp) } @@ -193,7 +187,6 @@ func TestUnwindHashStateShutdown(t *testing.T) { t.Skip("e3: doesn't have this stage") } logger := log.New() - historyV3 := false tt := []struct { name string cancelFuncExec bool @@ -217,7 +210,7 @@ func TestUnwindHashStateShutdown(t *testing.T) { db, tx := memdb.NewTestTx(t) generateBlocks(t, 1, 10, plainWriterGen(tx), changeCodeWithIncarnations) - cfg := StageHashStateCfg(db, dirs, historyV3) + cfg := StageHashStateCfg(db, dirs) err := PromoteHashedStateCleanly("logPrefix", tx, cfg, ctx, logger) if tc.cancelFuncExec { require.ErrorIs(t, err, context.Canceled) diff --git a/eth/stagedsync/stage_headers.go 
diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go
index 1e57d1aa7ca..970bc698965 100644
--- a/eth/stagedsync/stage_headers.go
+++ b/eth/stagedsync/stage_headers.go
@@ -47,7 +47,6 @@ type HeadersCfg struct {
 	batchSize      datasize.ByteSize
 	noP2PDiscovery bool
 	tmpdir         string
-	historyV3      bool
 
 	blockReader services.FullBlockReader
 	blockWriter *blockio.BlockWriter
@@ -71,7 +70,6 @@ func StageHeadersCfg(
 	blockReader services.FullBlockReader,
 	blockWriter *blockio.BlockWriter,
 	tmpdir string,
-	historyV3 bool,
 	notifications *shards.Notifications,
 	loopBreakCheck func(int) bool) HeadersCfg {
 	return HeadersCfg{
@@ -85,7 +83,6 @@ func StageHeadersCfg(
 		penalize:       penalize,
 		batchSize:      batchSize,
 		tmpdir:         tmpdir,
-		historyV3:      historyV3,
 		noP2PDiscovery: noP2PDiscovery,
 		blockReader:    blockReader,
 		blockWriter:    blockWriter,
@@ -327,29 +324,24 @@ Loop:
 		timer.Stop()
 	}
 	if headerInserter.Unwind() {
-		if cfg.historyV3 {
-			unwindTo := headerInserter.UnwindPoint()
-			doms, err := state.NewSharedDomains(tx, logger) //TODO: if remove this line TestBlockchainHeaderchainReorgConsistency failing
-			if err != nil {
-				return err
-			}
-			defer doms.Close()
+		unwindTo := headerInserter.UnwindPoint()
+		doms, err := state.NewSharedDomains(tx, logger) //TODO: if remove this line TestBlockchainHeaderchainReorgConsistency failing
+		if err != nil {
+			return err
+		}
+		defer doms.Close()
 
-			allowedUnwindTo, ok, err := tx.(state.HasAggTx).AggTx().(*state.AggregatorRoTx).CanUnwindBeforeBlockNum(unwindTo, tx)
-			if err != nil {
-				return err
-			}
-			if !ok {
-				return fmt.Errorf("too far unwind. requested=%d, minAllowed=%d", unwindTo, allowedUnwindTo)
-			}
-			if err := u.UnwindTo(allowedUnwindTo, StagedUnwind, tx); err != nil {
-				return err
-			}
-		} else {
-			if err := u.UnwindTo(headerInserter.UnwindPoint(), StagedUnwind, tx); err != nil {
-				return err
-			}
+		allowedUnwindTo, ok, err := tx.(state.HasAggTx).AggTx().(*state.AggregatorRoTx).CanUnwindBeforeBlockNum(unwindTo, tx)
+		if err != nil {
+			return err
+		}
+		if !ok {
requested=%d, minAllowed=%d", unwindTo, allowedUnwindTo) } + if err := u.UnwindTo(allowedUnwindTo, StagedUnwind, tx); err != nil { + return err + } + } if headerInserter.GetHighest() != 0 { if !headerInserter.Unwind() { diff --git a/eth/stagedsync/stage_mining_exec.go b/eth/stagedsync/stage_mining_exec.go index 0d474e6ad1a..8cc2da1e96f 100644 --- a/eth/stagedsync/stage_mining_exec.go +++ b/eth/stagedsync/stage_mining_exec.go @@ -20,7 +20,6 @@ import ( libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/metrics" "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon-lib/kv/kvcfg" types2 "github.com/ledgerwatch/erigon-lib/types" "github.com/ledgerwatch/erigon/consensus" "github.com/ledgerwatch/erigon/core" @@ -88,16 +87,11 @@ func SpawnMiningExecStage(s *StageState, txc wrap.TxContainer, cfg MiningExecCfg txs := current.PreparedTxs noempty := true - histV3, _ := kvcfg.HistoryV3.Enabled(txc.Tx) var domains *state2.SharedDomains var ( stateReader state.StateReader ) - if histV3 { - stateReader = state.NewReaderV4(txc.Doms) - } else { - stateReader = state.NewPlainStateReader(txc.Tx) - } + stateReader = state.NewReaderV4(txc.Doms) ibs := state.New(stateReader) // Create an empty block based on temporary copied state for @@ -127,19 +121,15 @@ func SpawnMiningExecStage(s *StageState, txc wrap.TxContainer, cfg MiningExecCfg m := membatchwithdb.NewMemoryBatch(txc.Tx, cfg.tmpdir, logger) defer m.Rollback() - if histV3 { - var err error - domains, err = state2.NewSharedDomains(m, logger) - if err != nil { - return err - } - defer domains.Close() - simStateReader = state.NewReaderV4(domains) - simStateWriter = state.NewWriterV4(domains) - } else { - simStateReader = state.NewPlainStateReader(m) - simStateWriter = state.NewPlainStateWriterNoHistory(m) + var err error + domains, err = state2.NewSharedDomains(m, logger) + if err != nil { + return err } + defer domains.Close() + simStateReader = state.NewReaderV4(domains) + simStateWriter = state.NewWriterV4(domains) + executionAt, err := s.ExecutionAt(txc.Tx) if err != nil { return err @@ -207,10 +197,8 @@ func SpawnMiningExecStage(s *StageState, txc wrap.TxContainer, cfg MiningExecCfg if _, err = rawdb.WriteRawBodyIfNotExists(txc.Tx, block.Hash(), blockHeight, block.RawBody()); err != nil { return fmt.Errorf("cannot write body: %s", err) } - if histV3 { - if err := rawdb.AppendCanonicalTxNums(txc.Tx, blockHeight); err != nil { - return err - } + if err := rawdb.AppendCanonicalTxNums(txc.Tx, blockHeight); err != nil { + return err } if err := stages.SaveStageProgress(txc.Tx, kv.Headers, blockHeight); err != nil { return err diff --git a/eth/stagedsync/stage_snapshots.go b/eth/stagedsync/stage_snapshots.go index c243cf321fc..d489c977ab2 100644 --- a/eth/stagedsync/stage_snapshots.go +++ b/eth/stagedsync/stage_snapshots.go @@ -35,7 +35,6 @@ import ( "github.com/ledgerwatch/erigon-lib/etl" protodownloader "github.com/ledgerwatch/erigon-lib/gointerfaces/downloaderproto" "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon-lib/kv/kvcfg" "github.com/ledgerwatch/erigon-lib/kv/rawdbv3" "github.com/ledgerwatch/erigon-lib/state" "github.com/ledgerwatch/erigon/core/rawdb" @@ -64,7 +63,6 @@ type SnapshotsCfg struct { blockReader services.FullBlockReader notifier *shards.Notifications - historyV3 bool caplin bool blobs bool agg *state.Aggregator @@ -82,7 +80,6 @@ func StageSnapshotsCfg(db kv.RwDB, snapshotDownloader protodownloader.DownloaderClient, blockReader services.FullBlockReader, 
 	notifier *shards.Notifications,
-	historyV3 bool,
 	agg *state.Aggregator,
 	caplin bool,
 	blobs bool,
@@ -97,7 +94,6 @@ func StageSnapshotsCfg(db kv.RwDB,
 		snapshotDownloader: snapshotDownloader,
 		blockReader:        blockReader,
 		notifier:           notifier,
-		historyV3:          historyV3,
 		caplin:             caplin,
 		agg:                agg,
 		silkworm:           silkworm,
@@ -239,14 +235,14 @@ func DownloadAndIndexSnapshotsIfNeed(s *StageState, ctx context.Context, tx kv.R
 	} else {
 
 		// Download only the snapshots that are for the header chain.
-		if err := snapshotsync.WaitForDownloader(ctx, s.LogPrefix() /*headerChain=*/, true, cfg.historyV3, cfg.blobs, cfg.prune, cstate, cfg.agg, tx, cfg.blockReader, &cfg.chainConfig, cfg.snapshotDownloader, s.state.StagesIdsList()); err != nil {
+		if err := snapshotsync.WaitForDownloader(ctx, s.LogPrefix() /*headerChain=*/, true, cfg.blobs, cfg.prune, cstate, cfg.agg, tx, cfg.blockReader, &cfg.chainConfig, cfg.snapshotDownloader, s.state.StagesIdsList()); err != nil {
 			return err
 		}
 
 		if err := cfg.blockReader.Snapshots().ReopenSegments([]snaptype.Type{coresnaptype.Headers, coresnaptype.Bodies}, true); err != nil {
 			return err
 		}
 
-		if err := snapshotsync.WaitForDownloader(ctx, s.LogPrefix() /*headerChain=*/, false, cfg.historyV3, cfg.blobs, cfg.prune, cstate, cfg.agg, tx, cfg.blockReader, &cfg.chainConfig, cfg.snapshotDownloader, s.state.StagesIdsList()); err != nil {
+		if err := snapshotsync.WaitForDownloader(ctx, s.LogPrefix() /*headerChain=*/, false, cfg.blobs, cfg.prune, cstate, cfg.agg, tx, cfg.blockReader, &cfg.chainConfig, cfg.snapshotDownloader, s.state.StagesIdsList()); err != nil {
 			return err
 		}
 	}
@@ -266,21 +262,19 @@ func DownloadAndIndexSnapshotsIfNeed(s *StageState, ctx context.Context, tx kv.R
 		}
 	}
 
-	if cfg.historyV3 {
-		indexWorkers := estimate.IndexSnapshot.Workers()
-		if err := cfg.agg.BuildOptionalMissedIndices(ctx, indexWorkers); err != nil {
-			return err
-		}
-		if err := cfg.agg.BuildMissedIndices(ctx, indexWorkers); err != nil {
-			return err
-		}
-		if cfg.notifier.Events != nil {
-			cfg.notifier.Events.OnNewSnapshot()
-		}
+	indexWorkers := estimate.IndexSnapshot.Workers()
+	if err := cfg.agg.BuildOptionalMissedIndices(ctx, indexWorkers); err != nil {
+		return err
+	}
+	if err := cfg.agg.BuildMissedIndices(ctx, indexWorkers); err != nil {
+		return err
+	}
+	if cfg.notifier.Events != nil {
+		cfg.notifier.Events.OnNewSnapshot()
+	}
 
-		if casted, ok := tx.(*temporal.Tx); ok {
-			casted.ForceReopenAggCtx() // otherwise next stages will not see just-indexed-files
-		}
+	if casted, ok := tx.(*temporal.Tx); ok {
+		casted.ForceReopenAggCtx() // otherwise next stages will not see just-indexed-files
 	}
 
 	frozenBlocks := cfg.blockReader.FrozenBlocks()
@@ -383,40 +377,37 @@ func FillDBFromSnapshots(logPrefix string, ctx context.Context, tx kv.RwTx, dirs
 		return err
 	}
 
-	historyV3, err := kvcfg.HistoryV3.Enabled(tx)
 	if err != nil {
 		return err
 	}
-	if historyV3 {
-		_ = tx.ClearBucket(kv.MaxTxNum)
-		if err := blockReader.IterateFrozenBodies(func(blockNum, baseTxNum, txAmount uint64) error {
-			select {
-			case <-ctx.Done():
-				return ctx.Err()
-			case <-logEvery.C:
-				logger.Info(fmt.Sprintf("[%s] MaxTxNums index: %dk/%dk", logPrefix, blockNum/1000, blockReader.FrozenBlocks()/1000))
-			default:
-			}
-			if baseTxNum+txAmount == 0 {
-				panic(baseTxNum + txAmount) //uint-underflow
-			}
-			maxTxNum := baseTxNum + txAmount - 1
+	_ = tx.ClearBucket(kv.MaxTxNum)
+	if err := blockReader.IterateFrozenBodies(func(blockNum, baseTxNum, txAmount uint64) error {
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		case <-logEvery.C:
logger.Info(fmt.Sprintf("[%s] MaxTxNums index: %dk/%dk", logPrefix, blockNum/1000, blockReader.FrozenBlocks()/1000)) + default: + } + if baseTxNum+txAmount == 0 { + panic(baseTxNum + txAmount) //uint-underflow + } + maxTxNum := baseTxNum + txAmount - 1 - if err := rawdbv3.TxNums.Append(tx, blockNum, maxTxNum); err != nil { - return fmt.Errorf("%w. blockNum=%d, maxTxNum=%d", err, blockNum, maxTxNum) - } - return nil - }); err != nil { - return fmt.Errorf("build txNum => blockNum mapping: %w", err) + if err := rawdbv3.TxNums.Append(tx, blockNum, maxTxNum); err != nil { + return fmt.Errorf("%w. blockNum=%d, maxTxNum=%d", err, blockNum, maxTxNum) } - if blockReader.FrozenBlocks() > 0 { - if err := rawdb.AppendCanonicalTxNums(tx, blockReader.FrozenBlocks()+1); err != nil { - return err - } - } else { - if err := rawdb.AppendCanonicalTxNums(tx, 0); err != nil { - return err - } + return nil + }); err != nil { + return fmt.Errorf("build txNum => blockNum mapping: %w", err) + } + if blockReader.FrozenBlocks() > 0 { + if err := rawdb.AppendCanonicalTxNums(tx, blockReader.FrozenBlocks()+1); err != nil { + return err + } + } else { + if err := rawdb.AppendCanonicalTxNums(tx, 0); err != nil { + return err } } ac := agg.BeginFilesRo() diff --git a/eth/stagedsync/stage_trie3_test.go b/eth/stagedsync/stage_trie3_test.go index 7f45cb57f5c..46a85c84b58 100644 --- a/eth/stagedsync/stage_trie3_test.go +++ b/eth/stagedsync/stage_trie3_test.go @@ -18,10 +18,7 @@ import ( func TestRebuildPatriciaTrieBasedOnFiles(t *testing.T) { ctx := context.Background() dirs := datadir.New(t.TempDir()) - v3, db, agg := temporaltest.NewTestDB(t, dirs) - if !v3 { - t.Skip("this test is v3 only") - } + db, agg := temporaltest.NewTestDB(t, dirs) logger := log.New() tx, err := db.BeginRw(context.Background()) @@ -88,7 +85,8 @@ func TestRebuildPatriciaTrieBasedOnFiles(t *testing.T) { } // checkRoot is false since we do not pass blockReader and want to check root manually afterwards. 
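Note: FillDBFromSnapshots above rebuilds the block-to-txNum index from frozen bodies. A sketch of the per-block arithmetic pulled out of the loop body — the helper name is mine, the underflow guard and error wrapping are verbatim from the hunk:

package example

import (
	"fmt"

	"github.com/ledgerwatch/erigon-lib/kv"
	"github.com/ledgerwatch/erigon-lib/kv/rawdbv3"
)

// appendMaxTxNum records the last transaction number of a block: baseTxNum is
// the block's first txNum and txAmount how many it spans, so its maximum is
// baseTxNum+txAmount-1 (txAmount is non-zero in practice for frozen bodies).
func appendMaxTxNum(tx kv.RwTx, blockNum, baseTxNum, txAmount uint64) error {
	if baseTxNum+txAmount == 0 {
		panic(baseTxNum + txAmount) // uint underflow, as guarded in the stage
	}
	maxTxNum := baseTxNum + txAmount - 1
	if err := rawdbv3.TxNums.Append(tx, blockNum, maxTxNum); err != nil {
		return fmt.Errorf("%w. blockNum=%d, maxTxNum=%d", err, blockNum, maxTxNum)
	}
	return nil
}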
diff --git a/eth/stagedsync/stage_trie3_test.go b/eth/stagedsync/stage_trie3_test.go
index 7f45cb57f5c..46a85c84b58 100644
--- a/eth/stagedsync/stage_trie3_test.go
+++ b/eth/stagedsync/stage_trie3_test.go
@@ -18,10 +18,7 @@ import (
 func TestRebuildPatriciaTrieBasedOnFiles(t *testing.T) {
 	ctx := context.Background()
 	dirs := datadir.New(t.TempDir())
-	v3, db, agg := temporaltest.NewTestDB(t, dirs)
-	if !v3 {
-		t.Skip("this test is v3 only")
-	}
+	db, agg := temporaltest.NewTestDB(t, dirs)
 	logger := log.New()
 
 	tx, err := db.BeginRw(context.Background())
@@ -88,7 +85,8 @@ func TestRebuildPatriciaTrieBasedOnFiles(t *testing.T) {
 	}
 
 	// checkRoot is false since we do not pass blockReader and want to check root manually afterwards.
-	cfg := StageTrieCfg(db, false /* checkRoot */, true /* saveHashesToDb */, false /* badBlockHalt */, dirs.Tmp, nil, nil /* hd */, v3, agg)
+	historyV3 := true
+	cfg := StageTrieCfg(db, false /* checkRoot */, true /* saveHashesToDb */, false /* badBlockHalt */, dirs.Tmp, nil, nil /* hd */, historyV3, agg)
 	rebuiltRoot, err := RebuildPatriciaTrieBasedOnFiles(tx, cfg, context.Background(), log.New())
 	require.NoError(t, err)
diff --git a/migrations/commitment.go b/migrations/commitment.go
index 8b8c3ef4149..2b9a7d1fb8e 100644
--- a/migrations/commitment.go
+++ b/migrations/commitment.go
@@ -9,7 +9,6 @@ import (
 
 	"github.com/ledgerwatch/erigon-lib/common/datadir"
 	"github.com/ledgerwatch/erigon-lib/kv"
-	"github.com/ledgerwatch/erigon-lib/kv/kvcfg"
 	libstate "github.com/ledgerwatch/erigon-lib/state"
 )
 
@@ -19,7 +18,8 @@ var SqueezeCommitmentFiles = Migration{
 	Name: "squeeze_commit_files",
 	Up: func(db kv.RwDB, dirs datadir.Dirs, progress []byte, BeforeCommit Callback, logger log.Logger) (err error) {
 		ctx := context.Background()
-		if !EnableSqueezeCommitmentFiles || !libstate.AggregatorSqueezeCommitmentValues || !kvcfg.HistoryV3.FromDB(db) { //nolint:staticcheck
+
+		if !EnableSqueezeCommitmentFiles || !libstate.AggregatorSqueezeCommitmentValues { //nolint:staticcheck
 			return db.Update(ctx, func(tx kv.RwTx) error {
 				return BeforeCommit(tx, nil, true)
 			})
diff --git a/p2p/sentry/sentry_grpc_server_test.go b/p2p/sentry/sentry_grpc_server_test.go
index ac126697fcb..43cefaa98b0 100644
--- a/p2p/sentry/sentry_grpc_server_test.go
+++ b/p2p/sentry/sentry_grpc_server_test.go
@@ -83,8 +83,8 @@ func testForkIDSplit(t *testing.T, protocol uint) {
 			SpuriousDragonBlock: big.NewInt(2),
 			ByzantiumBlock:      big.NewInt(3),
 		}
-		_, dbNoFork, _  = temporaltest.NewTestDB(t, datadir.New(t.TempDir()))
-		_, dbProFork, _ = temporaltest.NewTestDB(t, datadir.New(t.TempDir()))
+		dbNoFork, _  = temporaltest.NewTestDB(t, datadir.New(t.TempDir()))
+		dbProFork, _ = temporaltest.NewTestDB(t, datadir.New(t.TempDir()))
 
 		gspecNoFork  = &types.Genesis{Config: configNoFork}
 		gspecProFork = &types.Genesis{Config: configProFork}
@@ -176,7 +176,7 @@ func TestSentryServerImpl_SetStatusInitPanic(t *testing.T) {
 	}()
 
 	configNoFork := &chain.Config{HomesteadBlock: big.NewInt(1), ChainID: big.NewInt(1)}
-	_, dbNoFork, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir()))
+	dbNoFork, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir()))
 	gspecNoFork := &types.Genesis{Config: configNoFork}
 	genesisNoFork := core.MustCommitGenesis(gspecNoFork, dbNoFork, "", log.Root())
 	ss := &GrpcServer{p2p: &p2p.Config{}}
diff --git a/p2p/sentry/sentry_multi_client/sentry_multi_client.go b/p2p/sentry/sentry_multi_client/sentry_multi_client.go
index d00ee47cedf..f0ba178135d 100644
--- a/p2p/sentry/sentry_multi_client/sentry_multi_client.go
+++ b/p2p/sentry/sentry_multi_client/sentry_multi_client.go
@@ -26,8 +26,6 @@ import (
 	proto_sentry "github.com/ledgerwatch/erigon-lib/gointerfaces/sentryproto"
 	proto_types "github.com/ledgerwatch/erigon-lib/gointerfaces/typesproto"
 	"github.com/ledgerwatch/erigon-lib/kv"
-	"github.com/ledgerwatch/erigon-lib/kv/kvcfg"
-
 	"github.com/ledgerwatch/erigon/consensus"
 	"github.com/ledgerwatch/erigon/core/types"
 	"github.com/ledgerwatch/erigon/eth/ethconfig"
@@ -278,8 +276,7 @@ type MultiClient struct {
 	// decouple sentry multi client from header and body downloading logic is done
 	disableBlockDownload bool
 
-	historyV3 bool
-	logger    log.Logger
+	logger log.Logger
 }
 
 func NewMultiClient(
@@ -342,7 +339,6 @@ func NewMultiClient(
 		logPeerInfo:                       logPeerInfo,
sendHeaderRequestsToMultiplePeers: chainConfig.TerminalTotalDifficultyPassed, maxBlockBroadcastPeers: maxBlockBroadcastPeers, - historyV3: kvcfg.HistoryV3.FromDB(db), disableBlockDownload: disableBlockDownload, logger: logger, } @@ -686,47 +682,44 @@ func (cs *MultiClient) getBlockBodies66(ctx context.Context, inreq *proto_sentry } func (cs *MultiClient) getReceipts66(ctx context.Context, inreq *proto_sentry.InboundMessage, sentry direct.SentryClient) error { - if cs.historyV3 { // historyV3 doesn't store receipts in DB - return nil - } - - var query eth.GetReceiptsPacket66 - if err := rlp.DecodeBytes(inreq.Data, &query); err != nil { - return fmt.Errorf("decoding getReceipts66: %w, data: %x", err, inreq.Data) - } - tx, err := cs.db.BeginRo(ctx) - if err != nil { - return err - } - defer tx.Rollback() - receipts, err := eth.AnswerGetReceiptsQuery(cs.blockReader, tx, query.GetReceiptsPacket) - if err != nil { - return err - } - tx.Rollback() - b, err := rlp.EncodeToBytes(ð.ReceiptsRLPPacket66{ - RequestId: query.RequestId, - ReceiptsRLPPacket: receipts, - }) - if err != nil { - return fmt.Errorf("encode header response: %w", err) - } - outreq := proto_sentry.SendMessageByIdRequest{ - PeerId: inreq.PeerId, - Data: &proto_sentry.OutboundMessageData{ - Id: proto_sentry.MessageId_RECEIPTS_66, - Data: b, - }, - } - _, err = sentry.SendMessageById(ctx, &outreq, &grpc.EmptyCallOption{}) - if err != nil { - if isPeerNotFoundErr(err) { - return nil - } - return fmt.Errorf("send bodies response: %w", err) - } - //cs.logger.Info(fmt.Sprintf("[%s] GetReceipts responseLen %d", ConvertH512ToPeerID(inreq.PeerId), len(b))) - return nil + return nil //TODO: https://github.com/ledgerwatch/erigon/issues/10320 + //var query eth.GetReceiptsPacket66 + //if err := rlp.DecodeBytes(inreq.Data, &query); err != nil { + // return fmt.Errorf("decoding getReceipts66: %w, data: %x", err, inreq.Data) + //} + //tx, err := cs.db.BeginRo(ctx) + //if err != nil { + // return err + //} + //defer tx.Rollback() + //receipts, err := eth.AnswerGetReceiptsQuery(cs.blockReader, tx, query.GetReceiptsPacket) + //if err != nil { + // return err + //} + //tx.Rollback() + //b, err := rlp.EncodeToBytes(ð.ReceiptsRLPPacket66{ + // RequestId: query.RequestId, + // ReceiptsRLPPacket: receipts, + //}) + //if err != nil { + // return fmt.Errorf("encode header response: %w", err) + //} + //outreq := proto_sentry.SendMessageByIdRequest{ + // PeerId: inreq.PeerId, + // Data: &proto_sentry.OutboundMessageData{ + // Id: proto_sentry.MessageId_RECEIPTS_66, + // Data: b, + // }, + //} + //_, err = sentry.SendMessageById(ctx, &outreq, &grpc.EmptyCallOption{}) + //if err != nil { + // if isPeerNotFoundErr(err) { + // return nil + // } + // return fmt.Errorf("send bodies response: %w", err) + //} + ////cs.logger.Info(fmt.Sprintf("[%s] GetReceipts responseLen %d", ConvertH512ToPeerID(inreq.PeerId), len(b))) + //return nil } func MakeInboundMessage() *proto_sentry.InboundMessage { diff --git a/tests/state_test.go b/tests/state_test.go index 4f0f549a4db..0b94123788a 100644 --- a/tests/state_test.go +++ b/tests/state_test.go @@ -58,7 +58,7 @@ func TestState(t *testing.T) { //if ethconfig.EnableHistoryV3InTest { //} - _, db, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir())) + db, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir())) st.walk(t, stateTestDir, func(t *testing.T, name string, test *StateTest) { for _, subtest := range test.Subtests() { subtest := subtest diff --git a/tests/state_test_util.go b/tests/state_test_util.go index 
743abfecd74..3e3992d84ac 100644 --- a/tests/state_test_util.go +++ b/tests/state_test_util.go @@ -212,8 +212,8 @@ func (t *StateTest) RunNoVerify(tx kv.RwTx, subtest StateSubtest, vmconfig vm.Co defer domains.Close() txc.Doms = domains } - r = rpchelper.NewLatestStateReader(tx, config3.EnableHistoryV4InTest) - w = rpchelper.NewLatestStateWriter(txc, writeBlockNr, config3.EnableHistoryV4InTest) + r = rpchelper.NewLatestStateReader(tx) + w = rpchelper.NewLatestStateWriter(txc, writeBlockNr) statedb := state.New(r) var baseFee *big.Int @@ -327,7 +327,7 @@ func (t *StateTest) RunNoVerify(tx kv.RwTx, subtest StateSubtest, vmconfig vm.Co } func MakePreState(rules *chain.Rules, tx kv.RwTx, accounts types.GenesisAlloc, blockNr uint64, histV3 bool) (*state.IntraBlockState, error) { - r := rpchelper.NewLatestStateReader(tx, histV3) + r := rpchelper.NewLatestStateReader(tx) statedb := state.New(r) for addr, a := range accounts { statedb.SetCode(addr, a.Code) @@ -368,7 +368,7 @@ func MakePreState(rules *chain.Rules, tx kv.RwTx, accounts types.GenesisAlloc, b defer domains.Flush(context2.Background(), tx) txc.Doms = domains } - w = rpchelper.NewLatestStateWriter(txc, blockNr-1, histV3) + w = rpchelper.NewLatestStateWriter(txc, blockNr-1) // Commit and re-open to start with a clean state. if err := statedb.FinalizeTx(rules, w); err != nil { diff --git a/turbo/app/snapshots_cmd.go b/turbo/app/snapshots_cmd.go index ded60aaa3ef..9110e6c044e 100644 --- a/turbo/app/snapshots_cmd.go +++ b/turbo/app/snapshots_cmd.go @@ -33,7 +33,6 @@ import ( "github.com/ledgerwatch/erigon-lib/common/dir" "github.com/ledgerwatch/erigon-lib/etl" "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon-lib/kv/kvcfg" "github.com/ledgerwatch/erigon-lib/kv/mdbx" "github.com/ledgerwatch/erigon-lib/kv/rawdbv3" "github.com/ledgerwatch/erigon-lib/metrics" @@ -616,7 +615,7 @@ func openSnaps(ctx context.Context, cfg ethconfig.BlocksFreezing, dirs datadir.D } blockReader := freezeblocks.NewBlockReader(blockSnaps, borSnaps) - blockWriter := blockio.NewBlockWriter(fromdb.HistV3(chainDB)) + blockWriter := blockio.NewBlockWriter() blockSnapBuildSema := semaphore.NewWeighted(int64(dbg.BuildSnapshotAllowance)) agg.SetSnapshotBuildSema(blockSnapBuildSema) @@ -810,10 +809,6 @@ func doRetireCommand(cliCtx *cli.Context) error { } } - if !kvcfg.HistoryV3.FromDB(db) { - return nil - } - db, err = temporal.New(db, agg) if err != nil { return err diff --git a/turbo/engineapi/engine_helpers/fork_validator.go b/turbo/engineapi/engine_helpers/fork_validator.go index 18e580f8426..2f152379bfd 100644 --- a/turbo/engineapi/engine_helpers/fork_validator.go +++ b/turbo/engineapi/engine_helpers/fork_validator.go @@ -23,7 +23,6 @@ import ( libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon-lib/kv/kvcfg" "github.com/ledgerwatch/erigon-lib/kv/membatchwithdb" "github.com/ledgerwatch/erigon-lib/state" "github.com/ledgerwatch/erigon-lib/wrap" @@ -160,21 +159,16 @@ func (fv *ForkValidator) ValidatePayload(tx kv.Tx, header *types.Header, body *t log.Debug("Execution ForkValidator.ValidatePayload", "extendCanonical", extendCanonical) if extendCanonical { - histV3, err := kvcfg.HistoryV3.Enabled(tx) - if err != nil { - return "", [32]byte{}, nil, err - } var txc wrap.TxContainer m := membatchwithdb.NewMemoryBatch(tx, fv.tmpDir, logger) defer m.Close() txc.Tx = m - if histV3 { - txc.Doms, err = state.NewSharedDomains(tx, logger) - if err != nil { - return "", [32]byte{}, nil, err - } 
- defer txc.Doms.Close() + var err error + txc.Doms, err = state.NewSharedDomains(tx, logger) + if err != nil { + return "", [32]byte{}, nil, err } + defer txc.Doms.Close() fv.extendingForkNotifications = &shards.Notifications{ Events: shards.NewEvents(), Accumulator: shards.NewAccumulator(), @@ -265,21 +259,15 @@ func (fv *ForkValidator) ValidatePayload(tx kv.Tx, header *types.Header, body *t unwindPoint = 0 } var txc wrap.TxContainer - histV3, err := kvcfg.HistoryV3.Enabled(tx) - if err != nil { - return "", [32]byte{}, nil, err - } batch := membatchwithdb.NewMemoryBatch(tx, fv.tmpDir, logger) defer batch.Rollback() txc.Tx = batch - if histV3 { - sd, err := state.NewSharedDomains(tx, logger) - if err != nil { - return "", [32]byte{}, nil, err - } - defer sd.Close() - txc.Doms = sd + sd, err := state.NewSharedDomains(tx, logger) + if err != nil { + return "", [32]byte{}, nil, err } + defer sd.Close() + txc.Doms = sd notifications := &shards.Notifications{ Events: shards.NewEvents(), Accumulator: shards.NewAccumulator(), diff --git a/turbo/execution/eth1/ethereum_execution.go b/turbo/execution/eth1/ethereum_execution.go index 4d42e386830..06a8a4d1b1d 100644 --- a/turbo/execution/eth1/ethereum_execution.go +++ b/turbo/execution/eth1/ethereum_execution.go @@ -58,9 +58,8 @@ type EthereumExecutionModule struct { stateChangeConsumer shards.StateChangeConsumer // configuration - config *chain.Config - syncCfg ethconfig.Sync - historyV3 bool + config *chain.Config + syncCfg ethconfig.Sync // consensus engine consensus.Engine @@ -73,7 +72,7 @@ func NewEthereumExecutionModule(blockReader services.FullBlockReader, db kv.RwDB hook *stages.Hook, accumulator *shards.Accumulator, stateChangeConsumer shards.StateChangeConsumer, logger log.Logger, engine consensus.Engine, - historyV3 bool, syncCfg ethconfig.Sync, + syncCfg ethconfig.Sync, ctx context.Context, ) *EthereumExecutionModule { return &EthereumExecutionModule{ @@ -91,7 +90,6 @@ func NewEthereumExecutionModule(blockReader services.FullBlockReader, db kv.RwDB stateChangeConsumer: stateChangeConsumer, engine: engine, - historyV3: historyV3, syncCfg: syncCfg, bacgroundCtx: ctx, } diff --git a/turbo/execution/eth1/forkchoice.go b/turbo/execution/eth1/forkchoice.go index 8176b90c25d..1e5d1b2fe06 100644 --- a/turbo/execution/eth1/forkchoice.go +++ b/turbo/execution/eth1/forkchoice.go @@ -271,12 +271,10 @@ func (e *EthereumExecutionModule) updateForkChoice(ctx context.Context, original sendForkchoiceErrorWithoutWaiting(outcomeCh, err) return } - if e.historyV3 { - if err := rawdbv3.TxNums.Truncate(tx, currentParentNumber+1); err != nil { - //if err := rawdbv3.TxNums.Truncate(tx, fcuHeader.Number.Uint64()); err != nil { - sendForkchoiceErrorWithoutWaiting(outcomeCh, err) - return - } + if err := rawdbv3.TxNums.Truncate(tx, currentParentNumber+1); err != nil { + //if err := rawdbv3.TxNums.Truncate(tx, fcuHeader.Number.Uint64()); err != nil { + sendForkchoiceErrorWithoutWaiting(outcomeCh, err) + return } // Mark all new canonicals as canonicals for _, canonicalSegment := range newCanonicals { @@ -305,23 +303,15 @@ func (e *EthereumExecutionModule) updateForkChoice(ctx context.Context, original return } } - if e.historyV3 { - if len(newCanonicals) > 0 { - if err := rawdbv3.TxNums.Truncate(tx, newCanonicals[0].number); err != nil { - sendForkchoiceErrorWithoutWaiting(outcomeCh, err) - return - } - if err := rawdb.AppendCanonicalTxNums(tx, newCanonicals[len(newCanonicals)-1].number); err != nil { - sendForkchoiceErrorWithoutWaiting(outcomeCh, err) - return - 
} + if len(newCanonicals) > 0 { + if err := rawdbv3.TxNums.Truncate(tx, newCanonicals[0].number); err != nil { + sendForkchoiceErrorWithoutWaiting(outcomeCh, err) + return + } + if err := rawdb.AppendCanonicalTxNums(tx, newCanonicals[len(newCanonicals)-1].number); err != nil { + sendForkchoiceErrorWithoutWaiting(outcomeCh, err) + return } - //} else { - //if err := rawdbv3.TxNums.Truncate(tx, currentParentNumber+1); err != nil { - // sendForkchoiceErrorWithoutWaiting(outcomeCh, err) - // return - //} - //} } } diff --git a/turbo/jsonrpc/debug_api.go b/turbo/jsonrpc/debug_api.go index 0f24fc1973e..b2814a36643 100644 --- a/turbo/jsonrpc/debug_api.go +++ b/turbo/jsonrpc/debug_api.go @@ -13,7 +13,6 @@ import ( "github.com/ledgerwatch/erigon-lib/kv/order" "github.com/ledgerwatch/erigon-lib/kv/rawdbv3" - "github.com/ledgerwatch/erigon/common/changeset" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/state" "github.com/ledgerwatch/erigon/core/types/accounts" @@ -23,7 +22,6 @@ import ( "github.com/ledgerwatch/erigon/rpc" "github.com/ledgerwatch/erigon/turbo/adapter/ethapi" "github.com/ledgerwatch/erigon/turbo/rpchelper" - "github.com/ledgerwatch/erigon/turbo/transactions" ) // AccountRangeMaxResults is the maximum number of results to be returned per call @@ -68,37 +66,15 @@ func (api *PrivateDebugAPIImpl) StorageRangeAt(ctx context.Context, blockHash co } defer tx.Rollback() - chainConfig, err := api.chainConfig(ctx, tx) - if err != nil { - return StorageRangeResult{}, err - } - engine := api.engine() - - if api.historyV3(tx) { - number := rawdb.ReadHeaderNumber(tx, blockHash) - if number == nil { - return StorageRangeResult{}, fmt.Errorf("block not found") - } - minTxNum, err := rawdbv3.TxNums.Min(tx, *number) - if err != nil { - return StorageRangeResult{}, err - } - return storageRangeAtV3(tx.(kv.TemporalTx), contractAddress, keyStart, minTxNum+txIndex, maxResult) - } - - block, err := api.blockByHashWithSenders(ctx, tx, blockHash) - if err != nil { - return StorageRangeResult{}, err - } - if block == nil { - return StorageRangeResult{}, nil + number := rawdb.ReadHeaderNumber(tx, blockHash) + if number == nil { + return StorageRangeResult{}, fmt.Errorf("block not found") } - - _, _, _, _, stateReader, err := transactions.ComputeTxEnv(ctx, engine, block, chainConfig, api._blockReader, tx, int(txIndex), api.historyV3(tx)) + minTxNum, err := rawdbv3.TxNums.Min(tx, *number) if err != nil { return StorageRangeResult{}, err } - return storageRangeAt(stateReader.(*state.PlainState), contractAddress, keyStart, maxResult) + return storageRangeAtV3(tx.(kv.TemporalTx), contractAddress, keyStart, minTxNum+txIndex, maxResult) } // AccountRange implements debug_accountRange. 
Returns a range of accounts involved in the given block range. @@ -141,7 +117,7 @@ func (api *PrivateDebugAPIImpl) AccountRange(ctx context.Context, blockNrOrHash maxResults = AccountRangeMaxResults } - dumper := state.NewDumper(tx, blockNumber, api.historyV3(tx)) + dumper := state.NewDumper(tx, blockNumber, true) res, err := dumper.IteratorDump(excludeCode, excludeStorage, common.BytesToAddress(startKey), maxResults) if err != nil { return state.IteratorDump{}, err } @@ -194,18 +170,15 @@ func (api *PrivateDebugAPIImpl) GetModifiedAccountsByNumber(ctx context.Context, } //[from, to) - if api.historyV3(tx) { - startTxNum, err := rawdbv3.TxNums.Min(tx, startNum) - if err != nil { - return nil, err - } - endTxNum, err := rawdbv3.TxNums.Max(tx, endNum-1) - if err != nil { - return nil, err - } - return getModifiedAccountsV3(tx.(kv.TemporalTx), startTxNum, endTxNum) + startTxNum, err := rawdbv3.TxNums.Min(tx, startNum) + if err != nil { + return nil, err + } + endTxNum, err := rawdbv3.TxNums.Max(tx, endNum-1) + if err != nil { + return nil, err } - return changeset.GetModifiedAccounts(tx, startNum, endNum) + return getModifiedAccountsV3(tx.(kv.TemporalTx), startTxNum, endTxNum) } // getModifiedAccountsV3 returns a list of addresses that were modified in the block range @@ -274,18 +247,15 @@ func (api *PrivateDebugAPIImpl) GetModifiedAccountsByHash(ctx context.Context, s } //[from, to) - if api.historyV3(tx) { - startTxNum, err := rawdbv3.TxNums.Min(tx, startNum) - if err != nil { - return nil, err - } - endTxNum, err := rawdbv3.TxNums.Max(tx, endNum-1) - if err != nil { - return nil, err - } - return getModifiedAccountsV3(tx.(kv.TemporalTx), startTxNum, endTxNum) + startTxNum, err := rawdbv3.TxNums.Min(tx, startNum) + if err != nil { + return nil, err + } + endTxNum, err := rawdbv3.TxNums.Max(tx, endNum-1) + if err != nil { + return nil, err } - return changeset.GetModifiedAccounts(tx, startNum, endNum) + return getModifiedAccountsV3(tx.(kv.TemporalTx), startTxNum, endTxNum) } func (api *PrivateDebugAPIImpl) AccountAt(ctx context.Context, blockHash common.Hash, txIndex uint64, address common.Address) (*AccountResult, error) { @@ -295,69 +265,43 @@
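Both GetModifiedAccounts variants above now perform the same translation: the half-open block range [startNum, endNum) becomes an inclusive txnum range via TxNums.Min(startNum) and TxNums.Max(endNum-1). A hedged sketch of that arithmetic, with toy per-block bounds standing in for the real rawdbv3.TxNums calls:

package main

import "fmt"

func main() {
	// Hypothetical first/last global txnums per block, mirroring what
	// rawdbv3.TxNums.Min and rawdbv3.TxNums.Max return.
	minTxNum := []uint64{0, 3, 8}  // first txnum of blocks 0..2
	maxTxNum := []uint64{2, 7, 11} // last txnum of blocks 0..2

	startNum, endNum := uint64(1), uint64(3) // blocks [1, 3), as in the API
	startTxNum := minTxNum[startNum]         // TxNums.Min(tx, startNum)
	endTxNum := maxTxNum[endNum-1]           // TxNums.Max(tx, endNum-1): endNum is exclusive
	fmt.Printf("blocks [%d, %d) => txnums [%d, %d]\n", startNum, endNum, startTxNum, endTxNum)
}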
} defer tx.Rollback() - if api.historyV3(tx) { - number := rawdb.ReadHeaderNumber(tx, blockHash) - if number == nil { - return nil, nil - } - canonicalHash, _ := api._blockReader.CanonicalHash(ctx, tx, *number) - isCanonical := canonicalHash == blockHash - if !isCanonical { - return nil, fmt.Errorf("block hash is not canonical") - } - - minTxNum, err := rawdbv3.TxNums.Min(tx, *number) - if err != nil { - return nil, err - } - ttx := tx.(kv.TemporalTx) - v, ok, err := ttx.DomainGetAsOf(kv.AccountsDomain, address[:], nil, minTxNum+txIndex+1) - if err != nil { - return nil, err - } - if !ok || len(v) == 0 { - return &AccountResult{}, nil - } - - var a accounts.Account - if err := accounts.DeserialiseV3(&a, v); err != nil { - return nil, err - } - result := &AccountResult{} - result.Balance.ToInt().Set(a.Balance.ToBig()) - result.Nonce = hexutil.Uint64(a.Nonce) - result.CodeHash = a.CodeHash - - code, _, err := ttx.DomainGetAsOf(kv.CodeDomain, address[:], nil, minTxNum+txIndex) - if err != nil { - return nil, err - } - result.Code = code - return result, nil + number := rawdb.ReadHeaderNumber(tx, blockHash) + if number == nil { + return nil, nil + } + canonicalHash, _ := api._blockReader.CanonicalHash(ctx, tx, *number) + isCanonical := canonicalHash == blockHash + if !isCanonical { + return nil, fmt.Errorf("block hash is not canonical") } - chainConfig, err := api.chainConfig(ctx, tx) + minTxNum, err := rawdbv3.TxNums.Min(tx, *number) if err != nil { return nil, err } - engine := api.engine() - - block, err := api.blockByHashWithSenders(ctx, tx, blockHash) + ttx := tx.(kv.TemporalTx) + v, ok, err := ttx.DomainGetAsOf(kv.AccountsDomain, address[:], nil, minTxNum+txIndex+1) if err != nil { return nil, err } - if block == nil { - return nil, nil + if !ok || len(v) == 0 { + return &AccountResult{}, nil } - _, _, _, ibs, _, err := transactions.ComputeTxEnv(ctx, engine, block, chainConfig, api._blockReader, tx, int(txIndex), api.historyV3(tx)) - if err != nil { + + var a accounts.Account + if err := accounts.DeserialiseV3(&a, v); err != nil { return nil, err } result := &AccountResult{} - result.Balance.ToInt().Set(ibs.GetBalance(address).ToBig()) - result.Nonce = hexutil.Uint64(ibs.GetNonce(address)) - result.Code = ibs.GetCode(address) - result.CodeHash = ibs.GetCodeHash(address) + result.Balance.ToInt().Set(a.Balance.ToBig()) + result.Nonce = hexutil.Uint64(a.Nonce) + result.CodeHash = a.CodeHash + + code, _, err := ttx.DomainGetAsOf(kv.CodeDomain, address[:], nil, minTxNum+txIndex) + if err != nil { + return nil, err + } + result.Code = code return result, nil } diff --git a/turbo/jsonrpc/erigon_block.go b/turbo/jsonrpc/erigon_block.go index 4e983520519..b545d8e30b9 100644 --- a/turbo/jsonrpc/erigon_block.go +++ b/turbo/jsonrpc/erigon_block.go @@ -1,7 +1,6 @@ package jsonrpc import ( - "bytes" "context" "errors" "fmt" @@ -11,11 +10,9 @@ import ( "github.com/holiman/uint256" "github.com/ledgerwatch/erigon-lib/common" - "github.com/ledgerwatch/erigon-lib/common/hexutility" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/order" "github.com/ledgerwatch/erigon-lib/kv/rawdbv3" - "github.com/ledgerwatch/erigon-lib/kv/temporal/historyv2" "github.com/ledgerwatch/erigon/turbo/services" "github.com/ledgerwatch/erigon/core/rawdb" @@ -210,7 +207,7 @@ func (api *ErigonImpl) GetBalanceChangesInBlock(ctx context.Context, blockNrOrHa defer tx.Rollback() balancesMapping := make(map[common.Address]*hexutil.Big) - latestState, err := rpchelper.CreateStateReader(ctx, tx, blockNrOrHash, 
0, api.filters, api.stateCache, api.historyV3(tx), "") + latestState, err := rpchelper.CreateStateReader(ctx, tx, blockNrOrHash, 0, api.filters, api.stateCache, "") if err != nil { return nil, err } @@ -220,70 +217,27 @@ func (api *ErigonImpl) GetBalanceChangesInBlock(ctx context.Context, blockNrOrHa return nil, err } - if api.historyV3(tx) { - minTxNum, _ := rawdbv3.TxNums.Min(tx, blockNumber) - it, err := tx.(kv.TemporalTx).HistoryRange(kv.AccountsHistory, int(minTxNum), -1, order.Asc, -1) - if err != nil { - return nil, err - } - defer it.Close() - for it.HasNext() { - addressBytes, v, err := it.Next() - if err != nil { - return nil, err - } - - var oldAcc accounts.Account - if len(v) > 0 { - if err = accounts.DeserialiseV3(&oldAcc, v); err != nil { - return nil, err - } - } - oldBalance := oldAcc.Balance - - address := common.BytesToAddress(addressBytes) - newAcc, err := latestState.ReadAccountData(address) - if err != nil { - return nil, err - } - - newBalance := uint256.NewInt(0) - if newAcc != nil { - newBalance = &newAcc.Balance - } - - if !oldBalance.Eq(newBalance) { - newBalanceDesc := (*hexutil.Big)(newBalance.ToBig()) - balancesMapping[address] = newBalanceDesc - } - } - } - - c, err := tx.Cursor(kv.AccountChangeSet) + minTxNum, _ := rawdbv3.TxNums.Min(tx, blockNumber) + it, err := tx.(kv.TemporalTx).HistoryRange(kv.AccountsHistory, int(minTxNum), -1, order.Asc, -1) if err != nil { return nil, err } - defer c.Close() - - startkey := hexutility.EncodeTs(blockNumber) - - decodeFn := historyv2.Mapper[kv.AccountChangeSet].Decode - - for dbKey, dbValue, err := c.Seek(startkey); bytes.Equal(dbKey, startkey) && dbKey != nil; dbKey, dbValue, err = c.Next() { - if err != nil { - return nil, err - } - _, addressBytes, v, err := decodeFn(dbKey, dbValue) + defer it.Close() + for it.HasNext() { + addressBytes, v, err := it.Next() if err != nil { return nil, err } + var oldAcc accounts.Account - if err = oldAcc.DecodeForStorage(v); err != nil { - return nil, err + if len(v) > 0 { + if err = accounts.DeserialiseV3(&oldAcc, v); err != nil { + return nil, err + } } oldBalance := oldAcc.Balance - address := common.BytesToAddress(addressBytes) + address := common.BytesToAddress(addressBytes) newAcc, err := latestState.ReadAccountData(address) if err != nil { return nil, err diff --git a/turbo/jsonrpc/eth_accounts.go b/turbo/jsonrpc/eth_accounts.go index 3d5d86c9b84..284c16f2bd2 100644 --- a/turbo/jsonrpc/eth_accounts.go +++ b/turbo/jsonrpc/eth_accounts.go @@ -27,7 +27,7 @@ func (api *APIImpl) GetBalance(ctx context.Context, address libcommon.Address, b return nil, fmt.Errorf("getBalance cannot open tx: %w", err1) } defer tx.Rollback() - reader, err := rpchelper.CreateStateReader(ctx, tx, blockNrOrHash, 0, api.filters, api.stateCache, api.historyV3(tx), "") + reader, err := rpchelper.CreateStateReader(ctx, tx, blockNrOrHash, 0, api.filters, api.stateCache, "") if err != nil { return nil, err } @@ -63,7 +63,7 @@ func (api *APIImpl) GetTransactionCount(ctx context.Context, address libcommon.A return nil, fmt.Errorf("getTransactionCount cannot open tx: %w", err1) } defer tx.Rollback() - reader, err := rpchelper.CreateStateReader(ctx, tx, blockNrOrHash, 0, api.filters, api.stateCache, api.historyV3(tx), "") + reader, err := rpchelper.CreateStateReader(ctx, tx, blockNrOrHash, 0, api.filters, api.stateCache, "") if err != nil { return nil, err } @@ -86,7 +86,7 @@ func (api *APIImpl) GetCode(ctx context.Context, address libcommon.Address, bloc if err != nil { return nil, fmt.Errorf("read chain config: 
%v", err) } - reader, err := rpchelper.CreateStateReader(ctx, tx, blockNrOrHash, 0, api.filters, api.stateCache, api.historyV3(tx), chainConfig.ChainName) + reader, err := rpchelper.CreateStateReader(ctx, tx, blockNrOrHash, 0, api.filters, api.stateCache, chainConfig.ChainName) if err != nil { return nil, err } @@ -112,7 +112,7 @@ func (api *APIImpl) GetStorageAt(ctx context.Context, address libcommon.Address, } defer tx.Rollback() - reader, err := rpchelper.CreateStateReader(ctx, tx, blockNrOrHash, 0, api.filters, api.stateCache, api.historyV3(tx), "") + reader, err := rpchelper.CreateStateReader(ctx, tx, blockNrOrHash, 0, api.filters, api.stateCache, "") if err != nil { return hexutility.Encode(common.LeftPadBytes(empty, 32)), err } @@ -137,7 +137,7 @@ func (api *APIImpl) Exist(ctx context.Context, address libcommon.Address, blockN } defer tx.Rollback() - reader, err := rpchelper.CreateStateReader(ctx, tx, blockNrOrHash, 0, api.filters, api.stateCache, api.historyV3(tx), "") + reader, err := rpchelper.CreateStateReader(ctx, tx, blockNrOrHash, 0, api.filters, api.stateCache, "") if err != nil { return false, err } diff --git a/turbo/jsonrpc/eth_api.go b/turbo/jsonrpc/eth_api.go index 4f298f3429e..7a7d9e465e1 100644 --- a/turbo/jsonrpc/eth_api.go +++ b/turbo/jsonrpc/eth_api.go @@ -22,7 +22,6 @@ import ( txpool "github.com/ledgerwatch/erigon-lib/gointerfaces/txpoolproto" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/kvcache" - "github.com/ledgerwatch/erigon-lib/kv/kvcfg" libstate "github.com/ledgerwatch/erigon-lib/state" types2 "github.com/ledgerwatch/erigon-lib/types" @@ -117,7 +116,6 @@ type BaseAPI struct { filters *rpchelper.Filters _chainConfig atomic.Pointer[chain.Config] _genesis atomic.Pointer[types.Block] - _historyV3 atomic.Pointer[bool] _pruneMode atomic.Pointer[prune.Mode] _blockReader services.FullBlockReader @@ -232,20 +230,6 @@ func (api *BaseAPI) blockWithSenders(ctx context.Context, tx kv.Tx, hash common. 
return block, nil } -func (api *BaseAPI) historyV3(tx kv.Tx) bool { - historyV3 := api._historyV3.Load() - if historyV3 != nil { - return *historyV3 - } - enabled, err := kvcfg.HistoryV3.Enabled(tx) - if err != nil { - log.Warn("HisoryV3Enabled: read", "err", err) - return false - } - api._historyV3.Store(&enabled) - return enabled -} - func (api *BaseAPI) chainConfigWithGenesis(ctx context.Context, tx kv.Tx) (*chain.Config, *types.Block, error) { cc, genesisBlock := api._chainConfig.Load(), api._genesis.Load() if cc != nil && genesisBlock != nil { diff --git a/turbo/jsonrpc/eth_block.go b/turbo/jsonrpc/eth_block.go index 131935603b3..5fc462a588b 100644 --- a/turbo/jsonrpc/eth_block.go +++ b/turbo/jsonrpc/eth_block.go @@ -80,16 +80,15 @@ func (api *APIImpl) CallBundle(ctx context.Context, txHashes []common.Hash, stat if err != nil { return nil, err } - histV3 := api.historyV3(tx) var stateReader state.StateReader if latest { cacheView, err := api.stateCache.View(ctx, tx) if err != nil { return nil, err } - stateReader = rpchelper.CreateLatestCachedStateReader(cacheView, tx, histV3) + stateReader = rpchelper.CreateLatestCachedStateReader(cacheView, tx) } else { - stateReader, err = rpchelper.CreateHistoryStateReader(tx, stateBlockNumber+1, 0, histV3, chainConfig.ChainName) + stateReader, err = rpchelper.CreateHistoryStateReader(tx, stateBlockNumber+1, 0, chainConfig.ChainName) if err != nil { return nil, err } diff --git a/turbo/jsonrpc/eth_call.go b/turbo/jsonrpc/eth_call.go index e30efae1e35..fc0372949c6 100644 --- a/turbo/jsonrpc/eth_call.go +++ b/turbo/jsonrpc/eth_call.go @@ -9,7 +9,6 @@ import ( "github.com/ledgerwatch/erigon-lib/common/hexutil" "github.com/holiman/uint256" - "github.com/ledgerwatch/erigon-lib/kv/membatchwithdb" "github.com/ledgerwatch/log/v3" "google.golang.org/grpc" @@ -26,14 +25,12 @@ import ( "github.com/ledgerwatch/erigon/core/types/accounts" "github.com/ledgerwatch/erigon/core/vm" "github.com/ledgerwatch/erigon/crypto" - "github.com/ledgerwatch/erigon/eth/stagedsync" "github.com/ledgerwatch/erigon/eth/tracers/logger" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/rpc" ethapi2 "github.com/ledgerwatch/erigon/turbo/adapter/ethapi" "github.com/ledgerwatch/erigon/turbo/rpchelper" "github.com/ledgerwatch/erigon/turbo/transactions" - "github.com/ledgerwatch/erigon/turbo/trie" ) var latestNumOrHash = rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber) @@ -68,7 +65,7 @@ func (api *APIImpl) Call(ctx context.Context, args ethapi2.CallArgs, blockNrOrHa return nil, nil } - stateReader, err := rpchelper.CreateStateReader(ctx, tx, blockNrOrHash, 0, api.filters, api.stateCache, api.historyV3(tx), chainConfig.ChainName) + stateReader, err := rpchelper.CreateStateReader(ctx, tx, blockNrOrHash, 0, api.filters, api.stateCache, chainConfig.ChainName) if err != nil { return nil, err } @@ -186,7 +183,7 @@ func (api *APIImpl) EstimateGas(ctx context.Context, argsOrNil *ethapi2.CallArgs if err != nil { return 0, err } - stateReader := rpchelper.CreateLatestCachedStateReader(cacheView, dbtx, api.historyV3(dbtx)) + stateReader := rpchelper.CreateLatestCachedStateReader(cacheView, dbtx) state := state.New(stateReader) if state == nil { return 0, fmt.Errorf("can't get the current state") @@ -244,7 +241,7 @@ func (api *APIImpl) EstimateGas(ctx context.Context, argsOrNil *ethapi2.CallArgs return 0, fmt.Errorf("could not find latest block in cache or db") } - stateReader, err := rpchelper.CreateStateReaderFromBlockNumber(ctx, dbtx, latestCanBlockNumber, isLatest, 0, 
api.stateCache, api.historyV3(dbtx), chainConfig.ChainName) + stateReader, err := rpchelper.CreateStateReaderFromBlockNumber(ctx, dbtx, latestCanBlockNumber, isLatest, 0, api.stateCache, chainConfig.ChainName) if err != nil { return 0, err } @@ -319,89 +316,88 @@ func (api *APIImpl) EstimateGas(ctx context.Context, argsOrNil *ethapi2.CallArgs // GetProof is partially implemented; no Storage proofs, and proofs must be for // blocks within maxGetProofRewindBlockCount blocks of the head. func (api *APIImpl) GetProof(ctx context.Context, address libcommon.Address, storageKeys []libcommon.Hash, blockNrOrHash rpc.BlockNumberOrHash) (*accounts.AccProofResult, error) { + return nil, fmt.Errorf("not supported by Erigon3") + /* + tx, err := api.db.BeginRo(ctx) + if err != nil { + return nil, err + } + defer tx.Rollback() - tx, err := api.db.BeginRo(ctx) - if err != nil { - return nil, err - } - defer tx.Rollback() - if api.historyV3(tx) { - return nil, fmt.Errorf("not supported by Erigon3") - } + blockNr, _, _, err := rpchelper.GetBlockNumber(blockNrOrHash, tx, api.filters) + if err != nil { + return nil, err + } - blockNr, _, _, err := rpchelper.GetBlockNumber(blockNrOrHash, tx, api.filters) - if err != nil { - return nil, err - } + header, err := api._blockReader.HeaderByNumber(ctx, tx, blockNr) + if err != nil { + return nil, err + } - header, err := api._blockReader.HeaderByNumber(ctx, tx, blockNr) - if err != nil { - return nil, err - } + latestBlock, err := rpchelper.GetLatestBlockNumber(tx) + if err != nil { + return nil, err + } - latestBlock, err := rpchelper.GetLatestBlockNumber(tx) - if err != nil { - return nil, err - } + if latestBlock < blockNr { + // shouldn't happen, but check anyway + return nil, fmt.Errorf("block number is in the future latest=%d requested=%d", latestBlock, blockNr) + } - if latestBlock < blockNr { - // shouldn't happen, but check anyway - return nil, fmt.Errorf("block number is in the future latest=%d requested=%d", latestBlock, blockNr) - } + rl := trie.NewRetainList(0) + var loader *trie.FlatDBTrieLoader + if blockNr < latestBlock { + if latestBlock-blockNr > uint64(api.MaxGetProofRewindBlockCount) { + return nil, fmt.Errorf("requested block is too old, block must be within %d blocks of the head block number (currently %d)", uint64(api.MaxGetProofRewindBlockCount), latestBlock) + } + batch := membatchwithdb.NewMemoryBatch(tx, api.dirs.Tmp, api.logger) + defer batch.Rollback() - rl := trie.NewRetainList(0) - var loader *trie.FlatDBTrieLoader - if blockNr < latestBlock { - if latestBlock-blockNr > uint64(api.MaxGetProofRewindBlockCount) { - return nil, fmt.Errorf("requested block is too old, block must be within %d blocks of the head block number (currently %d)", uint64(api.MaxGetProofRewindBlockCount), latestBlock) - } - batch := membatchwithdb.NewMemoryBatch(tx, api.dirs.Tmp, api.logger) - defer batch.Rollback() + unwindState := &stagedsync.UnwindState{UnwindPoint: blockNr} + stageState := &stagedsync.StageState{BlockNumber: latestBlock} + + hashStageCfg := stagedsync.StageHashStateCfg(nil, api.dirs, api.historyV3(batch)) + if err := stagedsync.UnwindHashStateStage(unwindState, stageState, batch, hashStageCfg, ctx, api.logger); err != nil { + return nil, err + } - unwindState := &stagedsync.UnwindState{UnwindPoint: blockNr} - stageState := &stagedsync.StageState{BlockNumber: latestBlock} + interHashStageCfg := stagedsync.StageTrieCfg(nil, false, false, false, api.dirs.Tmp, api._blockReader, nil, api.historyV3(batch), api._agg) + loader, err = 
stagedsync.UnwindIntermediateHashesForTrieLoader("eth_getProof", rl, unwindState, stageState, batch, interHashStageCfg, nil, nil, ctx.Done(), api.logger) + if err != nil { + return nil, err + } + tx = batch + } else { + loader = trie.NewFlatDBTrieLoader("eth_getProof", rl, nil, nil, false) + } - hashStageCfg := stagedsync.StageHashStateCfg(nil, api.dirs, api.historyV3(batch)) - if err := stagedsync.UnwindHashStateStage(unwindState, stageState, batch, hashStageCfg, ctx, api.logger); err != nil { + reader, err := rpchelper.CreateStateReader(ctx, tx, blockNrOrHash, 0, api.filters, api.stateCache, "") + if err != nil { return nil, err } - - interHashStageCfg := stagedsync.StageTrieCfg(nil, false, false, false, api.dirs.Tmp, api._blockReader, nil, api.historyV3(batch), api._agg) - loader, err = stagedsync.UnwindIntermediateHashesForTrieLoader("eth_getProof", rl, unwindState, stageState, batch, interHashStageCfg, nil, nil, ctx.Done(), api.logger) + a, err := reader.ReadAccountData(address) + if err != nil { + return nil, err + } + if a == nil { + a = &accounts.Account{} + } + pr, err := trie.NewProofRetainer(address, a, storageKeys, rl) if err != nil { return nil, err } - tx = batch - } else { - loader = trie.NewFlatDBTrieLoader("eth_getProof", rl, nil, nil, false) - } - - reader, err := rpchelper.CreateStateReader(ctx, tx, blockNrOrHash, 0, api.filters, api.stateCache, api.historyV3(tx), "") - if err != nil { - return nil, err - } - a, err := reader.ReadAccountData(address) - if err != nil { - return nil, err - } - if a == nil { - a = &accounts.Account{} - } - pr, err := trie.NewProofRetainer(address, a, storageKeys, rl) - if err != nil { - return nil, err - } - loader.SetProofRetainer(pr) - root, err := loader.CalcTrieRoot(tx, nil) - if err != nil { - return nil, err - } + loader.SetProofRetainer(pr) + root, err := loader.CalcTrieRoot(tx, nil) + if err != nil { + return nil, err + } - if root != header.Root { - return nil, fmt.Errorf("mismatch in expected state root computed %v vs %v indicates bug in proof implementation", root, header.Root) - } - return pr.ProofResult() + if root != header.Root { + return nil, fmt.Errorf("mismatch in expected state root computed %v vs %v indicates bug in proof implementation", root, header.Root) + } + return pr.ProofResult() + */ } func (api *APIImpl) tryBlockFromLru(hash libcommon.Hash) *types.Block { @@ -455,16 +451,15 @@ func (api *APIImpl) CreateAccessList(ctx context.Context, args ethapi2.CallArgs, if block == nil { return nil, nil } - histV3 := api.historyV3(tx) var stateReader state.StateReader if latest { cacheView, err := api.stateCache.View(ctx, tx) if err != nil { return nil, err } - stateReader = rpchelper.CreateLatestCachedStateReader(cacheView, tx, histV3) + stateReader = rpchelper.CreateLatestCachedStateReader(cacheView, tx) } else { - stateReader, err = rpchelper.CreateHistoryStateReader(tx, blockNumber+1, 0, histV3, chainConfig.ChainName) + stateReader, err = rpchelper.CreateHistoryStateReader(tx, blockNumber+1, 0, chainConfig.ChainName) if err != nil { return nil, err } diff --git a/turbo/jsonrpc/eth_callMany.go b/turbo/jsonrpc/eth_callMany.go index c40f0ca68d6..fa9b9c59ffa 100644 --- a/turbo/jsonrpc/eth_callMany.go +++ b/turbo/jsonrpc/eth_callMany.go @@ -130,7 +130,7 @@ func (api *APIImpl) CallMany(ctx context.Context, bundles []Bundle, simulateCont replayTransactions = block.Transactions()[:transactionIndex] - stateReader, err := rpchelper.CreateStateReader(ctx, tx, rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(blockNum-1)), 0, 
api.filters, api.stateCache, api.historyV3(tx), chainConfig.ChainName) + stateReader, err := rpchelper.CreateStateReader(ctx, tx, rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(blockNum-1)), 0, api.filters, api.stateCache, chainConfig.ChainName) if err != nil { return nil, err diff --git a/turbo/jsonrpc/eth_call_test.go b/turbo/jsonrpc/eth_call_test.go index ef5654e3d52..1048dd8101d 100644 --- a/turbo/jsonrpc/eth_call_test.go +++ b/turbo/jsonrpc/eth_call_test.go @@ -534,13 +534,13 @@ func chainWithDeployedContract(t *testing.T) (*mock.MockSentry, libcommon.Addres } defer tx.Rollback() - stateReader, err := rpchelper.CreateHistoryStateReader(tx, 1, 0, m.HistoryV3, "") + stateReader, err := rpchelper.CreateHistoryStateReader(tx, 1, 0, "") assert.NoError(t, err) st := state.New(stateReader) assert.NoError(t, err) assert.False(t, st.Exist(contractAddr), "Contract should not exist at block #1") - stateReader, err = rpchelper.CreateHistoryStateReader(tx, 2, 0, m.HistoryV3, "") + stateReader, err = rpchelper.CreateHistoryStateReader(tx, 2, 0, "") assert.NoError(t, err) st = state.New(stateReader) assert.NoError(t, err) diff --git a/turbo/jsonrpc/eth_receipts.go b/turbo/jsonrpc/eth_receipts.go index 5e9e1c52754..6916b45bceb 100644 --- a/turbo/jsonrpc/eth_receipts.go +++ b/turbo/jsonrpc/eth_receipts.go @@ -1,9 +1,7 @@ package jsonrpc import ( - "bytes" "context" - "encoding/binary" "fmt" "math/big" @@ -16,7 +14,6 @@ import ( "github.com/ledgerwatch/erigon-lib/chain" "github.com/ledgerwatch/erigon-lib/common" - "github.com/ledgerwatch/erigon-lib/common/hexutility" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/bitmapdb" "github.com/ledgerwatch/erigon-lib/kv/iter" @@ -32,7 +29,6 @@ import ( "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/core/vm" "github.com/ledgerwatch/erigon/eth/filters" - "github.com/ledgerwatch/erigon/ethdb/cbor" "github.com/ledgerwatch/erigon/rpc" "github.com/ledgerwatch/erigon/turbo/rpchelper" "github.com/ledgerwatch/erigon/turbo/transactions" @@ -55,7 +51,7 @@ func (api *BaseAPI) getReceipts(ctx context.Context, tx kv.Tx, block *types.Bloc return nil, err } - _, _, _, ibs, _, err := transactions.ComputeTxEnv(ctx, engine, block, chainConfig, api._blockReader, tx, 0, api.historyV3(tx)) + _, _, _, ibs, _, err := transactions.ComputeTxEnv(ctx, engine, block, chainConfig, api._blockReader, tx, 0) if err != nil { return nil, err } @@ -163,91 +159,7 @@ func (api *APIImpl) GetLogs(ctx context.Context, crit filters.FilterCriteria) (t end = latest } - if api.historyV3(tx) { - return api.getLogsV3(ctx, tx.(kv.TemporalTx), begin, end, crit) - } - blockNumbers := bitmapdb.NewBitmap() - defer bitmapdb.ReturnToPool(blockNumbers) - if err := applyFilters(blockNumbers, tx, begin, end, crit); err != nil { - return logs, err - } - if blockNumbers.IsEmpty() { - return logs, nil - } - addrMap := make(map[common.Address]struct{}, len(crit.Addresses)) - for _, v := range crit.Addresses { - addrMap[v] = struct{}{} - } - iter := blockNumbers.Iterator() - for iter.HasNext() { - if err := ctx.Err(); err != nil { - return nil, err - } - - blockNumber := uint64(iter.Next()) - var logIndex uint - var txIndex uint - var blockLogs []*types.Log - - it, err := tx.Prefix(kv.Log, hexutility.EncodeTs(blockNumber)) - if err != nil { - return nil, err - } - for it.HasNext() { - k, v, err := it.Next() - if err != nil { - return logs, err - } - - var logs types.Logs - if err := cbor.Unmarshal(&logs, bytes.NewReader(v)); err != nil { - return logs, 
fmt.Errorf("receipt unmarshal failed: %w", err) - } - for _, log := range logs { - log.Index = logIndex - logIndex++ - } - filtered := logs.Filter(addrMap, crit.Topics) - if len(filtered) == 0 { - continue - } - txIndex = uint(binary.BigEndian.Uint32(k[8:])) - for _, log := range filtered { - log.TxIndex = txIndex - } - blockLogs = append(blockLogs, filtered...) - } - it.Close() - if len(blockLogs) == 0 { - continue - } - - blockHash, err := api._blockReader.CanonicalHash(ctx, tx, blockNumber) - if err != nil { - return nil, err - } - - body, err := api._blockReader.BodyWithTransactions(ctx, tx, blockHash, blockNumber) - if err != nil { - return nil, err - } - if body == nil { - return nil, fmt.Errorf("block not found %d", blockNumber) - } - for _, log := range blockLogs { - log.BlockNumber = blockNumber - log.BlockHash = blockHash - // bor transactions are at the end of the bodies transactions (added manually but not actually part of the block) - if log.TxIndex == uint(len(body.Transactions)) { - log.TxHash = bortypes.ComputeBorTxHash(blockNumber, blockHash) - } else { - log.TxHash = body.Transactions[log.TxIndex].Hash() - } - } - logs = append(logs, blockLogs...) - } - - return logs, nil + return api.getLogsV3(ctx, tx.(kv.TemporalTx), begin, end, crit) } // The Topic list restricts matches to particular event topics. Each event has a list diff --git a/turbo/jsonrpc/otterscan_api.go b/turbo/jsonrpc/otterscan_api.go index 4b311ec647a..2c38720e1a6 100644 --- a/turbo/jsonrpc/otterscan_api.go +++ b/turbo/jsonrpc/otterscan_api.go @@ -2,7 +2,6 @@ package jsonrpc import ( "context" - "errors" "fmt" "math/big" @@ -126,7 +125,7 @@ func (api *OtterscanAPIImpl) runTracer(ctx context.Context, tx kv.Tx, hash commo } engine := api.engine() - msg, blockCtx, txCtx, ibs, _, err := transactions.ComputeTxEnv(ctx, engine, block, chainConfig, api._blockReader, tx, int(txIndex), api.historyV3(tx)) + msg, blockCtx, txCtx, ibs, _, err := transactions.ComputeTxEnv(ctx, engine, block, chainConfig, api._blockReader, tx, int(txIndex)) if err != nil { return nil, err } @@ -181,76 +180,7 @@ func (api *OtterscanAPIImpl) SearchTransactionsBefore(ctx context.Context, addr } defer dbtx.Rollback() - if api.historyV3(dbtx) { - return api.searchTransactionsBeforeV3(dbtx.(kv.TemporalTx), ctx, addr, blockNum, pageSize) - } - - callFromCursor, err := dbtx.Cursor(kv.CallFromIndex) - if err != nil { - return nil, err - } - defer callFromCursor.Close() - - callToCursor, err := dbtx.Cursor(kv.CallToIndex) - if err != nil { - return nil, err - } - defer callToCursor.Close() - - chainConfig, err := api.chainConfig(ctx, dbtx) - if err != nil { - return nil, err - } - - isFirstPage := false - if blockNum == 0 { - isFirstPage = true - } else { - // Internal search code considers blockNum [including], so adjust the value - blockNum-- - } - - // Initialize search cursors at the first shard >= desired block number - callFromProvider := NewCallCursorBackwardBlockProvider(callFromCursor, addr, blockNum) - callToProvider := NewCallCursorBackwardBlockProvider(callToCursor, addr, blockNum) - callFromToProvider := newCallFromToBlockProvider(false, callFromProvider, callToProvider) - - txs := make([]*RPCTransaction, 0, pageSize) - receipts := make([]map[string]interface{}, 0, pageSize) - - resultCount := uint16(0) - hasMore := true - for { - if resultCount >= pageSize || !hasMore { - break - } - - var results []*TransactionsWithReceipts - results, hasMore, err = api.traceBlocks(ctx, addr, chainConfig, pageSize, resultCount, callFromToProvider) - 
if err != nil { - return nil, err - } - - for _, r := range results { - if r == nil { - return nil, errors.New("internal error during search tracing") - } - - for i := len(r.Txs) - 1; i >= 0; i-- { - txs = append(txs, r.Txs[i]) - } - for i := len(r.Receipts) - 1; i >= 0; i-- { - receipts = append(receipts, r.Receipts[i]) - } - - resultCount += uint16(len(r.Txs)) - if resultCount >= pageSize { - break - } - } - } - - return &TransactionsWithReceipts{txs, receipts, isFirstPage, !hasMore}, nil + return api.searchTransactionsBeforeV3(dbtx.(kv.TemporalTx), ctx, addr, blockNum, pageSize) } // Search transactions that touch a certain address. @@ -272,78 +202,7 @@ func (api *OtterscanAPIImpl) SearchTransactionsAfter(ctx context.Context, addr c } defer dbtx.Rollback() - if api.historyV3(dbtx) { - return api.searchTransactionsAfterV3(dbtx.(kv.TemporalTx), ctx, addr, blockNum, pageSize) - } - - callFromCursor, err := dbtx.Cursor(kv.CallFromIndex) - if err != nil { - return nil, err - } - defer callFromCursor.Close() - - callToCursor, err := dbtx.Cursor(kv.CallToIndex) - if err != nil { - return nil, err - } - defer callToCursor.Close() - - chainConfig, err := api.chainConfig(ctx, dbtx) - if err != nil { - return nil, err - } - - isLastPage := false - if blockNum == 0 { - isLastPage = true - } else { - // Internal search code considers blockNum [including], so adjust the value - blockNum++ - } - - // Initialize search cursors at the first shard >= desired block number - callFromProvider := NewCallCursorForwardBlockProvider(callFromCursor, addr, blockNum) - callToProvider := NewCallCursorForwardBlockProvider(callToCursor, addr, blockNum) - callFromToProvider := newCallFromToBlockProvider(true, callFromProvider, callToProvider) - - txs := make([]*RPCTransaction, 0, pageSize) - receipts := make([]map[string]interface{}, 0, pageSize) - - resultCount := uint16(0) - hasMore := true - for { - if resultCount >= pageSize || !hasMore { - break - } - - var results []*TransactionsWithReceipts - results, hasMore, err = api.traceBlocks(ctx, addr, chainConfig, pageSize, resultCount, callFromToProvider) - if err != nil { - return nil, err - } - - for _, r := range results { - if r == nil { - return nil, errors.New("internal error during search tracing") - } - - txs = append(txs, r.Txs...) - receipts = append(receipts, r.Receipts...) 
- - resultCount += uint16(len(r.Txs)) - if resultCount >= pageSize { - break - } - } - } - - // Reverse results - lentxs := len(txs) - for i := 0; i < lentxs/2; i++ { - txs[i], txs[lentxs-1-i] = txs[lentxs-1-i], txs[i] - receipts[i], receipts[lentxs-1-i] = receipts[lentxs-1-i], receipts[i] - } - return &TransactionsWithReceipts{txs, receipts, !hasMore, isLastPage}, nil + return api.searchTransactionsAfterV3(dbtx.(kv.TemporalTx), ctx, addr, blockNum, pageSize) } func (api *OtterscanAPIImpl) traceBlocks(ctx context.Context, addr common.Address, chainConfig *chain.Config, pageSize, resultCount uint16, callFromToProvider BlockProvider) ([]*TransactionsWithReceipts, bool, error) { diff --git a/turbo/jsonrpc/otterscan_contract_creator.go b/turbo/jsonrpc/otterscan_contract_creator.go index 409f7cfaba2..f64abf4828e 100644 --- a/turbo/jsonrpc/otterscan_contract_creator.go +++ b/turbo/jsonrpc/otterscan_contract_creator.go @@ -1,7 +1,6 @@ package jsonrpc import ( - "bytes" "context" "fmt" "sort" @@ -9,10 +8,8 @@ import ( "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/cmp" "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon-lib/kv/bitmapdb" "github.com/ledgerwatch/erigon-lib/kv/order" "github.com/ledgerwatch/erigon-lib/kv/rawdbv3" - "github.com/ledgerwatch/erigon-lib/kv/temporal/historyv2" "github.com/ledgerwatch/log/v3" "github.com/ledgerwatch/erigon/core/types/accounts" @@ -31,7 +28,7 @@ func (api *OtterscanAPIImpl) GetContractCreator(ctx context.Context, addr common } defer tx.Rollback() - latestState := rpchelper.NewLatestStateReader(tx, api.historyV3(tx)) + latestState := rpchelper.NewLatestStateReader(tx) plainStateAcc, err := latestState.ReadAccountData(addr) if err != nil { return nil, err @@ -53,252 +50,125 @@ func (api *OtterscanAPIImpl) GetContractCreator(ctx context.Context, addr common } var acc accounts.Account - if api.historyV3(tx) { - ttx := tx.(kv.TemporalTx) - - // Contract; search for creation tx; navigate forward on AccountsHistory/ChangeSets - // - // We traversing history Index - because it's cheaper than traversing History - // and probe History periodically. In result will have small range of blocks. For binary search or full-scan. - // - // popular contracts may have dozens of states changes due to ETH deposits/withdraw after contract creation, - // so it is optimal to search from the beginning even if the contract has multiple - // incarnations. 
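The incarnation checks in this search (both in the branch removed above and in its replacement below) rely on one invariant: every destruct/re-create cycle at an address increments the account's incarnation, so the creation tx of the current contract is the first history entry whose incarnation reaches the value held in plain state. A toy illustration under that assumption, with hypothetical types and numbers:

package main

import "fmt"

// accState is a simplified stand-in for the account values decoded from
// history with accounts.DeserialiseV3.
type accState struct {
	TxNum       uint64
	Incarnation uint64 // bumped each time the address is re-created
}

func main() {
	// Hypothetical history: the address was created at txnum 10, destructed,
	// and re-created at txnum 90.
	hist := []accState{{10, 1}, {55, 1}, {90, 2}, {120, 2}}
	current := uint64(2) // incarnation currently in plain state

	for _, s := range hist {
		if s.Incarnation >= current {
			fmt.Println("current contract created at txnum", s.TxNum) // 90
			break
		}
	}
}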
- var prevTxnID, nextTxnID uint64 - it, err := ttx.IndexRange(kv.AccountsHistoryIdx, addr[:], 0, -1, order.Asc, kv.Unlim) - if err != nil { - return nil, err - } - defer it.Close() - for i := 0; it.HasNext(); i++ { - txnID, err := it.Next() - if err != nil { - return nil, err - } - - if i%4096 != 0 { // probe history periodically, not on every change - nextTxnID = txnID - continue - } - - v, ok, err := ttx.HistorySeek(kv.AccountsHistory, addr[:], txnID) - if err != nil { - log.Error("Unexpected error, couldn't find changeset", "txNum", txnID, "addr", addr) - return nil, err - } - - if !ok { - err = fmt.Errorf("couldn't find history txnID=%v addr=%v", txnID, addr) - log.Error("[rpc] Unexpected error", "err", err) - return nil, err - } - if len(v) == 0 { // creation, but maybe not our Incarnation - prevTxnID = txnID - continue - } - - if err := accounts.DeserialiseV3(&acc, v); err != nil { - return nil, err - } - // Found the shard where the incarnation change happens; ignore all next index values - if acc.Incarnation >= plainStateAcc.Incarnation { - nextTxnID = txnID - break - } - prevTxnID = txnID - } - - // The sort.Search function finds the first block where the incarnation has - // changed to the desired one, so we get the previous block from the bitmap; - // however if the creationTxnID block is already the first one from the bitmap, it means - // the block we want is the max block from the previous shard. - var creationTxnID uint64 - var searchErr error - - if nextTxnID == 0 { - nextTxnID = prevTxnID + 1 - } - // Binary search in [prevTxnID, nextTxnID] range; get first block where desired incarnation appears - // can be replaced by full-scan over ttx.HistoryRange([prevTxnID, nextTxnID])? - idx := sort.Search(int(nextTxnID-prevTxnID), func(i int) bool { - txnID := uint64(i) + prevTxnID - v, ok, err := ttx.HistorySeek(kv.AccountsHistory, addr[:], txnID) - if err != nil { - log.Error("[rpc] Unexpected error, couldn't find changeset", "txNum", i, "addr", addr) - panic(err) - } - if !ok { - return false - } - if len(v) == 0 { - creationTxnID = cmp.Max(creationTxnID, txnID) - return false - } - - if err := accounts.DeserialiseV3(&acc, v); err != nil { - searchErr = err - return false - } - if acc.Incarnation < plainStateAcc.Incarnation { - creationTxnID = cmp.Max(creationTxnID, txnID) - return false - } - return true - }) - if searchErr != nil { - return nil, searchErr - } - if creationTxnID == 0 { - return nil, fmt.Errorf("binary search between %d-%d doesn't find anything", nextTxnID, prevTxnID) - } - - ok, bn, err := rawdbv3.TxNums.FindBlockNum(tx, creationTxnID) - if err != nil { - return nil, err - } - if !ok { - return nil, fmt.Errorf("block not found by txnID=%d", creationTxnID) - } - minTxNum, err := rawdbv3.TxNums.Min(tx, bn) - if err != nil { - return nil, err - } - txIndex := int(creationTxnID) - int(minTxNum) - 1 /* system-contract */ - if txIndex == -1 { - txIndex = (idx + int(prevTxnID)) - int(minTxNum) - 1 - } - - // Trace block, find tx and contract creator - tracer := NewCreateTracer(ctx, addr) - if err := api.genericTracer(tx, ctx, bn, creationTxnID, txIndex, chainConfig, tracer); err != nil { - return nil, err - } - return &ContractCreatorData{ - Tx: tracer.Tx.Hash(), - Creator: tracer.Creator, - }, nil - } + ttx := tx.(kv.TemporalTx) // Contract; search for creation tx; navigate forward on AccountsHistory/ChangeSets // - // We search shards in forward order on purpose because popular contracts may have - // dozens of states changes due to ETH deposits/withdraw after 
contract creation, + // We traverse the history index because it's cheaper than traversing the History itself, + // and probe the History periodically. The result is a small range of blocks, suitable for binary search or a full scan. + // + // popular contracts may have dozens of state changes due to ETH deposits/withdrawals after contract creation, + // so it is optimal to search from the beginning even if the contract has multiple // incarnations. - accHistory, err := tx.Cursor(kv.E2AccountsHistory) + var prevTxnID, nextTxnID uint64 + it, err := ttx.IndexRange(kv.AccountsHistoryIdx, addr[:], 0, -1, order.Asc, kv.Unlim) if err != nil { return nil, err } - defer accHistory.Close() - - accCS, err := tx.CursorDupSort(kv.AccountChangeSet) - if err != nil { - return nil, err - } - defer accCS.Close() - - // Locate shard that contains the block where incarnation changed - acs := historyv2.Mapper[kv.AccountChangeSet] - k, v, err := accHistory.Seek(acs.IndexChunkKey(addr.Bytes(), 0)) - if err != nil { - return nil, err - } - if !bytes.HasPrefix(k, addr.Bytes()) { - log.Error("Couldn't find any shard for account history", "addr", addr) - return nil, fmt.Errorf("could't find any shard for account history addr=%v", addr) - } - - bm := bitmapdb.NewBitmap64() - defer bitmapdb.ReturnToPool64(bm) - prevShardMaxBl := uint64(0) - for { - select { - case <-ctx.Done(): - return nil, ctx.Err() - default: - } - - _, err := bm.ReadFrom(bytes.NewReader(v)) + defer it.Close() + for i := 0; it.HasNext(); i++ { + txnID, err := it.Next() if err != nil { return nil, err } - // Shortcut precheck - st, err := acs.Find(accCS, bm.Maximum(), addr.Bytes()) + if i%4096 != 0 { // probe history periodically, not on every change + nextTxnID = txnID + continue + } + + v, ok, err := ttx.HistorySeek(kv.AccountsHistory, addr[:], txnID) if err != nil { + log.Error("Unexpected error, couldn't find changeset", "txNum", txnID, "addr", addr) return nil, err } - if st == nil { - log.Error("Unexpected error, couldn't find changeset", "block", bm.Maximum(), "addr", addr) - return nil, fmt.Errorf("unexpected error, couldn't find changeset block=%v addr=%v", bm.Maximum(), addr) - } - // Found the shard where the incarnation change happens; ignore all - // next shards - if err := acc.DecodeForStorage(st); err != nil { + if !ok { + err = fmt.Errorf("couldn't find history txnID=%v addr=%v", txnID, addr) + log.Error("[rpc] Unexpected error", "err", err) return nil, err } - if acc.Incarnation >= plainStateAcc.Incarnation { - break + if len(v) == 0 { // creation, but maybe not our Incarnation + prevTxnID = txnID + continue } - prevShardMaxBl = bm.Maximum() - k, v, err = accHistory.Next() - if err != nil { + if err := accounts.DeserialiseV3(&acc, v); err != nil { return nil, err } - - // No more shards; it means the max bl from previous shard - // contains the incarnation change - if !bytes.HasPrefix(k, addr.Bytes()) { + // Found the shard where the incarnation change happens; ignore all next index values + if acc.Incarnation >= plainStateAcc.Incarnation { + nextTxnID = txnID break } + prevTxnID = txnID } - // Binary search block number inside shard; get first block where desired - // incarnation appears - blocks := bm.ToArray() + // The sort.Search function finds the first block where the incarnation has + // changed to the desired one, so we get the previous block from the bitmap; + // however if the creationTxnID block is already the first one from the bitmap, it means + // the block we want is the max block from the previous shard.
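The loop above is the coarse half of a probe-then-bisect search: HistorySeek is the expensive call, so it is issued only for every 4096th index entry, bracketing the answer between prevTxnID and nextTxnID before the sort.Search below bisects the bracket. A self-contained sketch of the same pattern over a plain sorted slice (sizes and values are made up):

package main

import (
	"fmt"
	"sort"
)

func main() {
	// Sorted "history index" for one address; reading hist[i] stands in for
	// the expensive ttx.HistorySeek probe.
	hist := make([]uint64, 100000)
	for i := range hist {
		hist[i] = uint64(i) * 3
	}
	target := uint64(123456) // we want the first txnum >= target

	// Coarse pass: probe every 4096th entry to bracket the answer.
	lo, hi := 0, len(hist)-1
	for i := 0; i < len(hist); i += 4096 {
		if hist[i] >= target {
			hi = i
			break
		}
		lo = i
	}

	// Fine pass: bisect only inside the bracket, like the sort.Search above.
	k := lo + sort.Search(hi-lo+1, func(i int) bool { return hist[lo+i] >= target })
	fmt.Printf("first txnum >= %d is %d (index %d)\n", target, hist[k], k)
}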
+ var creationTxnID uint64 var searchErr error - r := sort.Search(len(blocks), func(i int) bool { - bl := blocks[i] - st, err := acs.Find(accCS, bl, addr.Bytes()) + + if nextTxnID == 0 { + nextTxnID = prevTxnID + 1 + } + // Binary search in [prevTxnID, nextTxnID] range; get first block where desired incarnation appears + // can be replaced by full-scan over ttx.HistoryRange([prevTxnID, nextTxnID])? + idx := sort.Search(int(nextTxnID-prevTxnID), func(i int) bool { + txnID := uint64(i) + prevTxnID + v, ok, err := ttx.HistorySeek(kv.AccountsHistory, addr[:], txnID) if err != nil { - searchErr = err + log.Error("[rpc] Unexpected error, couldn't find changeset", "txNum", i, "addr", addr) + panic(err) + } + if !ok { return false } - if st == nil { - log.Error("Unexpected error, couldn't find changeset", "block", bl, "addr", addr) + if len(v) == 0 { + creationTxnID = cmp.Max(creationTxnID, txnID) return false } - if err := acc.DecodeForStorage(st); err != nil { + if err := accounts.DeserialiseV3(&acc, v); err != nil { searchErr = err return false } if acc.Incarnation < plainStateAcc.Incarnation { + creationTxnID = cmp.Max(creationTxnID, txnID) return false } return true }) - if searchErr != nil { return nil, searchErr } + if creationTxnID == 0 { + return nil, fmt.Errorf("binary search between %d-%d doesn't find anything", nextTxnID, prevTxnID) + } - // The sort.Search function finds the first block where the incarnation has - // changed to the desired one, so we get the previous block from the bitmap; - // however if the found block is already the first one from the bitmap, it means - // the block we want is the max block from the previous shard. - blockFound := prevShardMaxBl - if r > 0 { - blockFound = blocks[r-1] + ok, bn, err := rawdbv3.TxNums.FindBlockNum(tx, creationTxnID) + if err != nil { + return nil, err + } + if !ok { + return nil, fmt.Errorf("block not found by txnID=%d", creationTxnID) } + minTxNum, err := rawdbv3.TxNums.Min(tx, bn) + if err != nil { + return nil, err + } + txIndex := int(creationTxnID) - int(minTxNum) - 1 /* system-contract */ + if txIndex == -1 { + txIndex = (idx + int(prevTxnID)) - int(minTxNum) - 1 + } + // Trace block, find tx and contract creator tracer := NewCreateTracer(ctx, addr) - if err := api.genericTracer(tx, ctx, blockFound, 0, 0, chainConfig, tracer); err != nil { + if err := api.genericTracer(tx, ctx, bn, creationTxnID, txIndex, chainConfig, tracer); err != nil { return nil, err } - return &ContractCreatorData{ Tx: tracer.Tx.Hash(), Creator: tracer.Creator, diff --git a/turbo/jsonrpc/otterscan_generic_tracer.go b/turbo/jsonrpc/otterscan_generic_tracer.go index 7de7afe28e9..64dd19e5dfc 100644 --- a/turbo/jsonrpc/otterscan_generic_tracer.go +++ b/turbo/jsonrpc/otterscan_generic_tracer.go @@ -4,17 +4,12 @@ import ( "context" "github.com/ledgerwatch/erigon-lib/chain" - "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/cmd/state/exec3" - "github.com/ledgerwatch/erigon/turbo/rpchelper" "github.com/ledgerwatch/log/v3" - "github.com/ledgerwatch/erigon/core" - "github.com/ledgerwatch/erigon/core/state" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/core/vm" - "github.com/ledgerwatch/erigon/turbo/shards" ) type GenericTracer interface { @@ -24,91 +19,31 @@ type GenericTracer interface { } func (api *OtterscanAPIImpl) genericTracer(dbtx kv.Tx, ctx context.Context, blockNum, txnID uint64, txIndex int, chainConfig *chain.Config, tracer GenericTracer) error { - if 
api.historyV3(dbtx) { - ttx := dbtx.(kv.TemporalTx) - executor := exec3.NewTraceWorker(ttx, chainConfig, api.engine(), api._blockReader, tracer) + ttx := dbtx.(kv.TemporalTx) + executor := exec3.NewTraceWorker(ttx, chainConfig, api.engine(), api._blockReader, tracer) - // if block number changed, calculate all related field - header, err := api._blockReader.HeaderByNumber(ctx, ttx, blockNum) - if err != nil { - return err - } - if header == nil { - log.Warn("[rpc] header is nil", "blockNum", blockNum) - return nil - } - executor.ChangeBlock(header) - - txn, err := api._txnReader.TxnByIdxInBlock(ctx, ttx, blockNum, txIndex) - if err != nil { - return err - } - if txn == nil { - log.Warn("[rpc genericTracer] tx is nil", "blockNum", blockNum, "txIndex", txIndex) - return nil - } - _, err = executor.ExecTxn(txnID, txIndex, txn) - if err != nil { - return err - } - return nil - } - - reader, err := rpchelper.CreateHistoryStateReader(dbtx, blockNum, txIndex, api.historyV3(dbtx), chainConfig.ChainName) + // if block number changed, calculate all related field + header, err := api._blockReader.HeaderByNumber(ctx, ttx, blockNum) if err != nil { return err } - stateCache := shards.NewStateCache(32, 0 /* no limit */) - cachedReader := state.NewCachedReader(reader, stateCache) - noop := state.NewNoopWriter() - cachedWriter := state.NewCachedWriter(noop, stateCache) - - ibs := state.New(cachedReader) - - getHeader := func(hash common.Hash, number uint64) *types.Header { - h, e := api._blockReader.Header(ctx, dbtx, hash, number) - if e != nil { - log.Error("getHeader error", "number", number, "hash", hash, "err", e) - } - return h + if header == nil { + log.Warn("[rpc] header is nil", "blockNum", blockNum) + return nil } - engine := api.engine() - block, err := api.blockByNumberWithSenders(ctx, dbtx, blockNum) + executor.ChangeBlock(header) + + txn, err := api._txnReader.TxnByIdxInBlock(ctx, ttx, blockNum, txIndex) if err != nil { return err } - if block == nil { + if txn == nil { + log.Warn("[rpc genericTracer] tx is nil", "blockNum", blockNum, "txIndex", txIndex) return nil } - - header := block.Header() - rules := chainConfig.Rules(block.NumberU64(), header.Time) - signer := types.MakeSigner(chainConfig, blockNum, header.Time) - for idx, tx := range block.Transactions() { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - ibs.SetTxContext(tx.Hash(), block.Hash(), idx) - - msg, _ := tx.AsMessage(*signer, header.BaseFee, rules) - - BlockContext := core.NewEVMBlockContext(header, core.GetHashFn(header, getHeader), engine, nil) - TxContext := core.NewEVMTxContext(msg) - - vmenv := vm.NewEVM(BlockContext, TxContext, ibs, chainConfig, vm.Config{Debug: true, Tracer: tracer}) - if _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(tx.GetGas()).AddBlobGas(tx.GetBlobGas()), true /* refunds */, false /* gasBailout */); err != nil { - return err - } - _ = ibs.FinalizeTx(rules, cachedWriter) - - if tracer.Found() { - tracer.SetTransaction(tx) - return nil - } + _, err = executor.ExecTxn(txnID, txIndex, txn) + if err != nil { + return err } - return nil } diff --git a/turbo/jsonrpc/otterscan_has_code.go b/turbo/jsonrpc/otterscan_has_code.go index af442e8d000..e7e18ecdcea 100644 --- a/turbo/jsonrpc/otterscan_has_code.go +++ b/turbo/jsonrpc/otterscan_has_code.go @@ -26,7 +26,7 @@ func (api *OtterscanAPIImpl) HasCode(ctx context.Context, address common.Address return false, err } - reader, err := rpchelper.CreateHistoryStateReader(tx, blockNumber, 0, api.historyV3(tx), 
chainConfig.ChainName) + reader, err := rpchelper.CreateHistoryStateReader(tx, blockNumber, 0, chainConfig.ChainName) if err != nil { return false, err } diff --git a/turbo/jsonrpc/otterscan_search_trace.go b/turbo/jsonrpc/otterscan_search_trace.go index 38ec3698f91..57f5682df5f 100644 --- a/turbo/jsonrpc/otterscan_search_trace.go +++ b/turbo/jsonrpc/otterscan_search_trace.go @@ -52,7 +52,7 @@ func (api *OtterscanAPIImpl) traceBlock(dbtx kv.Tx, ctx context.Context, blockNu return false, nil, err } - reader, err := rpchelper.CreateHistoryStateReader(dbtx, blockNum, 0, api.historyV3(dbtx), chainConfig.ChainName) + reader, err := rpchelper.CreateHistoryStateReader(dbtx, blockNum, 0, chainConfig.ChainName) if err != nil { return false, nil, err } diff --git a/turbo/jsonrpc/otterscan_transaction_by_sender_and_nonce.go b/turbo/jsonrpc/otterscan_transaction_by_sender_and_nonce.go index 1a04ec23211..b0fee2d20a8 100644 --- a/turbo/jsonrpc/otterscan_transaction_by_sender_and_nonce.go +++ b/turbo/jsonrpc/otterscan_transaction_by_sender_and_nonce.go @@ -1,18 +1,15 @@ package jsonrpc import ( - "bytes" "context" "fmt" "sort" - "github.com/RoaringBitmap/roaring/roaring64" "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/cmp" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/order" "github.com/ledgerwatch/erigon-lib/kv/rawdbv3" - "github.com/ledgerwatch/erigon-lib/kv/temporal/historyv2" "github.com/ledgerwatch/log/v3" "github.com/ledgerwatch/erigon/core/types/accounts" @@ -26,220 +23,79 @@ func (api *OtterscanAPIImpl) GetTransactionBySenderAndNonce(ctx context.Context, defer tx.Rollback() var acc accounts.Account - if api.historyV3(tx) { - ttx := tx.(kv.TemporalTx) - it, err := ttx.IndexRange(kv.AccountsHistoryIdx, addr[:], -1, -1, order.Asc, kv.Unlim) + ttx := tx.(kv.TemporalTx) + it, err := ttx.IndexRange(kv.AccountsHistoryIdx, addr[:], -1, -1, order.Asc, kv.Unlim) + if err != nil { + return nil, err + } + + var prevTxnID, nextTxnID uint64 + for i := 0; it.HasNext(); i++ { + txnID, err := it.Next() if err != nil { return nil, err } - var prevTxnID, nextTxnID uint64 - for i := 0; it.HasNext(); i++ { - txnID, err := it.Next() - if err != nil { - return nil, err - } - - if i%4096 != 0 { // probe history periodically, not on every change - nextTxnID = txnID - continue - } - - v, ok, err := ttx.HistorySeek(kv.AccountsHistory, addr[:], txnID) - if err != nil { - log.Error("Unexpected error, couldn't find changeset", "txNum", i, "addr", addr) - return nil, err - } - if !ok { - err = fmt.Errorf("couldn't find history txnID=%v addr=%v", txnID, addr) - log.Error("[rpc] Unexpected error", "err", err) - return nil, err - } - - if len(v) == 0 { // creation, but maybe not our Incarnation - prevTxnID = txnID - continue - } - - if err := accounts.DeserialiseV3(&acc, v); err != nil { - return nil, err - } - // Desired nonce was found in this chunk - if acc.Nonce > nonce { - break - } - prevTxnID = txnID - } - - // The sort.Search function finds the first block where the incarnation has - // changed to the desired one, so we get the previous block from the bitmap; - // however if the creationTxnID block is already the first one from the bitmap, it means - // the block we want is the max block from the previous shard. 
- var creationTxnID uint64 - var searchErr error - - if nextTxnID == 0 { - nextTxnID = prevTxnID + 1 + if i%4096 != 0 { // probe history periodically, not on every change + nextTxnID = txnID + continue } - // Binary search in [prevTxnID, nextTxnID] range; get first block where desired incarnation appears - // can be replaced by full-scan over ttx.HistoryRange([prevTxnID, nextTxnID])? - idx := sort.Search(int(nextTxnID-prevTxnID), func(i int) bool { - txnID := uint64(i) + prevTxnID - v, ok, err := ttx.HistorySeek(kv.AccountsHistory, addr[:], txnID) - if err != nil { - log.Error("[rpc] Unexpected error, couldn't find changeset", "txNum", i, "addr", addr) - panic(err) - } - if !ok { - return false - } - if len(v) == 0 { - creationTxnID = cmp.Max(creationTxnID, txnID) - return false - } - - if err := accounts.DeserialiseV3(&acc, v); err != nil { - searchErr = err - return false - } - // Since the state contains the nonce BEFORE the block changes, we look for - // the block when the nonce changed to be > the desired once, which means the - // previous history block contains the actual change; it may contain multiple - // nonce changes. - if acc.Nonce <= nonce { - creationTxnID = cmp.Max(creationTxnID, txnID) - return false - } - return true - }) - if searchErr != nil { - return nil, searchErr - } - if creationTxnID == 0 { - return nil, fmt.Errorf("binary search between %d-%d doesn't find anything", nextTxnID, prevTxnID) - } - ok, bn, err := rawdbv3.TxNums.FindBlockNum(tx, creationTxnID) + v, ok, err := ttx.HistorySeek(kv.AccountsHistory, addr[:], txnID) if err != nil { + log.Error("Unexpected error, couldn't find changeset", "txNum", i, "addr", addr) return nil, err } if !ok { - return nil, fmt.Errorf("block not found by txnID=%d", creationTxnID) - } - minTxNum, err := rawdbv3.TxNums.Min(tx, bn) - if err != nil { + err = fmt.Errorf("couldn't find history txnID=%v addr=%v", txnID, addr) + log.Error("[rpc] Unexpected error", "err", err) return nil, err } - txIndex := int(creationTxnID) - int(minTxNum) - 1 /* system-tx */ - if txIndex == -1 { - txIndex = (idx + int(prevTxnID)) - int(minTxNum) - 1 - } - txn, err := api._txnReader.TxnByIdxInBlock(ctx, ttx, bn, txIndex) - if err != nil { - return nil, err - } - if txn == nil { - log.Warn("[rpc] tx is nil", "blockNum", bn, "txIndex", txIndex) - return nil, nil - } - found := txn.GetNonce() == nonce - if !found { - return nil, nil - } - txHash := txn.Hash() - return &txHash, nil - } - - accHistoryC, err := tx.Cursor(kv.E2AccountsHistory) - if err != nil { - return nil, err - } - defer accHistoryC.Close() - - accChangesC, err := tx.CursorDupSort(kv.AccountChangeSet) - if err != nil { - return nil, err - } - defer accChangesC.Close() - - // Locate the chunk where the nonce happens - acs := historyv2.Mapper[kv.AccountChangeSet] - k, v, err := accHistoryC.Seek(acs.IndexChunkKey(addr.Bytes(), 0)) - if err != nil { - return nil, err - } - - bitmap := roaring64.New() - maxBlPrevChunk := uint64(0) - - for { - select { - case <-ctx.Done(): - return nil, ctx.Err() - default: - } - - if k == nil || !bytes.HasPrefix(k, addr.Bytes()) { - // Check plain state - data, err := tx.GetOne(kv.PlainState, addr.Bytes()) - if err != nil { - return nil, err - } - if err := acc.DecodeForStorage(data); err != nil { - return nil, err - } - // Nonce changed in plain state, so it means the last block of last chunk - // contains the actual nonce change - if acc.Nonce > nonce { - break - } - // Not found; asked for nonce still not used - return nil, nil + if len(v) == 0 { // 
creation, but maybe not our Incarnation
+			prevTxnID = txnID
+			continue
 		}
 
-		// Inspect block changeset
-		if _, err := bitmap.ReadFrom(bytes.NewReader(v)); err != nil {
-			return nil, err
-		}
-		maxBl := bitmap.Maximum()
-		data, err := acs.Find(accChangesC, maxBl, addr.Bytes())
-		if err != nil {
-			return nil, err
-		}
-		if err := acc.DecodeForStorage(data); err != nil {
+		if err := accounts.DeserialiseV3(&acc, v); err != nil {
 			return nil, err
 		}
-		// Desired nonce was found in this chunk
 		if acc.Nonce > nonce {
 			break
 		}
+		prevTxnID = txnID
+	}
 
-		maxBlPrevChunk = maxBl
-		k, v, err = accHistoryC.Next()
+	// The sort.Search below finds the first txnID whose pre-state nonce is already
+	// greater than the desired one, so the txnID we want is the previous one;
+	// however, if that txnID is already the first probed one, the txnID we want
+	// is the last one seen in the previous probe window.
+	var creationTxnID uint64
+	var searchErr error
+
+	if nextTxnID == 0 {
+		nextTxnID = prevTxnID + 1
+	}
+	// Binary search in the [prevTxnID, nextTxnID] range; get the first txnID where the desired nonce appears
+	// (could be replaced by a full scan over ttx.HistoryRange([prevTxnID, nextTxnID]))
+	idx := sort.Search(int(nextTxnID-prevTxnID), func(i int) bool {
+		txnID := uint64(i) + prevTxnID
+		v, ok, err := ttx.HistorySeek(kv.AccountsHistory, addr[:], txnID)
 		if err != nil {
-			return nil, err
+			log.Error("[rpc] Unexpected error, couldn't find changeset", "txNum", i, "addr", addr)
+			panic(err)
 		}
-	}
-
-	// Locate the exact block inside chunk when the nonce changed
-	blocks := bitmap.ToArray()
-	var errSearch error = nil
-	idx := sort.Search(len(blocks), func(i int) bool {
-		if errSearch != nil {
+		if !ok {
 			return false
 		}
-
-		// Locate the block changeset
-		data, err := acs.Find(accChangesC, blocks[i], addr.Bytes())
-		if err != nil {
-			errSearch = err
+		if len(v) == 0 {
+			creationTxnID = cmp.Max(creationTxnID, txnID)
 			return false
 		}
-		if err := acc.DecodeForStorage(data); err != nil {
-			errSearch = err
+		if err := accounts.DeserialiseV3(&acc, v); err != nil {
+			searchErr = err
 			return false
 		}
@@ -247,27 +103,46 @@ func (api *OtterscanAPIImpl) GetTransactionBySenderAndNonce(ctx context.Context,
 		// the block when the nonce changed to be > the desired once, which means the
 		// previous history block contains the actual change; it may contain multiple
 		// nonce changes.
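+		// Worked example (hypothetical values): if the pre-state nonce recorded at
+		// txnID=140 is 5 and at txnID=141 is 6, then for nonce=5 the predicate below
+		// first holds at txnID=141, so the txn that consumed nonce 5 executed at
+		// txnID=140, which is the last txnID retained in creationTxnID.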
- return acc.Nonce > nonce + if acc.Nonce <= nonce { + creationTxnID = cmp.Max(creationTxnID, txnID) + return false + } + return true }) - if errSearch != nil { - return nil, errSearch + if searchErr != nil { + return nil, searchErr } - - // Since the changeset contains the state BEFORE the change, we inspect - // the block before the one we found; if it is the first block inside the chunk, - // we use the last block from prev chunk - nonceBlock := maxBlPrevChunk - if idx > 0 { - nonceBlock = blocks[idx-1] + if creationTxnID == 0 { + return nil, fmt.Errorf("binary search between %d-%d doesn't find anything", nextTxnID, prevTxnID) + } + ok, bn, err := rawdbv3.TxNums.FindBlockNum(tx, creationTxnID) + if err != nil { + return nil, err + } + if !ok { + return nil, fmt.Errorf("block not found by txnID=%d", creationTxnID) } - found, txHash, err := api.findNonce(ctx, tx, addr, nonce, nonceBlock) + minTxNum, err := rawdbv3.TxNums.Min(tx, bn) if err != nil { return nil, err } + txIndex := int(creationTxnID) - int(minTxNum) - 1 /* system-tx */ + if txIndex == -1 { + txIndex = (idx + int(prevTxnID)) - int(minTxNum) - 1 + } + txn, err := api._txnReader.TxnByIdxInBlock(ctx, ttx, bn, txIndex) + if err != nil { + return nil, err + } + if txn == nil { + log.Warn("[rpc] tx is nil", "blockNum", bn, "txIndex", txIndex) + return nil, nil + } + found := txn.GetNonce() == nonce if !found { return nil, nil } - + txHash := txn.Hash() return &txHash, nil } diff --git a/turbo/jsonrpc/overlay_api.go b/turbo/jsonrpc/overlay_api.go index ca551435bba..86290ca7717 100644 --- a/turbo/jsonrpc/overlay_api.go +++ b/turbo/jsonrpc/overlay_api.go @@ -135,7 +135,7 @@ func (api *OverlayAPIImpl) CallConstructor(ctx context.Context, address common.A replayTransactions = block.Transactions()[:transactionIndex] - stateReader, err := rpchelper.CreateStateReader(ctx, tx, rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(blockNum-1)), 0, api.filters, api.stateCache, api.historyV3(tx), chainConfig.ChainName) + stateReader, err := rpchelper.CreateStateReader(ctx, tx, rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(blockNum-1)), 0, api.filters, api.stateCache, chainConfig.ChainName) if err != nil { return nil, err } @@ -309,7 +309,7 @@ func (api *OverlayAPIImpl) GetLogs(ctx context.Context, crit filters.FilterCrite } // try to recompute the state - stateReader, err := rpchelper.CreateStateReader(ctx, tx, rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(blockNumber-1)), 0, api.filters, api.stateCache, api.historyV3(tx), chainConfig.ChainName) + stateReader, err := rpchelper.CreateStateReader(ctx, tx, rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(blockNumber-1)), 0, api.filters, api.stateCache, chainConfig.ChainName) if err != nil { results[task.idx] = &blockReplayResult{BlockNumber: task.BlockNumber, Error: err.Error()} continue diff --git a/turbo/jsonrpc/parity_api.go b/turbo/jsonrpc/parity_api.go index b1ef45f50db..f58b0e98422 100644 --- a/turbo/jsonrpc/parity_api.go +++ b/turbo/jsonrpc/parity_api.go @@ -2,12 +2,10 @@ package jsonrpc import ( "context" - "encoding/binary" "fmt" libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/hexutility" - "github.com/ledgerwatch/erigon-lib/common/length" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/order" "github.com/ledgerwatch/erigon-lib/kv/rawdbv3" @@ -52,64 +50,36 @@ func (api *ParityAPIImpl) ListStorageKeys(ctx context.Context, account libcommon return nil, fmt.Errorf("listStorageKeys cannot open tx: %w", err) } defer 
tx.Rollback() - a, err := rpchelper.NewLatestStateReader(tx, api.historyV3(tx)).ReadAccountData(account) + a, err := rpchelper.NewLatestStateReader(tx).ReadAccountData(account) if err != nil { return nil, err } else if a == nil { return nil, fmt.Errorf("acc not found") } - if api.historyV3(tx) { - bn := rawdb.ReadCurrentBlockNumber(tx) - minTxNum, err := rawdbv3.TxNums.Min(tx, *bn) - if err != nil { - return nil, err - } - - from := account[:] - if offset != nil { - from = append(from, *offset...) - } - to, _ := kv.NextSubtree(account[:]) - r, err := tx.(kv.TemporalTx).DomainRange(kv.StorageDomain, from, to, minTxNum, order.Asc, quantity) - if err != nil { - return nil, err - } - defer r.Close() - for r.HasNext() { - k, _, err := r.Next() - if err != nil { - return nil, err - } - keys = append(keys, libcommon.CopyBytes(k[20:])) - } - return keys, nil - } - b := make([]byte, 8) - binary.BigEndian.PutUint64(b, a.GetIncarnation()) - seekBytes := append(account.Bytes(), b...) - - c, err := tx.CursorDupSort(kv.PlainState) + bn := rawdb.ReadCurrentBlockNumber(tx) + minTxNum, err := rawdbv3.TxNums.Min(tx, *bn) if err != nil { return nil, err } - defer c.Close() - var v []byte - var seekVal []byte - if offset != nil { - seekVal = *offset - } - for v, err = c.SeekBothRange(seekBytes, seekVal); v != nil && len(keys) != quantity && err == nil; _, v, err = c.NextDup() { - if len(v) > length.Hash { - keys = append(keys, v[:length.Hash]) - } else { - keys = append(keys, v) - } + from := account[:] + if offset != nil { + from = append(from, *offset...) } + to, _ := kv.NextSubtree(account[:]) + r, err := tx.(kv.TemporalTx).DomainRange(kv.StorageDomain, from, to, minTxNum, order.Asc, quantity) if err != nil { return nil, err } + defer r.Close() + for r.HasNext() { + k, _, err := r.Next() + if err != nil { + return nil, err + } + keys = append(keys, libcommon.CopyBytes(k[20:])) + } return keys, nil } diff --git a/turbo/jsonrpc/trace_adhoc.go b/turbo/jsonrpc/trace_adhoc.go index 790344fb46b..6350d87449e 100644 --- a/turbo/jsonrpc/trace_adhoc.go +++ b/turbo/jsonrpc/trace_adhoc.go @@ -905,7 +905,7 @@ func (api *TraceAPIImpl) Call(ctx context.Context, args TraceCallParam, traceTyp return nil, err } - stateReader, err := rpchelper.CreateStateReader(ctx, tx, *blockNrOrHash, 0, api.filters, api.stateCache, api.historyV3(tx), chainConfig.ChainName) + stateReader, err := rpchelper.CreateStateReader(ctx, tx, *blockNrOrHash, 0, api.filters, api.stateCache, chainConfig.ChainName) if err != nil { return nil, err } @@ -1117,7 +1117,7 @@ func (api *TraceAPIImpl) doCallMany(ctx context.Context, dbtx kv.Tx, msgs []type if err != nil { return nil, nil, err } - stateReader, err := rpchelper.CreateStateReader(ctx, dbtx, *parentNrOrHash, 0, api.filters, api.stateCache, api.historyV3(dbtx), chainConfig.ChainName) + stateReader, err := rpchelper.CreateStateReader(ctx, dbtx, *parentNrOrHash, 0, api.filters, api.stateCache, chainConfig.ChainName) if err != nil { return nil, nil, err } diff --git a/turbo/jsonrpc/trace_filtering.go b/turbo/jsonrpc/trace_filtering.go index 68ead9fd5e0..31f871e8465 100644 --- a/turbo/jsonrpc/trace_filtering.go +++ b/turbo/jsonrpc/trace_filtering.go @@ -8,14 +8,12 @@ import ( "github.com/ledgerwatch/erigon-lib/common/hexutil" "github.com/ledgerwatch/erigon/eth/consensuschain" - "github.com/RoaringBitmap/roaring/roaring64" jsoniter "github.com/json-iterator/go" "github.com/ledgerwatch/log/v3" "github.com/ledgerwatch/erigon-lib/chain" "github.com/ledgerwatch/erigon-lib/common" 
"github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon-lib/kv/bitmapdb" "github.com/ledgerwatch/erigon-lib/kv/iter" "github.com/ledgerwatch/erigon-lib/kv/order" "github.com/ledgerwatch/erigon-lib/kv/rawdbv3" @@ -235,59 +233,6 @@ func (api *TraceAPIImpl) Block(ctx context.Context, blockNr rpc.BlockNumber, gas return out, err } -func traceFilterBitmaps(tx kv.Tx, req TraceFilterRequest, from, to uint64) (fromAddresses, toAddresses map[common.Address]struct{}, allBlocks *roaring64.Bitmap, err error) { - fromAddresses = make(map[common.Address]struct{}, len(req.FromAddress)) - toAddresses = make(map[common.Address]struct{}, len(req.ToAddress)) - allBlocks = roaring64.New() - var blocksTo roaring64.Bitmap - for _, addr := range req.FromAddress { - if addr != nil { - b, err := bitmapdb.Get64(tx, kv.CallFromIndex, addr.Bytes(), from, to) - if err != nil { - if errors.Is(err, ethdb.ErrKeyNotFound) { - continue - } - return nil, nil, nil, err - } - allBlocks.Or(b) - fromAddresses[*addr] = struct{}{} - } - } - - for _, addr := range req.ToAddress { - if addr != nil { - b, err := bitmapdb.Get64(tx, kv.CallToIndex, addr.Bytes(), from, to) - if err != nil { - if errors.Is(err, ethdb.ErrKeyNotFound) { - continue - } - return nil, nil, nil, err - } - blocksTo.Or(b) - toAddresses[*addr] = struct{}{} - } - } - - switch req.Mode { - case TraceFilterModeIntersection: - allBlocks.And(&blocksTo) - case TraceFilterModeUnion: - fallthrough - default: - allBlocks.Or(&blocksTo) - } - - // Special case - if no addresses specified, take all traces - if len(req.FromAddress) == 0 && len(req.ToAddress) == 0 { - allBlocks.AddRange(from, to) - } else { - allBlocks.RemoveRange(0, from) - allBlocks.RemoveRange(to, uint64(0x100000000)) - } - - return fromAddresses, toAddresses, allBlocks, nil -} - func traceFilterBitmapsV3(tx kv.TemporalTx, req TraceFilterRequest, from, to uint64) (fromAddresses, toAddresses map[common.Address]struct{}, allBlocks iter.U64, err error) { fromAddresses = make(map[common.Address]struct{}, len(req.FromAddress)) toAddresses = make(map[common.Address]struct{}, len(req.ToAddress)) @@ -340,6 +285,7 @@ func traceFilterBitmapsV3(tx kv.TemporalTx, req TraceFilterRequest, from, to uin // Pull blocks which have txs with matching address func (api *TraceAPIImpl) Filter(ctx context.Context, req TraceFilterRequest, gasBailOut *bool, stream *jsoniter.Stream) error { if gasBailOut == nil { + //nolint gasBailOut = new(bool) // false by default } dbtx, err1 := api.kv.BeginRo(ctx) @@ -366,169 +312,10 @@ func (api *TraceAPIImpl) Filter(ctx context.Context, req TraceFilterRequest, gas return fmt.Errorf("invalid parameters: fromBlock cannot be greater than toBlock") } - if api.historyV3(dbtx) { - return api.filterV3(ctx, dbtx.(kv.TemporalTx), fromBlock, toBlock, req, stream) - } - toBlock++ //+1 because internally Erigon using semantic [from, to), but some RPC have different semantic - fromAddresses, toAddresses, allBlocks, err := traceFilterBitmaps(dbtx, req, fromBlock, toBlock) - if err != nil { - return err - } - - chainConfig, err := api.chainConfig(ctx, dbtx) - if err != nil { - return err - } - - var json = jsoniter.ConfigCompatibleWithStandardLibrary - stream.WriteArrayStart() - first := true - // Execute all transactions in picked blocks - - count := uint64(^uint(0)) // this just makes it easier to use below - if req.Count != nil { - count = *req.Count - } - after := uint64(0) // this just makes it easier to use below - if req.After != nil { - after = *req.After - } - nSeen := uint64(0) - 
nExported := uint64(0) - - it := allBlocks.Iterator() - for it.HasNext() { - b := it.Next() - // Extract transactions from block - block, bErr := api.blockByNumberWithSenders(ctx, dbtx, b) - if bErr != nil { - if first { - first = false - } else { - stream.WriteMore() - } - stream.WriteObjectStart() - rpc.HandleError(bErr, stream) - stream.WriteObjectEnd() - continue - } - if block == nil { - if first { - first = false - } else { - stream.WriteMore() - } - stream.WriteObjectStart() - rpc.HandleError(fmt.Errorf("could not find block %d", b), stream) - stream.WriteObjectEnd() - continue - } - - blockHash := block.Hash() - blockNumber := block.NumberU64() - signer := types.MakeSigner(chainConfig, b, block.Time()) - t, syscall, tErr := api.callManyTransactions(ctx, dbtx, block, []string{TraceTypeTrace}, -1 /* all tx indices */, *gasBailOut, signer, chainConfig) - if tErr != nil { - if first { - first = false - } else { - stream.WriteMore() - } - stream.WriteObjectStart() - rpc.HandleError(tErr, stream) - stream.WriteObjectEnd() - continue - } - isIntersectionMode := req.Mode == TraceFilterModeIntersection - includeAll := len(fromAddresses) == 0 && len(toAddresses) == 0 - for i, trace := range t { - txPosition := uint64(i) - // Check if transaction concerns any of the addresses we wanted - for _, pt := range trace.Trace { - if includeAll || filterTrace(pt, fromAddresses, toAddresses, isIntersectionMode) { - nSeen++ - pt.BlockHash = &blockHash - pt.BlockNumber = &blockNumber - pt.TransactionHash = trace.TransactionHash - pt.TransactionPosition = &txPosition - b, err := json.Marshal(pt) - if err != nil { - if first { - first = false - } else { - stream.WriteMore() - } - stream.WriteObjectStart() - rpc.HandleError(err, stream) - stream.WriteObjectEnd() - continue - } - if nSeen > after && nExported < count { - if first { - first = false - } else { - stream.WriteMore() - } - if _, err := stream.Write(b); err != nil { - return err - } - nExported++ - } - } - } - } - - rewards, err := api.engine().CalculateRewards(chainConfig, block.Header(), block.Uncles(), syscall) - if err != nil { - return err - } - - for _, r := range rewards { - if _, ok := toAddresses[r.Beneficiary]; ok || includeAll { - nSeen++ - var tr ParityTrace - rewardAction := &RewardTraceAction{} - rewardAction.Author = r.Beneficiary - rewardAction.RewardType = rewardKindToString(r.Kind) - rewardAction.Value.ToInt().Set(r.Amount.ToBig()) - tr.Action = rewardAction - tr.BlockHash = &common.Hash{} - copy(tr.BlockHash[:], block.Hash().Bytes()) - tr.BlockNumber = new(uint64) - *tr.BlockNumber = block.NumberU64() - tr.Type = "reward" // nolint: goconst - tr.TraceAddress = []int{} - b, err := json.Marshal(tr) - if err != nil { - if first { - first = false - } else { - stream.WriteMore() - } - stream.WriteObjectStart() - rpc.HandleError(err, stream) - stream.WriteObjectEnd() - continue - } - if nSeen > after && nExported < count { - if first { - first = false - } else { - stream.WriteMore() - } - if _, err := stream.Write(b); err != nil { - return err - } - nExported++ - } - } - } - } - stream.WriteArrayEnd() - return stream.Flush() + return api.filterV3(ctx, dbtx.(kv.TemporalTx), fromBlock, toBlock, req, stream, *gasBailOut) } -func (api *TraceAPIImpl) filterV3(ctx context.Context, dbtx kv.TemporalTx, fromBlock, toBlock uint64, req TraceFilterRequest, stream *jsoniter.Stream) error { +func (api *TraceAPIImpl) filterV3(ctx context.Context, dbtx kv.TemporalTx, fromBlock, toBlock uint64, req TraceFilterRequest, stream *jsoniter.Stream, 
gasBailOut bool) error { var fromTxNum, toTxNum uint64 var err error if fromBlock > 0 { @@ -790,7 +577,7 @@ func (api *TraceAPIImpl) filterV3(ctx context.Context, dbtx kv.TemporalTx, fromB gp := new(core.GasPool).AddGas(msg.Gas()).AddBlobGas(msg.BlobGas()) ibs.SetTxContext(txHash, lastBlockHash, txIndex) var execResult *core.ExecutionResult - execResult, err = core.ApplyMessage(evm, msg, gp, true /* refunds */, false /* gasBailout */) + execResult, err = core.ApplyMessage(evm, msg, gp, true /* refunds */, gasBailOut) if err != nil { if first { first = false @@ -926,7 +713,7 @@ func (api *TraceAPIImpl) callManyTransactions( } callParams := make([]TraceCallParam, 0, len(txs)) - reader, err := rpchelper.CreateHistoryStateReader(dbtx, blockNumber, txIndex, api.historyV3(dbtx), cfg.ChainName) + reader, err := rpchelper.CreateHistoryStateReader(dbtx, blockNumber, txIndex, cfg.ChainName) if err != nil { return nil, nil, err } diff --git a/turbo/jsonrpc/tracing.go b/turbo/jsonrpc/tracing.go index c891fa25fa4..b5d54309fa3 100644 --- a/turbo/jsonrpc/tracing.go +++ b/turbo/jsonrpc/tracing.go @@ -84,7 +84,7 @@ func (api *PrivateDebugAPIImpl) traceBlock(ctx context.Context, blockNrOrHash rp } engine := api.engine() - _, blockCtx, _, ibs, _, err := transactions.ComputeTxEnv(ctx, engine, block, chainConfig, api._blockReader, tx, 0, api.historyV3(tx)) + _, blockCtx, _, ibs, _, err := transactions.ComputeTxEnv(ctx, engine, block, chainConfig, api._blockReader, tx, 0) if err != nil { stream.WriteNil() return err @@ -275,7 +275,7 @@ func (api *PrivateDebugAPIImpl) TraceTransaction(ctx context.Context, hash commo } engine := api.engine() - msg, blockCtx, txCtx, ibs, _, err := transactions.ComputeTxEnv(ctx, engine, block, chainConfig, api._blockReader, tx, txnIndex, api.historyV3(tx)) + msg, blockCtx, txCtx, ibs, _, err := transactions.ComputeTxEnv(ctx, engine, block, chainConfig, api._blockReader, tx, txnIndex) if err != nil { stream.WriteNil() return err @@ -326,9 +326,9 @@ func (api *PrivateDebugAPIImpl) TraceCall(ctx context.Context, args ethapi.CallA var stateReader state.StateReader if config.TxIndex == nil || isLatest { - stateReader, err = rpchelper.CreateStateReader(ctx, dbtx, blockNrOrHash, 0, api.filters, api.stateCache, api.historyV3(dbtx), chainConfig.ChainName) + stateReader, err = rpchelper.CreateStateReader(ctx, dbtx, blockNrOrHash, 0, api.filters, api.stateCache, chainConfig.ChainName) } else { - stateReader, err = rpchelper.CreateHistoryStateReader(dbtx, blockNumber, int(*config.TxIndex), api.historyV3(dbtx), chainConfig.ChainName) + stateReader, err = rpchelper.CreateHistoryStateReader(dbtx, blockNumber, int(*config.TxIndex), chainConfig.ChainName) } if err != nil { return fmt.Errorf("create state reader: %v", err) @@ -446,7 +446,7 @@ func (api *PrivateDebugAPIImpl) TraceCallMany(ctx context.Context, bundles []Bun replayTransactions = block.Transactions()[:transactionIndex] - stateReader, err := rpchelper.CreateStateReader(ctx, tx, rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(blockNum-1)), 0, api.filters, api.stateCache, api.historyV3(tx), chainConfig.ChainName) + stateReader, err := rpchelper.CreateStateReader(ctx, tx, rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(blockNum-1)), 0, api.filters, api.stateCache, chainConfig.ChainName) if err != nil { stream.WriteNil() return err diff --git a/turbo/rpchelper/helper.go b/turbo/rpchelper/helper.go index bae2ab70897..824d0afa891 100644 --- a/turbo/rpchelper/helper.go +++ b/turbo/rpchelper/helper.go @@ -12,7 +12,6 @@ import ( 
"github.com/ledgerwatch/erigon-lib/wrap" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/state" - "github.com/ledgerwatch/erigon/core/systemcontracts" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" borfinality "github.com/ledgerwatch/erigon/polygon/bor/finality" "github.com/ledgerwatch/erigon/polygon/bor/finality/whitelist" @@ -109,31 +108,26 @@ func _GetBlockNumber(requireCanonical bool, blockNrOrHash rpc.BlockNumberOrHash, return blockNumber, hash, blockNumber == plainStateBlockNumber, nil } -func CreateStateReader(ctx context.Context, tx kv.Tx, blockNrOrHash rpc.BlockNumberOrHash, txnIndex int, filters *Filters, stateCache kvcache.Cache, historyV3 bool, chainName string) (state.StateReader, error) { +func CreateStateReader(ctx context.Context, tx kv.Tx, blockNrOrHash rpc.BlockNumberOrHash, txnIndex int, filters *Filters, stateCache kvcache.Cache, chainName string) (state.StateReader, error) { blockNumber, _, latest, err := _GetBlockNumber(true, blockNrOrHash, tx, filters) if err != nil { return nil, err } - return CreateStateReaderFromBlockNumber(ctx, tx, blockNumber, latest, txnIndex, stateCache, historyV3, chainName) + return CreateStateReaderFromBlockNumber(ctx, tx, blockNumber, latest, txnIndex, stateCache, chainName) } -func CreateStateReaderFromBlockNumber(ctx context.Context, tx kv.Tx, blockNumber uint64, latest bool, txnIndex int, stateCache kvcache.Cache, historyV3 bool, chainName string) (state.StateReader, error) { +func CreateStateReaderFromBlockNumber(ctx context.Context, tx kv.Tx, blockNumber uint64, latest bool, txnIndex int, stateCache kvcache.Cache, chainName string) (state.StateReader, error) { if latest { cacheView, err := stateCache.View(ctx, tx) if err != nil { return nil, err } - return CreateLatestCachedStateReader(cacheView, tx, historyV3), nil + return CreateLatestCachedStateReader(cacheView, tx), nil } - return CreateHistoryStateReader(tx, blockNumber+1, txnIndex, historyV3, chainName) + return CreateHistoryStateReader(tx, blockNumber+1, txnIndex, chainName) } -func CreateHistoryStateReader(tx kv.Tx, blockNumber uint64, txnIndex int, historyV3 bool, chainName string) (state.StateReader, error) { - if !historyV3 { - r := state.NewPlainState(tx, blockNumber, systemcontracts.SystemContractCodeLookup[chainName]) - //r.SetTrace(true) - return r, nil - } +func CreateHistoryStateReader(tx kv.Tx, blockNumber uint64, txnIndex int, chainName string) (state.StateReader, error) { r := state.NewHistoryReaderV3() r.SetTx(tx) //r.SetTrace(true) @@ -145,28 +139,19 @@ func CreateHistoryStateReader(tx kv.Tx, blockNumber uint64, txnIndex int, histor return r, nil } -func NewLatestStateReader(tx kv.Tx, histV3 bool) state.StateReader { - if histV3 { - return state.NewReaderV4(tx.(kv.TemporalGetter)) - } - return state.NewPlainStateReader(tx) +func NewLatestStateReader(tx kv.Tx) state.StateReader { + return state.NewReaderV4(tx.(kv.TemporalGetter)) } -func NewLatestStateWriter(txc wrap.TxContainer, blockNum uint64, histV3 bool) state.StateWriter { - if histV3 { - domains := txc.Doms - minTxNum, err := rawdbv3.TxNums.Min(domains.Tx(), blockNum) - if err != nil { - panic(err) - } - domains.SetTxNum(uint64(int(minTxNum) + /* 1 system txNum in begining of block */ 1)) - return state.NewWriterV4(domains) +func NewLatestStateWriter(txc wrap.TxContainer, blockNum uint64) state.StateWriter { + domains := txc.Doms + minTxNum, err := rawdbv3.TxNums.Min(domains.Tx(), blockNum) + if err != nil { + panic(err) } - return state.NewPlainStateWriter(txc.Tx, 
txc.Tx, blockNum)
+	domains.SetTxNum(uint64(int(minTxNum) + /* 1 system txNum in beginning of block */ 1))
+	return state.NewWriterV4(domains)
 }
 
-func CreateLatestCachedStateReader(cache kvcache.CacheView, tx kv.Tx, histV3 bool) state.StateReader {
-	if histV3 {
-		return state.NewCachedReader3(cache, tx.(kv.TemporalTx))
-	}
-	return state.NewCachedReader2(cache, tx)
+func CreateLatestCachedStateReader(cache kvcache.CacheView, tx kv.Tx) state.StateReader {
+	return state.NewCachedReader3(cache, tx.(kv.TemporalTx))
 }
diff --git a/turbo/snapshotsync/snapshotsync.go b/turbo/snapshotsync/snapshotsync.go
index 6cfdba055d8..ceb342a6a02 100644
--- a/turbo/snapshotsync/snapshotsync.go
+++ b/turbo/snapshotsync/snapshotsync.go
@@ -257,7 +257,7 @@ func computeBlocksToPrune(blockReader services.FullBlockReader, p prune.Mode) (b
 
 // WaitForDownloader - wait for Downloader service to download all expected snapshots
 // for MVP we sync with Downloader only once, in future will send new snapshots also
-func WaitForDownloader(ctx context.Context, logPrefix string, headerchain, histV3, blobs bool, prune prune.Mode, caplin CaplinMode, agg *state.Aggregator, tx kv.RwTx, blockReader services.FullBlockReader, cc *chain.Config, snapshotDownloader proto_downloader.DownloaderClient, stagesIdsList []string) error {
+func WaitForDownloader(ctx context.Context, logPrefix string, headerchain, blobs bool, prune prune.Mode, caplin CaplinMode, agg *state.Aggregator, tx kv.RwTx, blockReader services.FullBlockReader, cc *chain.Config, snapshotDownloader proto_downloader.DownloaderClient, stagesIdsList []string) error {
 	snapshots := blockReader.Snapshots()
 	borSnapshots := blockReader.BorSnapshots()
@@ -311,11 +311,6 @@ func WaitForDownloader(ctx context.Context, logPrefix string, headerchain, histV
 
 	// build all download requests
 	for _, p := range preverifiedBlockSnapshots {
-		if !histV3 {
-			if strings.HasPrefix(p.Name, "domain") || strings.HasPrefix(p.Name, "history") || strings.HasPrefix(p.Name, "idx") {
-				continue
-			}
-		}
 		if caplin == NoCaplin && (strings.Contains(p.Name, "beaconblocks") || strings.Contains(p.Name, "blobsidecars")) {
 			continue
 		}
diff --git a/turbo/stages/genesis_test.go b/turbo/stages/genesis_test.go
index f0dd46660cf..e2118f7166b 100644
--- a/turbo/stages/genesis_test.go
+++ b/turbo/stages/genesis_test.go
@@ -180,7 +180,7 @@ func TestSetupGenesis(t *testing.T) {
 		t.Run(test.name, func(t *testing.T) {
 			t.Parallel()
 			dirs := datadir.New(tmpdir)
-			_, db, _ := temporaltest.NewTestDB(t, dirs)
+			db, _ := temporaltest.NewTestDB(t, dirs)
 			blockReader := freezeblocks.NewBlockReader(freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, dirs.Snap, 0, log.New()), freezeblocks.NewBorRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, dirs.Snap, 0, log.New()))
 			config, genesis, err := test.fn(t, db)
 			// Check the return values.
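Note: with the histV3 flag removed, temporaltest.NewTestDB now returns only the database and the aggregator; the genesis_test.go hunk above and mock_sentry.go below show both call sites. A minimal caller-side sketch, with the helper's signature assumed from those hunks (not verified beyond this patch):

	dirs := datadir.New(t.TempDir())
	db, agg := temporaltest.NewTestDB(t, dirs) // history v3 is the only mode; no histV3 bool is returned
	_, _ = db, agg                             // mock_sentry.go passes nil for tb when no test cleanup is wanted
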
diff --git a/turbo/stages/mock/mock_sentry.go b/turbo/stages/mock/mock_sentry.go index b92310835eb..c92613741af 100644 --- a/turbo/stages/mock/mock_sentry.go +++ b/turbo/stages/mock/mock_sentry.go @@ -260,8 +260,7 @@ func MockWithEverything(tb testing.TB, gspec *types.Genesis, key *ecdsa.PrivateK logger := log.New() ctx, ctxCancel := context.WithCancel(context.Background()) - histV3, db, agg := temporaltest.NewTestDB(nil, dirs) - cfg.HistoryV3 = histV3 + db, agg := temporaltest.NewTestDB(nil, dirs) erigonGrpcServeer := remotedbserver.NewKvServer(ctx, db, nil, nil, nil, logger) allSnapshots := freezeblocks.NewRoSnapshots(ethconfig.Defaults.Snapshot, dirs.Snap, 0, logger) @@ -283,12 +282,12 @@ func MockWithEverything(tb testing.TB, gspec *types.Genesis, key *ecdsa.PrivateK PeerId: gointerfaces.ConvertHashToH512([64]byte{0x12, 0x34, 0x50}), // "12345" BlockSnapshots: allSnapshots, BlockReader: freezeblocks.NewBlockReader(allSnapshots, allBorSnapshots), - HistoryV3: cfg.HistoryV3, + HistoryV3: true, } if tb != nil { tb.Cleanup(mock.Close) } - blockWriter := blockio.NewBlockWriter(mock.HistoryV3) + blockWriter := blockio.NewBlockWriter() mock.Address = crypto.PubkeyToAddress(mock.Key.PublicKey) @@ -313,7 +312,7 @@ func MockWithEverything(tb testing.TB, gspec *types.Genesis, key *ecdsa.PrivateK shanghaiTime := mock.ChainConfig.ShanghaiTime cancunTime := mock.ChainConfig.CancunTime maxBlobsPerBlock := mock.ChainConfig.GetMaxBlobsPerBlock() - mock.TxPool, err = txpool.New(newTxs, mock.DB, poolCfg, kvcache.NewDummy(histV3), *chainID, shanghaiTime, nil /* agraBlock */, cancunTime, maxBlobsPerBlock, nil, logger) + mock.TxPool, err = txpool.New(newTxs, mock.DB, poolCfg, kvcache.NewDummy(), *chainID, shanghaiTime, nil /* agraBlock */, cancunTime, maxBlobsPerBlock, nil, logger) if err != nil { tb.Fatal(err) } @@ -354,16 +353,12 @@ func MockWithEverything(tb testing.TB, gspec *types.Genesis, key *ecdsa.PrivateK dirs, notifications, mock.BlockReader, blockWriter, mock.agg, nil, terseLogger) chainReader := consensuschain.NewReader(mock.ChainConfig, txc.Tx, mock.BlockReader, logger) // We start the mining step - if err := stages2.StateStep(ctx, chainReader, mock.Engine, txc, stateSync, header, body, unwindPoint, headersChain, bodiesChain, histV3); err != nil { + if err := stages2.StateStep(ctx, chainReader, mock.Engine, txc, stateSync, header, body, unwindPoint, headersChain, bodiesChain); err != nil { logger.Warn("Could not validate block", "err", err) return err } var progress uint64 - if histV3 { - progress, err = stages.GetStageProgress(txc.Tx, stages.Execution) - } else { - progress, err = stages.GetStageProgress(txc.Tx, stages.IntermediateHashes) - } + progress, err = stages.GetStageProgress(txc.Tx, stages.Execution) if err != nil { return err } @@ -466,7 +461,6 @@ func MockWithEverything(tb testing.TB, gspec *types.Genesis, key *ecdsa.PrivateK mock.Notifications.Accumulator, cfg.StateStream, /*stateStream=*/ false, - /*exec22=*/ cfg.HistoryV3, dirs, mock.BlockReader, mock.sentriesClient.Hd, @@ -492,14 +486,15 @@ func MockWithEverything(tb testing.TB, gspec *types.Genesis, key *ecdsa.PrivateK agg.SetSnapshotBuildSema(blockSnapBuildSema) blockRetire := freezeblocks.NewBlockRetire(1, dirs, mock.BlockReader, blockWriter, mock.DB, mock.ChainConfig, mock.Notifications.Events, blockSnapBuildSema, logger) + historyV3 := true mock.Sync = stagedsync.New( cfg.Sync, stagedsync.DefaultStages(mock.Ctx, - stagedsync.StageSnapshotsCfg(mock.DB, *mock.ChainConfig, cfg.Sync, dirs, blockRetire, snapDownloader, 
mock.BlockReader, mock.Notifications, mock.HistoryV3, mock.agg, false, false, nil, prune), - stagedsync.StageHeadersCfg(mock.DB, mock.sentriesClient.Hd, mock.sentriesClient.Bd, *mock.ChainConfig, cfg.Sync, sendHeaderRequest, propagateNewBlockHashes, penalize, cfg.BatchSize, false, mock.BlockReader, blockWriter, dirs.Tmp, mock.HistoryV3, mock.Notifications, nil), + stagedsync.StageSnapshotsCfg(mock.DB, *mock.ChainConfig, cfg.Sync, dirs, blockRetire, snapDownloader, mock.BlockReader, mock.Notifications, mock.agg, false, false, nil, prune), + stagedsync.StageHeadersCfg(mock.DB, mock.sentriesClient.Hd, mock.sentriesClient.Bd, *mock.ChainConfig, cfg.Sync, sendHeaderRequest, propagateNewBlockHashes, penalize, cfg.BatchSize, false, mock.BlockReader, blockWriter, dirs.Tmp, mock.Notifications, nil), stagedsync.StageBorHeimdallCfg(mock.DB, snapDb, stagedsync.MiningState{}, *mock.ChainConfig, nil /* heimdallClient */, mock.BlockReader, nil, nil, nil, recents, signatures, false, nil), stagedsync.StageBlockHashesCfg(mock.DB, mock.Dirs.Tmp, mock.ChainConfig, blockWriter), - stagedsync.StageBodiesCfg(mock.DB, mock.sentriesClient.Bd, sendBodyRequest, penalize, blockPropagator, cfg.Sync.BodyDownloadTimeoutSeconds, *mock.ChainConfig, mock.BlockReader, cfg.HistoryV3, blockWriter, nil), + stagedsync.StageBodiesCfg(mock.DB, mock.sentriesClient.Bd, sendBodyRequest, penalize, blockPropagator, cfg.Sync.BodyDownloadTimeoutSeconds, *mock.ChainConfig, mock.BlockReader, blockWriter, nil), stagedsync.StageSendersCfg(mock.DB, mock.ChainConfig, cfg.Sync, false, dirs.Tmp, prune, mock.BlockReader, mock.sentriesClient.Hd, nil), stagedsync.StageExecuteBlocksCfg( mock.DB, @@ -512,7 +507,6 @@ func MockWithEverything(tb testing.TB, gspec *types.Genesis, key *ecdsa.PrivateK mock.Notifications.Accumulator, cfg.StateStream, /*stateStream=*/ false, - /*exec22=*/ cfg.HistoryV3, dirs, mock.BlockReader, mock.sentriesClient.Hd, @@ -521,8 +515,8 @@ func MockWithEverything(tb testing.TB, gspec *types.Genesis, key *ecdsa.PrivateK mock.agg, nil, ), - stagedsync.StageHashStateCfg(mock.DB, mock.Dirs, cfg.HistoryV3), - stagedsync.StageTrieCfg(mock.DB, checkStateRoot, true, false, dirs.Tmp, mock.BlockReader, mock.sentriesClient.Hd, cfg.HistoryV3, mock.agg), + stagedsync.StageHashStateCfg(mock.DB, mock.Dirs), + stagedsync.StageTrieCfg(mock.DB, checkStateRoot, true, false, dirs.Tmp, mock.BlockReader, mock.sentriesClient.Hd, historyV3, mock.agg), stagedsync.StageHistoryCfg(mock.DB, prune, dirs.Tmp), stagedsync.StageLogIndexCfg(mock.DB, prune, dirs.Tmp, nil), stagedsync.StageCallTracesCfg(mock.DB, prune, 0, dirs.Tmp), @@ -539,7 +533,7 @@ func MockWithEverything(tb testing.TB, gspec *types.Genesis, key *ecdsa.PrivateK snapDownloader, mock.BlockReader, blockRetire, mock.agg, nil, forkValidator, logger, checkStateRoot) mock.posStagedSync = stagedsync.New(cfg.Sync, pipelineStages, stagedsync.PipelineUnwindOrder, stagedsync.PipelinePruneOrder, logger) - mock.Eth1ExecutionService = eth1.NewEthereumExecutionModule(mock.BlockReader, mock.DB, mock.posStagedSync, forkValidator, mock.ChainConfig, assembleBlockPOS, nil, mock.Notifications.Accumulator, mock.Notifications.StateChangesConsumer, logger, engine, histV3, cfg.Sync, ctx) + mock.Eth1ExecutionService = eth1.NewEthereumExecutionModule(mock.BlockReader, mock.DB, mock.posStagedSync, forkValidator, mock.ChainConfig, assembleBlockPOS, nil, mock.Notifications.Accumulator, mock.Notifications.StateChangesConsumer, logger, engine, cfg.Sync, ctx) mock.sentriesClient.Hd.StartPoSDownloader(mock.Ctx, 
sendHeaderRequest, penalize) @@ -559,7 +553,6 @@ func MockWithEverything(tb testing.TB, gspec *types.Genesis, key *ecdsa.PrivateK mock.Notifications.Accumulator, cfg.StateStream, /*stateStream=*/ false, - /*exec22=*/ cfg.HistoryV3, dirs, mock.BlockReader, mock.sentriesClient.Hd, @@ -589,15 +582,14 @@ func MockWithEverything(tb testing.TB, gspec *types.Genesis, key *ecdsa.PrivateK go mock.sentriesClient.RecvUploadHeadersMessageLoop(mock.Ctx, mock.SentryClient, &mock.ReceiveWg) mock.StreamWg.Wait() - if histV3 { - c := &core.ChainPack{ - Headers: []*types.Header{mock.Genesis.HeaderNoCopy()}, - Blocks: []*types.Block{mock.Genesis}, - TopBlock: mock.Genesis, - } - if err = mock.InsertChain(c); err != nil { - tb.Fatal(err) - } + //app expecting that genesis will always be in db + c := &core.ChainPack{ + Headers: []*types.Header{mock.Genesis.HeaderNoCopy()}, + Blocks: []*types.Block{mock.Genesis}, + TopBlock: mock.Genesis, + } + if err = mock.InsertChain(c); err != nil { + tb.Fatal(err) } return mock } @@ -847,7 +839,7 @@ func (ms *MockSentry) HeaderDownload() *headerdownload.HeaderDownload { } func (ms *MockSentry) NewHistoryStateReader(blockNum uint64, tx kv.Tx) state.StateReader { - r, err := rpchelper.CreateHistoryStateReader(tx, blockNum, 0, ms.HistoryV3, ms.ChainConfig.ChainName) + r, err := rpchelper.CreateHistoryStateReader(tx, blockNum, 0, ms.ChainConfig.ChainName) if err != nil { panic(err) } @@ -865,5 +857,5 @@ func (ms *MockSentry) HistoryV3Components() *libstate.Aggregator { } func (ms *MockSentry) BlocksIO() (services.FullBlockReader, *blockio.BlockWriter) { - return ms.BlockReader, blockio.NewBlockWriter(ms.HistoryV3) + return ms.BlockReader, blockio.NewBlockWriter() } diff --git a/turbo/stages/stageloop.go b/turbo/stages/stageloop.go index f50d7182a34..66de6f1543e 100644 --- a/turbo/stages/stageloop.go +++ b/turbo/stages/stageloop.go @@ -381,10 +381,6 @@ func MiningStep(ctx context.Context, db kv.RwDB, mining *stagedsync.Sync, tmpDir } defer tx.Rollback() - //histV3, err := kvcfg.HistoryV3.Enabled(tx) - //if err != nil { - // return err - //} var miningBatch kv.RwTx //if histV3 { // sd := state.NewSharedDomains(tx) @@ -410,7 +406,7 @@ func MiningStep(ctx context.Context, db kv.RwDB, mining *stagedsync.Sync, tmpDir return nil } -func addAndVerifyBlockStep(batch kv.RwTx, engine consensus.Engine, chainReader consensus.ChainReader, currentHeader *types.Header, currentBody *types.RawBody, histV3 bool) error { +func addAndVerifyBlockStep(batch kv.RwTx, engine consensus.Engine, chainReader consensus.ChainReader, currentHeader *types.Header, currentBody *types.RawBody) error { currentHeight := currentHeader.Number.Uint64() currentHash := currentHeader.Hash() if chainReader != nil { @@ -440,7 +436,7 @@ func addAndVerifyBlockStep(batch kv.RwTx, engine consensus.Engine, chainReader c if _, err := rawdb.WriteRawBodyIfNotExists(batch, currentHash, currentHeight, currentBody); err != nil { return err } - if histV3 && prevHash != currentHash { + if prevHash != currentHash { if err := rawdb.AppendCanonicalTxNums(batch, currentHeight); err != nil { return err } @@ -454,7 +450,7 @@ func addAndVerifyBlockStep(batch kv.RwTx, engine consensus.Engine, chainReader c return nil } -func StateStep(ctx context.Context, chainReader consensus.ChainReader, engine consensus.Engine, txc wrap.TxContainer, stateSync *stagedsync.Sync, header *types.Header, body *types.RawBody, unwindPoint uint64, headersChain []*types.Header, bodiesChain []*types.RawBody, histV3 bool) (err error) { +func StateStep(ctx 
context.Context, chainReader consensus.ChainReader, engine consensus.Engine, txc wrap.TxContainer, stateSync *stagedsync.Sync, header *types.Header, body *types.RawBody, unwindPoint uint64, headersChain []*types.Header, bodiesChain []*types.RawBody) (err error) { defer func() { if rec := recover(); rec != nil { err = fmt.Errorf("%+v, trace: %s", rec, dbg.Stack()) @@ -479,7 +475,7 @@ func StateStep(ctx context.Context, chainReader consensus.ChainReader, engine co currentHeader := headersChain[i] currentBody := bodiesChain[i] - if err := addAndVerifyBlockStep(txc.Tx, engine, chainReader, currentHeader, currentBody, histV3); err != nil { + if err := addAndVerifyBlockStep(txc.Tx, engine, chainReader, currentHeader, currentBody); err != nil { return err } // Run state sync @@ -493,7 +489,7 @@ func StateStep(ctx context.Context, chainReader consensus.ChainReader, engine co return nil } // Prepare memory state for block execution - if err := addAndVerifyBlockStep(txc.Tx, engine, chainReader, header, body, histV3); err != nil { + if err := addAndVerifyBlockStep(txc.Tx, engine, chainReader, header, body); err != nil { return err } // Run state sync @@ -529,7 +525,7 @@ func NewDefaultStages(ctx context.Context, logger log.Logger, ) []*stagedsync.Stage { dirs := cfg.Dirs - blockWriter := blockio.NewBlockWriter(cfg.HistoryV3) + blockWriter := blockio.NewBlockWriter() // During Import we don't want other services like header requests, body requests etc. to be running. // Hence we run it in the test mode. @@ -563,12 +559,13 @@ func NewDefaultStages(ctx context.Context, depositContract = cfg.Genesis.Config.DepositContract } + historyV3 := true return stagedsync.DefaultStages(ctx, - stagedsync.StageSnapshotsCfg(db, *controlServer.ChainConfig, cfg.Sync, dirs, blockRetire, snapDownloader, blockReader, notifications, cfg.HistoryV3, agg, cfg.InternalCL && cfg.CaplinConfig.Backfilling, cfg.CaplinConfig.BlobBackfilling, silkworm, cfg.Prune), - stagedsync.StageHeadersCfg(db, controlServer.Hd, controlServer.Bd, *controlServer.ChainConfig, cfg.Sync, controlServer.SendHeaderRequest, controlServer.PropagateNewBlockHashes, controlServer.Penalize, cfg.BatchSize, p2pCfg.NoDiscovery, blockReader, blockWriter, dirs.Tmp, cfg.HistoryV3, notifications, loopBreakCheck), + stagedsync.StageSnapshotsCfg(db, *controlServer.ChainConfig, cfg.Sync, dirs, blockRetire, snapDownloader, blockReader, notifications, agg, cfg.InternalCL && cfg.CaplinConfig.Backfilling, cfg.CaplinConfig.BlobBackfilling, silkworm, cfg.Prune), + stagedsync.StageHeadersCfg(db, controlServer.Hd, controlServer.Bd, *controlServer.ChainConfig, cfg.Sync, controlServer.SendHeaderRequest, controlServer.PropagateNewBlockHashes, controlServer.Penalize, cfg.BatchSize, p2pCfg.NoDiscovery, blockReader, blockWriter, dirs.Tmp, notifications, loopBreakCheck), stagedsync.StageBorHeimdallCfg(db, snapDb, stagedsync.MiningState{}, *controlServer.ChainConfig, heimdallClient, blockReader, controlServer.Hd, controlServer.Penalize, loopBreakCheck, recents, signatures, cfg.WithHeimdallWaypointRecording, nil), stagedsync.StageBlockHashesCfg(db, dirs.Tmp, controlServer.ChainConfig, blockWriter), - stagedsync.StageBodiesCfg(db, controlServer.Bd, controlServer.SendBodyRequest, controlServer.Penalize, controlServer.BroadcastNewBlock, cfg.Sync.BodyDownloadTimeoutSeconds, *controlServer.ChainConfig, blockReader, cfg.HistoryV3, blockWriter, loopBreakCheck), + stagedsync.StageBodiesCfg(db, controlServer.Bd, controlServer.SendBodyRequest, controlServer.Penalize, 
controlServer.BroadcastNewBlock, cfg.Sync.BodyDownloadTimeoutSeconds, *controlServer.ChainConfig, blockReader, blockWriter, loopBreakCheck),
 		stagedsync.StageSendersCfg(db, controlServer.ChainConfig, cfg.Sync, false, dirs.Tmp, cfg.Prune, blockReader, controlServer.Hd, loopBreakCheck),
 		stagedsync.StageExecuteBlocksCfg(
 			db,
@@ -581,7 +578,6 @@ func NewDefaultStages(ctx context.Context,
 			notifications.Accumulator,
 			cfg.StateStream,
 			/*stateStream=*/ false,
-			cfg.HistoryV3,
 			dirs,
 			blockReader,
 			controlServer.Hd,
@@ -590,8 +586,8 @@ func NewDefaultStages(ctx context.Context,
 			agg,
 			SilkwormForExecutionStage(silkworm, cfg),
 		),
-		stagedsync.StageHashStateCfg(db, dirs, cfg.HistoryV3),
-		stagedsync.StageTrieCfg(db, true, true, false, dirs.Tmp, blockReader, controlServer.Hd, cfg.HistoryV3, agg),
+		stagedsync.StageHashStateCfg(db, dirs),
+		stagedsync.StageTrieCfg(db, true, true, false, dirs.Tmp, blockReader, controlServer.Hd, historyV3, agg),
 		stagedsync.StageHistoryCfg(db, cfg.Prune, dirs.Tmp),
 		stagedsync.StageLogIndexCfg(db, cfg.Prune, dirs.Tmp, depositContract),
 		stagedsync.StageCallTracesCfg(db, cfg.Prune, 0, dirs.Tmp),
@@ -616,7 +612,7 @@ func NewPipelineStages(ctx context.Context,
 	checkStateRoot bool,
 ) []*stagedsync.Stage {
 	dirs := cfg.Dirs
-	blockWriter := blockio.NewBlockWriter(cfg.HistoryV3)
+	blockWriter := blockio.NewBlockWriter()
 
 	// During Import we don't want other services like header requests, body requests etc. to be running.
 	// Hence we run it in the test mode.
@@ -629,8 +625,9 @@ func NewPipelineStages(ctx context.Context,
 	}
 
 	if len(cfg.Sync.UploadLocation) == 0 {
+		historyV3 := true
 		return stagedsync.PipelineStages(ctx,
-			stagedsync.StageSnapshotsCfg(db, *controlServer.ChainConfig, cfg.Sync, dirs, blockRetire, snapDownloader, blockReader, notifications, cfg.HistoryV3, agg, cfg.InternalCL && cfg.CaplinConfig.Backfilling, cfg.CaplinConfig.BlobBackfilling, silkworm, cfg.Prune),
+			stagedsync.StageSnapshotsCfg(db, *controlServer.ChainConfig, cfg.Sync, dirs, blockRetire, snapDownloader, blockReader, notifications, agg, cfg.InternalCL && cfg.CaplinConfig.Backfilling, cfg.CaplinConfig.BlobBackfilling, silkworm, cfg.Prune),
 			stagedsync.StageBlockHashesCfg(db, dirs.Tmp, controlServer.ChainConfig, blockWriter),
 			stagedsync.StageSendersCfg(db, controlServer.ChainConfig, cfg.Sync, false, dirs.Tmp, cfg.Prune, blockReader, controlServer.Hd, loopBreakCheck),
 			stagedsync.StageExecuteBlocksCfg(
 				db,
 				cfg.Prune,
@@ -644,7 +641,6 @@ func NewPipelineStages(ctx context.Context,
 				notifications.Accumulator,
 				cfg.StateStream,
 				/*stateStream=*/ false,
-				cfg.HistoryV3,
 				dirs,
 				blockReader,
 				controlServer.Hd,
@@ -653,8 +649,8 @@ func NewPipelineStages(ctx context.Context,
 				agg,
 				SilkwormForExecutionStage(silkworm, cfg),
 			),
-			stagedsync.StageHashStateCfg(db, dirs, cfg.HistoryV3),
-			stagedsync.StageTrieCfg(db, checkStateRoot, true, false, dirs.Tmp, blockReader, controlServer.Hd, cfg.HistoryV3, agg),
+			stagedsync.StageHashStateCfg(db, dirs),
+			stagedsync.StageTrieCfg(db, checkStateRoot, true, false, dirs.Tmp, blockReader, controlServer.Hd, historyV3, agg),
 			stagedsync.StageHistoryCfg(db, cfg.Prune, dirs.Tmp),
 			stagedsync.StageLogIndexCfg(db, cfg.Prune, dirs.Tmp, depositContract),
 			stagedsync.StageCallTracesCfg(db, cfg.Prune, 0, dirs.Tmp),
@@ -663,12 +659,13 @@ func NewPipelineStages(ctx context.Context,
 			runInTestMode)
 	}
 
+	historyV3 := true
 	return stagedsync.UploaderPipelineStages(ctx,
-		stagedsync.StageSnapshotsCfg(db, *controlServer.ChainConfig, cfg.Sync, dirs, blockRetire, snapDownloader, blockReader, notifications, cfg.HistoryV3, agg, cfg.InternalCL && cfg.CaplinConfig.Backfilling, cfg.CaplinConfig.BlobBackfilling, silkworm, cfg.Prune),
-		stagedsync.StageHeadersCfg(db, controlServer.Hd, controlServer.Bd, *controlServer.ChainConfig, cfg.Sync, controlServer.SendHeaderRequest, controlServer.PropagateNewBlockHashes, controlServer.Penalize, cfg.BatchSize, p2pCfg.NoDiscovery, blockReader, blockWriter, dirs.Tmp, cfg.HistoryV3, notifications, loopBreakCheck),
+		stagedsync.StageSnapshotsCfg(db, *controlServer.ChainConfig, cfg.Sync, dirs, blockRetire, snapDownloader, blockReader, notifications, agg, cfg.InternalCL && cfg.CaplinConfig.Backfilling, cfg.CaplinConfig.BlobBackfilling, silkworm, cfg.Prune),
+		stagedsync.StageHeadersCfg(db, controlServer.Hd, controlServer.Bd, *controlServer.ChainConfig, cfg.Sync, controlServer.SendHeaderRequest, controlServer.PropagateNewBlockHashes, controlServer.Penalize, cfg.BatchSize, p2pCfg.NoDiscovery, blockReader, blockWriter, dirs.Tmp, notifications, loopBreakCheck),
 		stagedsync.StageBlockHashesCfg(db, dirs.Tmp, controlServer.ChainConfig, blockWriter),
 		stagedsync.StageSendersCfg(db, controlServer.ChainConfig, cfg.Sync, false, dirs.Tmp, cfg.Prune, blockReader, controlServer.Hd, loopBreakCheck),
-		stagedsync.StageBodiesCfg(db, controlServer.Bd, controlServer.SendBodyRequest, controlServer.Penalize, controlServer.BroadcastNewBlock, cfg.Sync.BodyDownloadTimeoutSeconds, *controlServer.ChainConfig, blockReader, cfg.HistoryV3, blockWriter, loopBreakCheck),
+		stagedsync.StageBodiesCfg(db, controlServer.Bd, controlServer.SendBodyRequest, controlServer.Penalize, controlServer.BroadcastNewBlock, cfg.Sync.BodyDownloadTimeoutSeconds, *controlServer.ChainConfig, blockReader, blockWriter, loopBreakCheck),
 		stagedsync.StageExecuteBlocksCfg(
 			db,
 			cfg.Prune,
@@ -680,7 +677,6 @@ func NewPipelineStages(ctx context.Context,
 			notifications.Accumulator,
 			cfg.StateStream,
 			/*stateStream=*/ false,
-			cfg.HistoryV3,
 			dirs,
 			blockReader,
 			controlServer.Hd,
@@ -689,8 +685,8 @@ func NewPipelineStages(ctx context.Context,
 			agg,
 			SilkwormForExecutionStage(silkworm, cfg),
 		),
-		stagedsync.StageHashStateCfg(db, dirs, cfg.HistoryV3),
-		stagedsync.StageTrieCfg(db, checkStateRoot, true, false, dirs.Tmp, blockReader, controlServer.Hd, cfg.HistoryV3, agg),
+		stagedsync.StageHashStateCfg(db, dirs),
+		stagedsync.StageTrieCfg(db, checkStateRoot, true, false, dirs.Tmp, blockReader, controlServer.Hd, historyV3, agg),
 		stagedsync.StageHistoryCfg(db, cfg.Prune, dirs.Tmp),
 		stagedsync.StageLogIndexCfg(db, cfg.Prune, dirs.Tmp, depositContract),
 		stagedsync.StageCallTracesCfg(db, cfg.Prune, 0, dirs.Tmp),
@@ -703,11 +699,12 @@ func NewPipelineStages(ctx context.Context,
 
 func NewInMemoryExecution(ctx context.Context, db kv.RwDB, cfg *ethconfig.Config, controlServer *sentry_multi_client.MultiClient,
 	dirs datadir.Dirs, notifications *shards.Notifications, blockReader services.FullBlockReader, blockWriter *blockio.BlockWriter,
 	agg *state.Aggregator, silkworm *silkworm.Silkworm, logger log.Logger) *stagedsync.Sync {
+	historyV3 := true
 	return stagedsync.New(
 		cfg.Sync,
 		stagedsync.StateStages(ctx,
-			stagedsync.StageHeadersCfg(db, controlServer.Hd, controlServer.Bd, *controlServer.ChainConfig, cfg.Sync, controlServer.SendHeaderRequest, controlServer.PropagateNewBlockHashes, controlServer.Penalize, cfg.BatchSize, false, blockReader, blockWriter, dirs.Tmp, cfg.HistoryV3, nil, nil),
-			stagedsync.StageBodiesCfg(db, controlServer.Bd, controlServer.SendBodyRequest, controlServer.Penalize, controlServer.BroadcastNewBlock, cfg.Sync.BodyDownloadTimeoutSeconds, *controlServer.ChainConfig, blockReader, cfg.HistoryV3, blockWriter, nil),
+			stagedsync.StageHeadersCfg(db, controlServer.Hd, controlServer.Bd, *controlServer.ChainConfig, cfg.Sync, controlServer.SendHeaderRequest, controlServer.PropagateNewBlockHashes, controlServer.Penalize, cfg.BatchSize, false, blockReader, blockWriter, dirs.Tmp, nil, nil),
+			stagedsync.StageBodiesCfg(db, controlServer.Bd, controlServer.SendBodyRequest, controlServer.Penalize, controlServer.BroadcastNewBlock, cfg.Sync.BodyDownloadTimeoutSeconds, *controlServer.ChainConfig, blockReader, blockWriter, nil),
 			stagedsync.StageBlockHashesCfg(db, dirs.Tmp, controlServer.ChainConfig, blockWriter),
 			stagedsync.StageSendersCfg(db, controlServer.ChainConfig, cfg.Sync, true, dirs.Tmp, cfg.Prune, blockReader, controlServer.Hd, nil),
 			stagedsync.StageExecuteBlocksCfg(
@@ -721,7 +718,6 @@ func NewInMemoryExecution(ctx context.Context, db kv.RwDB, cfg *ethconfig.Config
 				notifications.Accumulator,
 				cfg.StateStream,
 				true,
-				cfg.HistoryV3,
 				cfg.Dirs,
 				blockReader,
 				controlServer.Hd,
@@ -730,8 +726,8 @@ func NewInMemoryExecution(ctx context.Context, db kv.RwDB, cfg *ethconfig.Config
 				agg,
 				SilkwormForExecutionStage(silkworm, cfg),
 			),
-			stagedsync.StageHashStateCfg(db, dirs, cfg.HistoryV3),
-			stagedsync.StageTrieCfg(db, true, true, true, dirs.Tmp, blockReader, controlServer.Hd, cfg.HistoryV3, agg)),
+			stagedsync.StageHashStateCfg(db, dirs),
+			stagedsync.StageTrieCfg(db, true, true, true, dirs.Tmp, blockReader, controlServer.Hd, historyV3, agg)),
 		stagedsync.StateUnwindOrder,
 		nil, /* pruneOrder */
 		logger,
@@ -765,7 +761,6 @@ func NewPolygonSyncStages(
 			snapDownloader,
 			blockReader,
 			notifications,
-			config.HistoryV3,
 			agg,
 			config.InternalCL && config.CaplinConfig.Backfilling,
 			config.CaplinConfig.BlobBackfilling,
@@ -776,7 +771,7 @@ func NewPolygonSyncStages(
 			db,
 			config.Dirs.Tmp,
 			chainConfig,
-			blockio.NewBlockWriter(config.HistoryV3),
+			blockio.NewBlockWriter(),
 		),
 		stagedsync.StageSendersCfg(
 			db,
@@ -800,7 +795,6 @@ func NewPolygonSyncStages(
 			notifications.Accumulator,
 			config.StateStream,
 			false, /* badBlockHalt */
-			config.HistoryV3,
 			config.Dirs,
 			blockReader,
 			nil, /* hd */
diff --git a/turbo/transactions/tracing.go b/turbo/transactions/tracing.go
index 816ae8a41e1..26da6f5b9c5 100644
--- a/turbo/transactions/tracing.go
+++ b/turbo/transactions/tracing.go
@@ -9,9 +9,6 @@ import (
 	"time"
 
 	jsoniter "github.com/json-iterator/go"
-	"github.com/ledgerwatch/erigon/eth/consensuschain"
-	"github.com/ledgerwatch/log/v3"
-
 	"github.com/ledgerwatch/erigon-lib/chain"
 	libcommon "github.com/ledgerwatch/erigon-lib/common"
 	"github.com/ledgerwatch/erigon-lib/kv"
@@ -36,8 +33,8 @@ type BlockGetter interface {
 }
 
 // ComputeTxEnv returns the execution environment of a certain transaction.
-func ComputeTxEnv(ctx context.Context, engine consensus.EngineReader, block *types.Block, cfg *chain.Config, headerReader services.HeaderReader, dbtx kv.Tx, txIndex int, historyV3 bool) (core.Message, evmtypes.BlockContext, evmtypes.TxContext, *state.IntraBlockState, state.StateReader, error) {
-	reader, err := rpchelper.CreateHistoryStateReader(dbtx, block.NumberU64(), txIndex, historyV3, cfg.ChainName)
+func ComputeTxEnv(ctx context.Context, engine consensus.EngineReader, block *types.Block, cfg *chain.Config, headerReader services.HeaderReader, dbtx kv.Tx, txIndex int) (core.Message, evmtypes.BlockContext, evmtypes.TxContext, *state.IntraBlockState, state.StateReader, error) {
+	reader, err := rpchelper.CreateHistoryStateReader(dbtx, block.NumberU64(), txIndex, cfg.ChainName)
 	if err != nil {
 		return nil, evmtypes.BlockContext{}, evmtypes.TxContext{}, nil, nil, err
 	}
@@ -58,68 +55,19 @@ func ComputeTxEnv(ctx context.Context, engine consensus.EngineReader, block *typ
 	// Recompute transactions up to the target index.
 	signer := types.MakeSigner(cfg, block.NumberU64(), block.Time())
-	if historyV3 {
-		rules := cfg.Rules(blockContext.BlockNumber, blockContext.Time)
-		txn := block.Transactions()[txIndex]
-		statedb.SetTxContext(txn.Hash(), block.Hash(), txIndex)
-		msg, _ := txn.AsMessage(*signer, block.BaseFee(), rules)
-		if msg.FeeCap().IsZero() && engine != nil {
-			syscall := func(contract libcommon.Address, data []byte) ([]byte, error) {
-				return core.SysCallContract(contract, data, cfg, statedb, header, engine, true /* constCall */)
-			}
-			msg.SetIsFree(engine.IsServiceTransaction(msg.From(), syscall))
+	rules := cfg.Rules(blockContext.BlockNumber, blockContext.Time)
+	txn := block.Transactions()[txIndex]
+	statedb.SetTxContext(txn.Hash(), block.Hash(), txIndex)
+	msg, _ := txn.AsMessage(*signer, block.BaseFee(), rules)
+	if msg.FeeCap().IsZero() && engine != nil {
+		syscall := func(contract libcommon.Address, data []byte) ([]byte, error) {
+			return core.SysCallContract(contract, data, cfg, statedb, header, engine, true /* constCall */)
 		}
-
-		TxContext := core.NewEVMTxContext(msg)
-		return msg, blockContext, TxContext, statedb, reader, nil
+		msg.SetIsFree(engine.IsServiceTransaction(msg.From(), syscall))
 	}
-	vmenv := vm.NewEVM(blockContext, evmtypes.TxContext{}, statedb, cfg, vm.Config{})
-	rules := vmenv.ChainRules()
-
-	consensusHeaderReader := consensuschain.NewReader(cfg, dbtx, nil, nil)
-
-	logger := log.New("tracing")
-	err = core.InitializeBlockExecution(engine.(consensus.Engine), consensusHeaderReader, header, cfg, statedb, logger)
-	if err != nil {
-		return nil, evmtypes.BlockContext{}, evmtypes.TxContext{}, nil, nil, err
-	}
-
-	for idx, txn := range block.Transactions() {
-		select {
-		default:
-		case <-ctx.Done():
-			return nil, evmtypes.BlockContext{}, evmtypes.TxContext{}, nil, nil, ctx.Err()
-		}
-		statedb.SetTxContext(txn.Hash(), block.Hash(), idx)
-
-		// Assemble the transaction call message and return if the requested offset
-		msg, _ := txn.AsMessage(*signer, block.BaseFee(), rules)
-		if msg.FeeCap().IsZero() && engine != nil {
-			syscall := func(contract libcommon.Address, data []byte) ([]byte, error) {
-				return core.SysCallContract(contract, data, cfg, statedb, header, engine, true /* constCall */)
-			}
-			msg.SetIsFree(engine.IsServiceTransaction(msg.From(), syscall))
-		}
-
-		TxContext := core.NewEVMTxContext(msg)
-		if idx == txIndex {
-			return msg, blockContext, TxContext, statedb, reader, nil
-		}
-		vmenv.Reset(TxContext, statedb)
-		// Not yet the searched for transaction, execute on top of the current state
-		if _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(txn.GetGas()).AddBlobGas(txn.GetBlobGas()), true /* refunds */, false /* gasBailout */); err != nil {
-			return nil, evmtypes.BlockContext{}, evmtypes.TxContext{}, nil, nil, fmt.Errorf("transaction %x failed: %w", txn.Hash(), err)
-		}
-		// Ensure any modifications are committed to the state
-		// Only delete empty objects if EIP161 (part of Spurious Dragon) is in effect
-		_ = statedb.FinalizeTx(rules, reader.(*state.PlainState))
-
-		if idx+1 == len(block.Transactions()) {
-			// Return the state from evaluating all txs in the block, note no msg or TxContext in this case
-			return nil, blockContext, evmtypes.TxContext{}, statedb, reader, nil
-		}
-	}
-	return nil, evmtypes.BlockContext{}, evmtypes.TxContext{}, nil, nil, fmt.Errorf("transaction index %d out of range for block %x", txIndex, block.Hash())
+	TxContext := core.NewEVMTxContext(msg)
+	return msg, blockContext, TxContext, statedb, reader, nil
 }
 
 // TraceTx configures a new tracer according to the provided configuration, and

From 65ba9b468ac16a726f9be7bab7f8170e8b0feb6c Mon Sep 17 00:00:00 2001
From: racytech <82003208+racytech@users.noreply.github.com>
Date: Tue, 14 May 2024 22:05:01 +0600
Subject: [PATCH 48/48] [WIP] Implement EIP-7685 and EIP-6110 (#10238)

All PRs from https://github.com/ledgerwatch/erigon/pull/10167, which was
closed due to the failing `Test erigon as a library` check in integration
tests.

---------

Co-authored-by: yperbasis
---
 cl/clparams/version.go                        |   5 +
 cl/cltypes/beacon_block_test.go               |   2 +-
 .../block_collector/block_collector.go        |   2 +-
 .../execution_client_direct.go                |   2 +-
 cmd/evm/internal/t8ntool/execution.go         |   1 +
 cmd/evm/internal/t8ntool/gen_stenv.go         |  88 +++++----
 cmd/evm/internal/t8ntool/transition.go        |   2 +-
 cmd/state/commands/opcode_tracer.go           |   2 +-
 cmd/state/exec3/state.go                      |   2 +-
 cmd/state/exec3/state_recon.go                |   2 +-
 cmd/state/exec3/trace_worker2.go              |   2 +-
 consensus/aura/aura.go                        |   8 +-
 consensus/clique/clique.go                    |  12 +-
 consensus/consensus.go                        |   6 +-
 consensus/ethash/consensus.go                 |   8 +-
 consensus/merge/merge.go                      |  13 +-
 core/blockchain.go                            |  23 ++-
 core/chain_makers.go                          |   4 +-
 core/genesis_write.go                         |   7 +-
 core/rawdb/accessors_chain.go                 |   5 +-
 core/rawdb/accessors_chain_test.go            |  52 ++++-
 core/rawdb/accessors_indexes_test.go          |   2 +-
 core/state/txtask.go                          |   2 +
 core/types/block.go                           | 187 ++++++++++++++++--
 core/types/block_test.go                      |   6 +-
 core/types/deposit.go                         | 111 +++++++++++
 core/types/encdec_test.go                     | 160 ++++++++++-----
 core/types/request.go                         | 111 +++++++++++
 eth/stagedsync/stage_mining_create_block.go   |   1 +
 eth/stagedsync/stage_mining_exec.go           |   4 +-
 eth/stagedsync/stage_mining_finish.go         |   2 +-
 polygon/bor/bor.go                            |   6 +-
 polygon/bor/fake.go                           |   4 +-
 polygon/p2p/message_listener_test.go          |   2 +-
 .../statedb_insert_chain_transaction_test.go  |  18 +-
 .../block_downloader.go                       |   2 +-
 turbo/engineapi/engine_server.go              |  54 ++++-
 turbo/engineapi/engine_types/jsonrpc.go       |  35 ++--
 turbo/engineapi/interface.go                  |   2 +
 turbo/execution/eth1/block_building.go        |   2 +
 .../eth1/eth1_chain_reader.go/chain_reader.go |   4 +-
 turbo/execution/eth1/eth1_utils/grpc_test.go  |   9 +-
 .../snapshotsync/freezeblocks/block_reader.go |   7 +-
 43 files changed, 778 insertions(+), 201 deletions(-)
 create mode 100644 core/types/deposit.go
 create mode 100644 core/types/request.go

diff --git a/cl/clparams/version.go b/cl/clparams/version.go
index c181337e337..7ba9f962b9a 100644
--- a/cl/clparams/version.go
+++ b/cl/clparams/version.go
@@ -10,6 +10,7 @@ const (
 	BellatrixVersion StateVersion = 2
 	CapellaVersion   StateVersion = 3
 	DenebVersion     StateVersion = 4
+	ElectraVersion   StateVersion = 5
 )
 
 // stringToClVersion converts the string to the current state version.
@@ -25,6 +26,8 @@ func StringToClVersion(s string) (StateVersion, error) {
 		return CapellaVersion, nil
 	case "deneb":
 		return DenebVersion, nil
+	case "electra":
+		return ElectraVersion, nil
 	default:
 		return 0, fmt.Errorf("unsupported fork version %s", s)
 	}
@@ -42,6 +45,8 @@ func ClVersionToString(s StateVersion) string {
 		return "capella"
 	case DenebVersion:
 		return "deneb"
+	case ElectraVersion:
+		return "electra"
 	default:
 		panic("unsupported fork version")
 	}
diff --git a/cl/cltypes/beacon_block_test.go b/cl/cltypes/beacon_block_test.go
index ae9c18347c1..73e4c111ec1 100644
--- a/cl/cltypes/beacon_block_test.go
+++ b/cl/cltypes/beacon_block_test.go
@@ -39,7 +39,7 @@ func TestBeaconBody(t *testing.T) {
 		BaseFee: big.NewInt(1),
 	}, []types.Transaction{types.NewTransaction(1, [20]byte{}, uint256.NewInt(1), 5, uint256.NewInt(2), nil)}, nil, nil, types.Withdrawals{&types.Withdrawal{
 		Index: 69,
-	}})
+	}}, nil /*requests*/)
 
 	// Test BeaconBody
 	body := &BeaconBody{
diff --git a/cl/phase1/execution_client/block_collector/block_collector.go b/cl/phase1/execution_client/block_collector/block_collector.go
index 5d2b0b9d46e..79eea139781 100644
--- a/cl/phase1/execution_client/block_collector/block_collector.go
+++ b/cl/phase1/execution_client/block_collector/block_collector.go
@@ -109,7 +109,7 @@ func (b *blockCollector) Flush(ctx context.Context) error {
 			b.logger.Warn("bad blocks segment received", "err", err)
 			return err
 		}
-		blocksBatch = append(blocksBatch, types.NewBlockFromStorage(executionPayload.BlockHash, header, txs, nil, body.Withdrawals))
+		blocksBatch = append(blocksBatch, types.NewBlockFromStorage(executionPayload.BlockHash, header, txs, nil, body.Withdrawals, body.Requests))
 		if len(blocksBatch) >= batchSize {
 			b.logger.Info("[Caplin] Inserting blocks", "from", blocksBatch[0].NumberU64(), "to", blocksBatch[len(blocksBatch)-1].NumberU64())
 			if err := b.engine.InsertBlocks(ctx, blocksBatch, true); err != nil {
diff --git a/cl/phase1/execution_client/execution_client_direct.go b/cl/phase1/execution_client/execution_client_direct.go
index 6679e38dfb1..5ff16bb0df7 100644
--- a/cl/phase1/execution_client/execution_client_direct.go
+++ b/cl/phase1/execution_client/execution_client_direct.go
@@ -40,7 +40,7 @@ func (cc *ExecutionClientDirect) NewPayload(ctx context.Context, payload *cltype
 		return true, err
 	}
 
-	if err := cc.chainRW.InsertBlockAndWait(ctx, types.NewBlockFromStorage(payload.BlockHash, header, txs, nil, body.Withdrawals)); err != nil {
+	if err := cc.chainRW.InsertBlockAndWait(ctx, types.NewBlockFromStorage(payload.BlockHash, header, txs, nil, body.Withdrawals, body.Requests)); err != nil {
 		return false, err
 	}
 
diff --git a/cmd/evm/internal/t8ntool/execution.go b/cmd/evm/internal/t8ntool/execution.go
index e04944ea1e1..f7b0bcb56f7 100644
--- a/cmd/evm/internal/t8ntool/execution.go
+++ b/cmd/evm/internal/t8ntool/execution.go
@@ -60,6 +60,7 @@ type stEnv struct {
 	UncleHash       libcommon.Hash      `json:"uncleHash,omitempty"`
 	Withdrawals     []*types.Withdrawal `json:"withdrawals,omitempty"`
 	WithdrawalsHash *libcommon.Hash     `json:"withdrawalsRoot,omitempty"`
+	Requests        []*types.Request    `json:"requests,omitempty"`
 }
 
 type stEnvMarshaling struct {
diff --git a/cmd/evm/internal/t8ntool/gen_stenv.go b/cmd/evm/internal/t8ntool/gen_stenv.go
index 7f08b6a3735..4a854e2ee59 100644
--- a/cmd/evm/internal/t8ntool/gen_stenv.go
+++ b/cmd/evm/internal/t8ntool/gen_stenv.go
@@ -7,9 +7,8 @@ import (
 	"errors"
 	"math/big"
 
-	libcommon "github.com/ledgerwatch/erigon-lib/common"
-
-	"github.com/ledgerwatch/erigon/common"
+	"github.com/ledgerwatch/erigon-lib/common"
+	common0 "github.com/ledgerwatch/erigon/common"
 	"github.com/ledgerwatch/erigon/common/math"
 	"github.com/ledgerwatch/erigon/core/types"
 )
@@ -19,25 +18,29 @@ var _ = (*stEnvMarshaling)(nil)
 // MarshalJSON marshals as JSON.
 func (s stEnv) MarshalJSON() ([]byte, error) {
 	type stEnv struct {
-		Coinbase         common.UnprefixedAddress                `json:"currentCoinbase" gencodec:"required"`
-		Difficulty       *math.HexOrDecimal256                   `json:"currentDifficulty"`
-		Random           *math.HexOrDecimal256                   `json:"currentRandom"`
-		ParentDifficulty *math.HexOrDecimal256                   `json:"parentDifficulty"`
-		GasLimit         math.HexOrDecimal64                     `json:"currentGasLimit" gencodec:"required"`
-		Number           math.HexOrDecimal64                     `json:"currentNumber" gencodec:"required"`
-		Timestamp        math.HexOrDecimal64                     `json:"currentTimestamp" gencodec:"required"`
-		ParentTimestamp  math.HexOrDecimal64                     `json:"parentTimestamp,omitempty"`
-		BlockHashes      map[math.HexOrDecimal64]libcommon.Hash  `json:"blockHashes,omitempty"`
-		Ommers           []ommer                                 `json:"ommers,omitempty"`
-		BaseFee          *math.HexOrDecimal256                   `json:"currentBaseFee,omitempty"`
-		ParentUncleHash  libcommon.Hash                          `json:"parentUncleHash"`
-		UncleHash        libcommon.Hash                          `json:"uncleHash,omitempty"`
-		Withdrawals      []*types.Withdrawal                     `json:"withdrawals,omitempty"`
+		Coinbase         common0.UnprefixedAddress            `json:"currentCoinbase" gencodec:"required"`
+		Difficulty       *math.HexOrDecimal256                `json:"currentDifficulty"`
+		Random           *math.HexOrDecimal256                `json:"currentRandom"`
+		MixDigest        common.Hash                          `json:"mixHash,omitempty"`
+		ParentDifficulty *math.HexOrDecimal256                `json:"parentDifficulty"`
+		GasLimit         math.HexOrDecimal64                  `json:"currentGasLimit" gencodec:"required"`
+		Number           math.HexOrDecimal64                  `json:"currentNumber" gencodec:"required"`
+		Timestamp        math.HexOrDecimal64                  `json:"currentTimestamp" gencodec:"required"`
+		ParentTimestamp  math.HexOrDecimal64                  `json:"parentTimestamp,omitempty"`
+		BlockHashes      map[math.HexOrDecimal64]common.Hash  `json:"blockHashes,omitempty"`
+		Ommers           []ommer                              `json:"ommers,omitempty"`
+		BaseFee          *math.HexOrDecimal256                `json:"currentBaseFee,omitempty"`
+		ParentUncleHash  common.Hash                          `json:"parentUncleHash"`
+		UncleHash        common.Hash                          `json:"uncleHash,omitempty"`
+		Withdrawals      []*types.Withdrawal                  `json:"withdrawals,omitempty"`
+		WithdrawalsHash  *common.Hash                         `json:"withdrawalsRoot,omitempty"`
+		Requests         []*types.Request                     `json:"requests,omitempty"`
 	}
 	var enc stEnv
-	enc.Coinbase = common.UnprefixedAddress(s.Coinbase)
+	enc.Coinbase = common0.UnprefixedAddress(s.Coinbase)
 	enc.Difficulty = (*math.HexOrDecimal256)(s.Difficulty)
 	enc.Random = (*math.HexOrDecimal256)(s.Random)
+	enc.MixDigest = s.MixDigest
 	enc.ParentDifficulty = (*math.HexOrDecimal256)(s.ParentDifficulty)
 	enc.GasLimit = math.HexOrDecimal64(s.GasLimit)
 	enc.Number = math.HexOrDecimal64(s.Number)
@@ -49,26 +52,31 @@ func (s stEnv) MarshalJSON() ([]byte, error) {
 	enc.ParentUncleHash = s.ParentUncleHash
 	enc.UncleHash = s.UncleHash
 	enc.Withdrawals = s.Withdrawals
+	enc.WithdrawalsHash = s.WithdrawalsHash
+	enc.Requests = s.Requests
 	return json.Marshal(&enc)
 }
 
 // UnmarshalJSON unmarshals from JSON.
 func (s *stEnv) UnmarshalJSON(input []byte) error {
 	type stEnv struct {
-		Coinbase         *common.UnprefixedAddress               `json:"currentCoinbase" gencodec:"required"`
-		Difficulty       *math.HexOrDecimal256                   `json:"currentDifficulty"`
-		Random           *math.HexOrDecimal256                   `json:"currentRandom"`
-		ParentDifficulty *math.HexOrDecimal256                   `json:"parentDifficulty"`
-		GasLimit         *math.HexOrDecimal64                    `json:"currentGasLimit" gencodec:"required"`
-		Number           *math.HexOrDecimal64                    `json:"currentNumber" gencodec:"required"`
-		Timestamp        *math.HexOrDecimal64                    `json:"currentTimestamp" gencodec:"required"`
-		ParentTimestamp  *math.HexOrDecimal64                    `json:"parentTimestamp,omitempty"`
-		BlockHashes      map[math.HexOrDecimal64]libcommon.Hash  `json:"blockHashes,omitempty"`
-		Ommers           []ommer                                 `json:"ommers,omitempty"`
-		BaseFee          *math.HexOrDecimal256                   `json:"currentBaseFee,omitempty"`
-		ParentUncleHash  *libcommon.Hash                         `json:"parentUncleHash"`
-		UncleHash        libcommon.Hash                          `json:"uncleHash,omitempty"`
-		Withdrawals      []*types.Withdrawal                     `json:"withdrawals,omitempty"`
+		Coinbase         *common0.UnprefixedAddress           `json:"currentCoinbase" gencodec:"required"`
+		Difficulty       *math.HexOrDecimal256                `json:"currentDifficulty"`
+		Random           *math.HexOrDecimal256                `json:"currentRandom"`
+		MixDigest        *common.Hash                         `json:"mixHash,omitempty"`
+		ParentDifficulty *math.HexOrDecimal256                `json:"parentDifficulty"`
+		GasLimit         *math.HexOrDecimal64                 `json:"currentGasLimit" gencodec:"required"`
+		Number           *math.HexOrDecimal64                 `json:"currentNumber" gencodec:"required"`
+		Timestamp        *math.HexOrDecimal64                 `json:"currentTimestamp" gencodec:"required"`
+		ParentTimestamp  *math.HexOrDecimal64                 `json:"parentTimestamp,omitempty"`
+		BlockHashes      map[math.HexOrDecimal64]common.Hash  `json:"blockHashes,omitempty"`
+		Ommers           []ommer                              `json:"ommers,omitempty"`
+		BaseFee          *math.HexOrDecimal256                `json:"currentBaseFee,omitempty"`
+		ParentUncleHash  *common.Hash                         `json:"parentUncleHash"`
+		UncleHash        *common.Hash                         `json:"uncleHash,omitempty"`
+		Withdrawals      []*types.Withdrawal                  `json:"withdrawals,omitempty"`
+		WithdrawalsHash  *common.Hash                         `json:"withdrawalsRoot,omitempty"`
+		Requests         []*types.Request                     `json:"requests,omitempty"`
 	}
 	var dec stEnv
 	if err := json.Unmarshal(input, &dec); err != nil {
@@ -77,13 +85,16 @@ func (s *stEnv) UnmarshalJSON(input []byte) error {
 	if dec.Coinbase == nil {
 		return errors.New("missing required field 'currentCoinbase' for stEnv")
 	}
-	s.Coinbase = libcommon.Address(*dec.Coinbase)
+	s.Coinbase = common.Address(*dec.Coinbase)
 	if dec.Difficulty != nil {
 		s.Difficulty = (*big.Int)(dec.Difficulty)
 	}
 	if dec.Random != nil {
 		s.Random = (*big.Int)(dec.Random)
 	}
+	if dec.MixDigest != nil {
+		s.MixDigest = *dec.MixDigest
+	}
 	if dec.ParentDifficulty != nil {
 		s.ParentDifficulty = (*big.Int)(dec.ParentDifficulty)
 	}
@@ -114,10 +125,17 @@ func (s *stEnv) UnmarshalJSON(input []byte) error {
 	if dec.ParentUncleHash != nil {
 		s.ParentUncleHash = *dec.ParentUncleHash
 	}
-	s.UncleHash = dec.UncleHash
+	if dec.UncleHash != nil {
+		s.UncleHash = *dec.UncleHash
+	}
 	if dec.Withdrawals != nil {
 		s.Withdrawals = dec.Withdrawals
 	}
-
+	if dec.WithdrawalsHash != nil {
+		s.WithdrawalsHash = dec.WithdrawalsHash
+	}
+	if dec.Requests != nil {
+		s.Requests = dec.Requests
+	}
 	return nil
 }
diff --git a/cmd/evm/internal/t8ntool/transition.go b/cmd/evm/internal/t8ntool/transition.go
index f89020cfc95..200448aecb4 100644
--- a/cmd/evm/internal/t8ntool/transition.go
+++ b/cmd/evm/internal/t8ntool/transition.go
@@ -278,7 +278,7 @@ func Main(ctx *cli.Context) error {
 			ommerN.SetUint64(header.Number.Uint64() - ommer.Delta)
 			ommerHeaders[i] = &types.Header{Coinbase: ommer.Address, Number: &ommerN}
 		}
-		block := types.NewBlock(header, txs, ommerHeaders, nil /* receipts */, prestate.Env.Withdrawals)
+		block := types.NewBlock(header, txs, ommerHeaders, nil /* receipts */, prestate.Env.Withdrawals, prestate.Env.Requests)
 
 		var hashError error
 		getHash := func(num uint64) libcommon.Hash {
diff --git a/cmd/state/commands/opcode_tracer.go b/cmd/state/commands/opcode_tracer.go
index f2876e989bf..e91c94070a8 100644
--- a/cmd/state/commands/opcode_tracer.go
+++ b/cmd/state/commands/opcode_tracer.go
@@ -723,7 +723,7 @@ func runBlock(engine consensus.Engine, ibs *state.IntraBlockState, txnWriter sta
 	if !vmConfig.ReadOnly {
 		// Finalize the block, applying any consensus engine specific extras (e.g. block rewards)
 		tx := block.Transactions()
-		if _, _, _, err := engine.FinalizeAndAssemble(chainConfig, header, ibs, tx, block.Uncles(), receipts, block.Withdrawals(), nil, nil, nil, logger); err != nil {
+		if _, _, _, err := engine.FinalizeAndAssemble(chainConfig, header, ibs, tx, block.Uncles(), receipts, block.Withdrawals(), block.Requests(), nil, nil, nil, logger); err != nil {
 			return nil, fmt.Errorf("finalize of block %d failed: %w", block.NumberU64(), err)
 		}
 
diff --git a/cmd/state/exec3/state.go b/cmd/state/exec3/state.go
index 2f2beaab96b..de7fce77946 100644
--- a/cmd/state/exec3/state.go
+++ b/cmd/state/exec3/state.go
@@ -205,7 +205,7 @@ func (rw *Worker) RunTxTaskNoLock(txTask *state.TxTask) {
 			return core.SysCallContract(contract, data, rw.chainConfig, ibs, header, rw.engine, false /* constCall */)
 		}
 
-		_, _, err := rw.engine.Finalize(rw.chainConfig, types.CopyHeader(header), ibs, txTask.Txs, txTask.Uncles, txTask.BlockReceipts, txTask.Withdrawals, rw.chain, syscall, rw.logger)
+		_, _, err := rw.engine.Finalize(rw.chainConfig, types.CopyHeader(header), ibs, txTask.Txs, txTask.Uncles, txTask.BlockReceipts, txTask.Withdrawals, txTask.Requests, rw.chain, syscall, rw.logger)
 		if err != nil {
 			txTask.Error = err
 		} else {
diff --git a/cmd/state/exec3/state_recon.go b/cmd/state/exec3/state_recon.go
index ebdd9f8c33b..3a98157686b 100644
--- a/cmd/state/exec3/state_recon.go
+++ b/cmd/state/exec3/state_recon.go
@@ -308,7 +308,7 @@ func (rw *ReconWorker) runTxTask(txTask *state.TxTask) error {
 		syscall := func(contract libcommon.Address, data []byte) ([]byte, error) {
 			return core.SysCallContract(contract, data, rw.chainConfig, ibs, header, rw.engine, false /* constCall */)
 		}
-		if _, _, err := rw.engine.Finalize(rw.chainConfig, types.CopyHeader(header), ibs, txTask.Txs, txTask.Uncles, txTask.BlockReceipts, txTask.Withdrawals, rw.chain, syscall, rw.logger); err != nil {
+		if _, _, err := rw.engine.Finalize(rw.chainConfig, types.CopyHeader(header), ibs, txTask.Txs, txTask.Uncles, txTask.BlockReceipts, txTask.Withdrawals, txTask.Requests, rw.chain, syscall, rw.logger); err != nil {
 			if _, readError := rw.stateReader.ReadError(); !readError {
 				return fmt.Errorf("finalize of block %d failed: %w", txTask.BlockNum, err)
 			}
diff --git a/cmd/state/exec3/trace_worker2.go b/cmd/state/exec3/trace_worker2.go
index 1e8fccdec36..2afb92bf3d6 100644
--- a/cmd/state/exec3/trace_worker2.go
+++ b/cmd/state/exec3/trace_worker2.go
@@ -155,7 +155,7 @@ func (rw *TraceWorker2) RunTxTask(txTask *state.TxTask) {
 			return core.SysCallContract(contract, data, rw.execArgs.ChainConfig, ibs, header, rw.execArgs.Engine, false /* constCall */)
 		}
-		_, _, err := rw.execArgs.Engine.Finalize(rw.execArgs.ChainConfig, types.CopyHeader(header), ibs, txTask.Txs, txTask.Uncles, txTask.BlockReceipts, txTask.Withdrawals, rw.chain, syscall, rw.logger)
+		_, _, err := rw.execArgs.Engine.Finalize(rw.execArgs.ChainConfig, types.CopyHeader(header), ibs, txTask.Txs, txTask.Uncles, txTask.BlockReceipts, txTask.Withdrawals, txTask.Requests, rw.chain, syscall, rw.logger)
 		if err != nil {
 			txTask.Error = err
 		}
diff --git a/consensus/aura/aura.go b/consensus/aura/aura.go
index 9b571d2513b..2d1d84b97b2 100644
--- a/consensus/aura/aura.go
+++ b/consensus/aura/aura.go
@@ -703,7 +703,7 @@ func (c *AuRa) applyRewards(header *types.Header, state *state.IntraBlockState,
 // word `signal epoch` == word `pending epoch`
 func (c *AuRa) Finalize(config *chain.Config, header *types.Header, state *state.IntraBlockState, txs types.Transactions,
-	uncles []*types.Header, receipts types.Receipts, withdrawals []*types.Withdrawal,
+	uncles []*types.Header, receipts types.Receipts, withdrawals []*types.Withdrawal, requests []*types.Request,
 	chain consensus.ChainReader, syscall consensus.SystemCall, logger log.Logger,
 ) (types.Transactions, types.Receipts, error) {
 	if err := c.applyRewards(header, state, syscall); err != nil {
@@ -842,14 +842,14 @@ func allHeadersUntil(chain consensus.ChainHeaderReader, from *types.Header, to l
 //}
 
 // FinalizeAndAssemble implements consensus.Engine
-func (c *AuRa) FinalizeAndAssemble(config *chain.Config, header *types.Header, state *state.IntraBlockState, txs types.Transactions, uncles []*types.Header, receipts types.Receipts, withdrawals []*types.Withdrawal, chain consensus.ChainReader, syscall consensus.SystemCall, call consensus.Call, logger log.Logger) (*types.Block, types.Transactions, types.Receipts, error) {
-	outTxs, outReceipts, err := c.Finalize(config, header, state, txs, uncles, receipts, withdrawals, chain, syscall, logger)
+func (c *AuRa) FinalizeAndAssemble(config *chain.Config, header *types.Header, state *state.IntraBlockState, txs types.Transactions, uncles []*types.Header, receipts types.Receipts, withdrawals []*types.Withdrawal, requests []*types.Request, chain consensus.ChainReader, syscall consensus.SystemCall, call consensus.Call, logger log.Logger) (*types.Block, types.Transactions, types.Receipts, error) {
+	outTxs, outReceipts, err := c.Finalize(config, header, state, txs, uncles, receipts, withdrawals, requests, chain, syscall, logger)
 	if err != nil {
 		return nil, nil, nil, err
 	}
 
 	// Assemble and return the final block for sealing
-	return types.NewBlock(header, outTxs, uncles, outReceipts, withdrawals), outTxs, outReceipts, nil
+	return types.NewBlock(header, outTxs, uncles, outReceipts, withdrawals, requests), outTxs, outReceipts, nil
 }
 
 // Authorize injects a private key into the consensus engine to mint new blocks
diff --git a/consensus/clique/clique.go b/consensus/clique/clique.go
index 6efe46f21ae..6885c8218d4 100644
--- a/consensus/clique/clique.go
+++ b/consensus/clique/clique.go
@@ -22,14 +22,15 @@ import (
 	"context"
 	"errors"
 	"fmt"
-	"github.com/ledgerwatch/erigon-lib/common/hexutil"
-	"github.com/ledgerwatch/erigon-lib/kv/dbutils"
 	"io"
 	"math/big"
 	"math/rand"
 	"sync"
 	"time"
 
+	"github.com/ledgerwatch/erigon-lib/common/hexutil"
+	"github.com/ledgerwatch/erigon-lib/kv/dbutils"
+
 	"github.com/goccy/go-json"
 	lru "github.com/hashicorp/golang-lru/arc/v2"
 	"github.com/ledgerwatch/erigon/turbo/services"
@@ -377,7 +378,7 @@ func (c *Clique) CalculateRewards(config *chain.Config, header *types.Header, un
 
 // Finalize implements consensus.Engine, ensuring no uncles are set, nor block
 // rewards given.
 func (c *Clique) Finalize(config *chain.Config, header *types.Header, state *state.IntraBlockState,
-	txs types.Transactions, uncles []*types.Header, r types.Receipts, withdrawals []*types.Withdrawal,
+	txs types.Transactions, uncles []*types.Header, r types.Receipts, withdrawals []*types.Withdrawal, requests []*types.Request,
 	chain consensus.ChainReader, syscall consensus.SystemCall, logger log.Logger,
 ) (types.Transactions, types.Receipts, error) {
 	// No block rewards in PoA, so the state remains as is and uncles are dropped
@@ -388,14 +389,13 @@ func (c *Clique) Finalize(config *chain.Config, header *types.Header, state *sta
 // FinalizeAndAssemble implements consensus.Engine, ensuring no uncles are set,
 // nor block rewards given, and returns the final block.
 func (c *Clique) FinalizeAndAssemble(chainConfig *chain.Config, header *types.Header, state *state.IntraBlockState,
-	txs types.Transactions, uncles []*types.Header, receipts types.Receipts, withdrawals []*types.Withdrawal,
-	chain consensus.ChainReader, syscall consensus.SystemCall, call consensus.Call, logger log.Logger,
+	txs types.Transactions, uncles []*types.Header, receipts types.Receipts, withdrawals []*types.Withdrawal, requests []*types.Request, chain consensus.ChainReader, syscall consensus.SystemCall, call consensus.Call, logger log.Logger,
 ) (*types.Block, types.Transactions, types.Receipts, error) {
 	// No block rewards in PoA, so the state remains as is and uncles are dropped
 	header.UncleHash = types.CalcUncleHash(nil)
 
 	// Assemble and return the final block for sealing
-	return types.NewBlock(header, txs, nil, receipts, withdrawals), txs, receipts, nil
+	return types.NewBlock(header, txs, nil, receipts, withdrawals, requests), txs, receipts, nil
 }
 
 // Authorize injects a private key into the consensus engine to mint new blocks
diff --git a/consensus/consensus.go b/consensus/consensus.go
index 334b7f85a1f..3e58f732144 100644
--- a/consensus/consensus.go
+++ b/consensus/consensus.go
@@ -154,8 +154,7 @@ type EngineWriter interface {
 	// Note: The block header and state database might be updated to reflect any
 	// consensus rules that happen at finalization (e.g. block rewards).
 	Finalize(config *chain.Config, header *types.Header, state *state.IntraBlockState,
-		txs types.Transactions, uncles []*types.Header, receipts types.Receipts, withdrawals []*types.Withdrawal,
-		chain ChainReader, syscall SystemCall, logger log.Logger,
+		txs types.Transactions, uncles []*types.Header, receipts types.Receipts, withdrawals []*types.Withdrawal, requests []*types.Request, chain ChainReader, syscall SystemCall, logger log.Logger,
 	) (types.Transactions, types.Receipts, error)
 
 	// FinalizeAndAssemble runs any post-transaction state modifications (e.g. block
@@ -164,8 +163,7 @@ type EngineWriter interface {
 	// Note: The block header and state database might be updated to reflect any
 	// consensus rules that happen at finalization (e.g. block rewards).
 	FinalizeAndAssemble(config *chain.Config, header *types.Header, state *state.IntraBlockState,
-		txs types.Transactions, uncles []*types.Header, receipts types.Receipts, withdrawals []*types.Withdrawal,
-		chain ChainReader, syscall SystemCall, call Call, logger log.Logger,
+		txs types.Transactions, uncles []*types.Header, receipts types.Receipts, withdrawals []*types.Withdrawal, requests []*types.Request, chain ChainReader, syscall SystemCall, call Call, logger log.Logger,
 	) (*types.Block, types.Transactions, types.Receipts, error)
 
 	// Seal generates a new sealing request for the given input block and pushes
diff --git a/consensus/ethash/consensus.go b/consensus/ethash/consensus.go
index 00c26a60d86..5642478d317 100644
--- a/consensus/ethash/consensus.go
+++ b/consensus/ethash/consensus.go
@@ -562,7 +562,7 @@ func (ethash *Ethash) Initialize(config *chain.Config, chain consensus.ChainHead
 // Finalize implements consensus.Engine, accumulating the block and uncle rewards,
 // setting the final state on the header
 func (ethash *Ethash) Finalize(config *chain.Config, header *types.Header, state *state.IntraBlockState,
-	txs types.Transactions, uncles []*types.Header, r types.Receipts, withdrawals []*types.Withdrawal,
+	txs types.Transactions, uncles []*types.Header, r types.Receipts, withdrawals []*types.Withdrawal, requests []*types.Request,
 	chain consensus.ChainReader, syscall consensus.SystemCall, logger log.Logger,
 ) (types.Transactions, types.Receipts, error) {
 	// Accumulate any block and uncle rewards and commit the final state root
@@ -573,17 +573,17 @@ func (ethash *Ethash) Finalize(config *chain.Config, header *types.Header, state
 // FinalizeAndAssemble implements consensus.Engine, accumulating the block and
 // uncle rewards, setting the final state and assembling the block.
 func (ethash *Ethash) FinalizeAndAssemble(chainConfig *chain.Config, header *types.Header, state *state.IntraBlockState,
-	txs types.Transactions, uncles []*types.Header, r types.Receipts, withdrawals []*types.Withdrawal,
+	txs types.Transactions, uncles []*types.Header, r types.Receipts, withdrawals []*types.Withdrawal, requests []*types.Request,
 	chain consensus.ChainReader, syscall consensus.SystemCall, call consensus.Call, logger log.Logger,
 ) (*types.Block, types.Transactions, types.Receipts, error) {
 	// Finalize block
-	outTxs, outR, err := ethash.Finalize(chainConfig, header, state, txs, uncles, r, withdrawals, chain, syscall, logger)
+	outTxs, outR, err := ethash.Finalize(chainConfig, header, state, txs, uncles, r, withdrawals, requests, chain, syscall, logger)
 	if err != nil {
 		return nil, nil, nil, err
 	}
 
 	// Header seems complete, assemble into a block and return
-	return types.NewBlock(header, outTxs, uncles, outR, withdrawals), outTxs, outR, nil
+	return types.NewBlock(header, outTxs, uncles, outR, withdrawals, requests), outTxs, outR, nil
 }
 
 // SealHash returns the hash of a block prior to it being sealed.
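The signature change is mechanical and repeats across aura, clique, ethash, and merge: every `Finalize`/`FinalizeAndAssemble` implementation gains a trailing `requests []*types.Request` parameter next to `withdrawals` and forwards it unchanged into `types.NewBlock`. Below is a minimal, self-contained Go sketch of that delegation pattern; the types and signatures are simplified stand-ins for illustration, not erigon's real API.

package main

import "fmt"

// Simplified stand-ins for erigon's types; illustrative only.
type Header struct{ Number uint64 }
type Transaction struct{}
type Request struct{ Type byte }
type Block struct {
	Header   *Header
	Txs      []Transaction
	Requests []*Request
}

// Engine mirrors the shape of consensus.EngineWriter after this patch:
// requests ride alongside withdrawals through finalization.
type Engine interface {
	Finalize(h *Header, txs []Transaction, requests []*Request) ([]Transaction, error)
	FinalizeAndAssemble(h *Header, txs []Transaction, requests []*Request) (*Block, error)
}

type noRewardEngine struct{}

func (noRewardEngine) Finalize(h *Header, txs []Transaction, requests []*Request) ([]Transaction, error) {
	// No block rewards here; state is left as-is (cf. clique).
	return txs, nil
}

func (e noRewardEngine) FinalizeAndAssemble(h *Header, txs []Transaction, requests []*Request) (*Block, error) {
	outTxs, err := e.Finalize(h, txs, requests)
	if err != nil {
		return nil, err
	}
	// Requests are passed through to block assembly, mirroring how
	// types.NewBlock now receives them as its final argument.
	return &Block{Header: h, Txs: outTxs, Requests: requests}, nil
}

func main() {
	var e Engine = noRewardEngine{}
	b, _ := e.FinalizeAndAssemble(&Header{Number: 1}, nil, []*Request{{Type: 0}})
	fmt.Println("assembled block with", len(b.Requests), "request(s)")
}

Threading the slice even through engines that never consume it keeps block assembly uniform: `NewBlock` is the single place where the requests root is derived, so every engine must hand the slice through.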
diff --git a/consensus/merge/merge.go b/consensus/merge/merge.go
index 823be69a219..8852b3184eb 100644
--- a/consensus/merge/merge.go
+++ b/consensus/merge/merge.go
@@ -131,11 +131,11 @@ func (s *Merge) CalculateRewards(config *chain.Config, header *types.Header, unc
 }
 
 func (s *Merge) Finalize(config *chain.Config, header *types.Header, state *state.IntraBlockState,
-	txs types.Transactions, uncles []*types.Header, r types.Receipts, withdrawals []*types.Withdrawal,
+	txs types.Transactions, uncles []*types.Header, r types.Receipts, withdrawals []*types.Withdrawal, requests []*types.Request,
 	chain consensus.ChainReader, syscall consensus.SystemCall, logger log.Logger,
 ) (types.Transactions, types.Receipts, error) {
 	if !misc.IsPoSHeader(header) {
-		return s.eth1Engine.Finalize(config, header, state, txs, uncles, r, withdrawals, chain, syscall, logger)
+		return s.eth1Engine.Finalize(config, header, state, txs, uncles, r, withdrawals, requests, chain, syscall, logger)
 	}
 
 	rewards, err := s.CalculateRewards(config, header, uncles, syscall)
@@ -163,17 +163,16 @@ func (s *Merge) Finalize(config *chain.Config, header *types.Header, state *stat
 }
 
 func (s *Merge) FinalizeAndAssemble(config *chain.Config, header *types.Header, state *state.IntraBlockState,
-	txs types.Transactions, uncles []*types.Header, receipts types.Receipts, withdrawals []*types.Withdrawal,
-	chain consensus.ChainReader, syscall consensus.SystemCall, call consensus.Call, logger log.Logger,
+	txs types.Transactions, uncles []*types.Header, receipts types.Receipts, withdrawals []*types.Withdrawal, requests []*types.Request, chain consensus.ChainReader, syscall consensus.SystemCall, call consensus.Call, logger log.Logger,
 ) (*types.Block, types.Transactions, types.Receipts, error) {
 	if !misc.IsPoSHeader(header) {
-		return s.eth1Engine.FinalizeAndAssemble(config, header, state, txs, uncles, receipts, withdrawals, chain, syscall, call, logger)
+		return s.eth1Engine.FinalizeAndAssemble(config, header, state, txs, uncles, receipts, withdrawals, requests, chain, syscall, call, logger)
 	}
-	outTxs, outReceipts, err := s.Finalize(config, header, state, txs, uncles, receipts, withdrawals, chain, syscall, logger)
+	outTxs, outReceipts, err := s.Finalize(config, header, state, txs, uncles, receipts, withdrawals, requests, chain, syscall, logger)
 	if err != nil {
 		return nil, nil, nil, err
 	}
-	return types.NewBlock(header, outTxs, uncles, outReceipts, withdrawals), outTxs, outReceipts, nil
+	return types.NewBlock(header, outTxs, uncles, outReceipts, withdrawals, requests), outTxs, outReceipts, nil
 }
 
 func (s *Merge) SealHash(header *types.Header) (hash libcommon.Hash) {
diff --git a/core/blockchain.go b/core/blockchain.go
index 8dcc0c2f966..0120dbd7cbb 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -105,6 +105,7 @@ func ExecuteBlockEphemerally(
 	includedTxs := make(types.Transactions, 0, block.Transactions().Len())
 	receipts := make(types.Receipts, 0, block.Transactions().Len())
 	noop := state.NewNoopWriter()
+	var allLogs types.Logs
 	for i, tx := range block.Transactions() {
 		ibs.SetTxContext(tx.Hash(), block.Hash(), i)
 		writeTrace := false
@@ -135,6 +136,7 @@ func ExecuteBlockEphemerally(
 				receipts = append(receipts, receipt)
 			}
 		}
+		allLogs = append(allLogs, receipt.Logs...)
 	}
 
 	receiptSha := types.DeriveSha(receipts)
@@ -163,7 +165,7 @@ func ExecuteBlockEphemerally(
 	}
 	if !vmConfig.ReadOnly {
 		txs := block.Transactions()
-		if _, _, _, err := FinalizeBlockExecution(engine, stateReader, block.Header(), txs, block.Uncles(), stateWriter, chainConfig, ibs, receipts, block.Withdrawals(), chainReader, false, logger); err != nil {
+		if _, _, _, err := FinalizeBlockExecution(engine, stateReader, block.Header(), txs, block.Uncles(), stateWriter, chainConfig, ibs, receipts, block.Withdrawals(), block.Requests(), chainReader, false, logger); err != nil {
 			return nil, err
 		}
 	}
@@ -201,6 +203,19 @@ func ExecuteBlockEphemerally(
 		execRs.StateSyncReceipt = stateSyncReceipt
 	}
 
+	if chainConfig.IsPrague(block.Time()) {
+		requests, err := types.ParseDepositLogs(allLogs, chainConfig.DepositContract)
+		if err != nil {
+			return nil, fmt.Errorf("error: could not parse request logs: %w", err)
+		}
+
+		rh := types.DeriveSha(requests)
+		if *block.Header().RequestsRoot != rh && !vmConfig.NoReceipts {
+			// TODO(racytech): do we have to check it here?
+			return nil, fmt.Errorf("error: invalid requests root hash, expected: %v, got: %v", *block.Header().RequestsRoot, rh)
+		}
+	}
+
 	return execRs, nil
 }
 
@@ -315,7 +330,7 @@ func FinalizeBlockExecution(
 	header *types.Header, txs types.Transactions, uncles []*types.Header,
 	stateWriter state.StateWriter, cc *chain.Config,
 	ibs *state.IntraBlockState, receipts types.Receipts,
-	withdrawals []*types.Withdrawal, chainReader consensus.ChainReader,
+	withdrawals []*types.Withdrawal, requests []*types.Request, chainReader consensus.ChainReader,
 	isMining bool,
 	logger log.Logger,
 ) (newBlock *types.Block, newTxs types.Transactions, newReceipt types.Receipts, err error) {
@@ -323,9 +338,9 @@ func FinalizeBlockExecution(
 		return SysCallContract(contract, data, cc, ibs, header, engine, false /* constCall */)
 	}
 	if isMining {
-		newBlock, newTxs, newReceipt, err = engine.FinalizeAndAssemble(cc, header, ibs, txs, uncles, receipts, withdrawals, chainReader, syscall, nil, logger)
+		newBlock, newTxs, newReceipt, err = engine.FinalizeAndAssemble(cc, header, ibs, txs, uncles, receipts, withdrawals, requests, chainReader, syscall, nil, logger)
 	} else {
-		_, _, err = engine.Finalize(cc, header, ibs, txs, uncles, receipts, withdrawals, chainReader, syscall, logger)
+		_, _, err = engine.Finalize(cc, header, ibs, txs, uncles, receipts, withdrawals, requests, chainReader, syscall, logger)
 	}
 	if err != nil {
 		return nil, nil, nil, err
diff --git a/core/chain_makers.go b/core/chain_makers.go
index 6b1054c75c8..934906e6623 100644
--- a/core/chain_makers.go
+++ b/core/chain_makers.go
@@ -379,7 +379,7 @@ func GenerateChain(config *chain.Config, parent *types.Block, engine consensus.E
 			txNumIncrement()
 			if b.engine != nil {
 				// Finalize and seal the block
-				if _, _, _, err := b.engine.FinalizeAndAssemble(config, b.header, ibs, b.txs, b.uncles, b.receipts, nil, nil, nil, nil, logger); err != nil {
+				if _, _, _, err := b.engine.FinalizeAndAssemble(config, b.header, ibs, b.txs, b.uncles, b.receipts, nil, nil, nil, nil, nil, logger); err != nil {
 					return nil, nil, fmt.Errorf("call to FinaliseAndAssemble: %w", err)
 				}
 				// Write state changes to db
@@ -407,7 +407,7 @@ func GenerateChain(config *chain.Config, parent *types.Block, engine consensus.E
 			}
 			_ = err
 			// Recreating block to make sure Root makes it into the header
-			block := types.NewBlock(b.header, b.txs, b.uncles, b.receipts, nil /* withdrawals */)
+			block := types.NewBlock(b.header, b.txs, b.uncles, b.receipts, nil /* withdrawals */, nil /*requests*/)
 			return block, b.receipts, nil
 		}
 		return nil, nil, fmt.Errorf("no engine to generate blocks")
diff --git a/core/genesis_write.go b/core/genesis_write.go
index 9520adf4b4c..ae5c095753e 100644
--- a/core/genesis_write.go
+++ b/core/genesis_write.go
@@ -518,6 +518,11 @@ func GenesisToBlock(g *types.Genesis, tmpDir string, logger log.Logger) (*types.
 		}
 	}
 
+	var requests []*types.Request // TODO(racytech): revisit this after merge, make sure everything is correct
+	if g.Config != nil && g.Config.IsPrague(g.Timestamp) {
+		requests = []*types.Request{}
+	}
+
 	var root libcommon.Hash
 	var statedb *state.IntraBlockState
 	wg := sync.WaitGroup{}
@@ -594,7 +599,7 @@ func GenesisToBlock(g *types.Genesis, tmpDir string, logger log.Logger) (*types.
 
 	head.Root = root
 
-	return types.NewBlock(head, nil, nil, nil, withdrawals), statedb, nil
+	return types.NewBlock(head, nil, nil, nil, withdrawals, requests), statedb, nil
 }
 
 func sortedAllocKeys(m types.GenesisAlloc) []string {
diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go
index 67d77f0472e..c2bbfa318a6 100644
--- a/core/rawdb/accessors_chain.go
+++ b/core/rawdb/accessors_chain.go
@@ -610,6 +610,7 @@ func ReadBody(db kv.Getter, hash common.Hash, number uint64) (*types.Body, uint6
 	body := new(types.Body)
 	body.Uncles = bodyForStorage.Uncles
 	body.Withdrawals = bodyForStorage.Withdrawals
+	body.Requests = bodyForStorage.Requests
 
 	if bodyForStorage.TxAmount < 2 {
 		panic(fmt.Sprintf("block body hash too few txs amount: %d, %d", number, bodyForStorage.TxAmount))
@@ -654,6 +655,7 @@ func WriteRawBody(db kv.RwTx, hash common.Hash, number uint64, body *types.RawBo
 		TxAmount:    uint32(len(body.Transactions)) + 2, /*system txs*/
 		Uncles:      body.Uncles,
 		Withdrawals: body.Withdrawals,
+		Requests:    body.Requests,
 	}
 	if err = WriteBodyForStorage(db, hash, number, &data); err != nil {
 		return false, fmt.Errorf("WriteBodyForStorage: %w", err)
@@ -677,6 +679,7 @@ func WriteBody(db kv.RwTx, hash common.Hash, number uint64, body *types.Body) (e
 		TxAmount:    uint32(len(body.Transactions)) + 2,
 		Uncles:      body.Uncles,
 		Withdrawals: body.Withdrawals,
+		Requests:    body.Requests,
 	}
 	if err = WriteBodyForStorage(db, hash, number, &data); err != nil {
 		return fmt.Errorf("failed to write body: %w", err)
@@ -979,7 +982,7 @@ func ReadBlock(tx kv.Getter, hash common.Hash, number uint64) *types.Block {
 	if body == nil {
 		return nil
 	}
-	return types.NewBlockFromStorage(hash, header, body.Transactions, body.Uncles, body.Withdrawals)
+	return types.NewBlockFromStorage(hash, header, body.Transactions, body.Uncles, body.Withdrawals, body.Requests)
 }
 
 // HasBlock - is more efficient than ReadBlock because doesn't read transactions.
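One subtlety the `GenesisToBlock` change above relies on (and that `NewBlock` in `core/types/block.go` enforces further down) is the nil-versus-empty distinction for the new slice: a nil `requests` means pre-Prague, so the header's `RequestsRoot` stays nil and the RLP field is omitted, while a non-nil empty slice means Prague is active with no requests yet, which must still yield `EmptyRootHash`. A small illustrative sketch of that gating follows; `Request` and `describe` are hypothetical stand-ins, not the real API.

package main

import "fmt"

type Request struct{}

// describe mirrors the gating used by GenesisToBlock and NewBlock: a nil
// slice means "requests do not exist yet" (pre-Prague), while a non-nil
// empty slice means "Prague is active but there are no requests", which
// must still produce an empty requests root rather than omitting the field.
func describe(requests []*Request) string {
	switch {
	case requests == nil:
		return "pre-Prague: no Requests field, RequestsRoot stays nil"
	case len(requests) == 0:
		return "Prague, empty: RequestsRoot = EmptyRootHash"
	default:
		return fmt.Sprintf("Prague: RequestsRoot derived from %d request(s)", len(requests))
	}
}

func main() {
	fmt.Println(describe(nil))
	fmt.Println(describe([]*Request{}))
	fmt.Println(describe([]*Request{{}}))
}

Collapsing the two cases would change block hashes, since an omitted field and an empty list encode differently in RLP.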
diff --git a/core/rawdb/accessors_chain_test.go b/core/rawdb/accessors_chain_test.go
index 9b6e9a84e77..9449f8ef641 100644
--- a/core/rawdb/accessors_chain_test.go
+++ b/core/rawdb/accessors_chain_test.go
@@ -25,6 +25,7 @@ import (
 	"testing"
 
 	libcommon "github.com/ledgerwatch/erigon-lib/common"
+	"github.com/ledgerwatch/erigon-lib/common/hexutility"
 	"github.com/ledgerwatch/erigon-lib/kv/memdb"
 	"github.com/ledgerwatch/erigon/core/rawdb"
 	"github.com/ledgerwatch/erigon/turbo/stages/mock"
@@ -573,6 +574,32 @@ func TestBlockWithdrawalsStorage(t *testing.T) {
 	withdrawals = append(withdrawals, &w)
 	withdrawals = append(withdrawals, &w2)
 
+	pk := [48]byte{}
+	copy(pk[:], libcommon.Hex2Bytes("3d1291c96ad36914068b56d93974c1b1d5afcb3fcd37b2ac4b144afd3f6fec5b"))
+	sig := [96]byte{}
+	copy(sig[:], libcommon.Hex2Bytes("20a0a807c717055ecb60dc9d5071fbd336f7f238d61a288173de20f33f79ebf4"))
+	r1 := types.Deposit{
+		Pubkey:                pk,
+		WithdrawalCredentials: libcommon.Hash(hexutility.Hex2Bytes("15095f80cde9763665d2eee3f8dfffc4a4405544c6fece33130e6e98809c4b98")),
+		Amount:                12324,
+		Signature:             sig,
+		Index:                 0,
+	}
+	pk2 := [48]byte{}
+	copy(pk2[:], libcommon.Hex2Bytes("d40ffb510bfc52b058d5e934026ce3eddaf0a4b1703920f03b32b97de2196a93"))
+	sig2 := [96]byte{}
+	copy(sig2[:], libcommon.Hex2Bytes("dc40cf2c33c6fb17e11e3ffe455063f1bf2280a3b08563f8b33aa359a16a383c"))
+	r2 := types.Deposit{
+		Pubkey:                pk2,
+		WithdrawalCredentials: libcommon.Hash(hexutility.Hex2Bytes("d73d9332eb1229e58aa7e33e9a5079d9474f68f747544551461bf3ff9f7ccd64")),
+		Amount:                12324,
+		Signature:             sig2,
+		Index:                 0,
+	}
+	deposits := make(types.Deposits, 0)
+	deposits = append(deposits, &r1)
+	deposits = append(deposits, &r2)
+	reqs := deposits.ToRequests()
+
 	// Create a test block to move around the database and make sure it's really new
 	block := types.NewBlockWithHeader(&types.Header{
 		Number: big.NewInt(1),
@@ -592,8 +619,7 @@ func TestBlockWithdrawalsStorage(t *testing.T) {
 	}
 
 	// Write withdrawals to block
-	wBlock := types.NewBlockFromStorage(block.Hash(), block.Header(), block.Transactions(), block.Uncles(), withdrawals)
-
+	wBlock := types.NewBlockFromStorage(block.Hash(), block.Header(), block.Transactions(), block.Uncles(), withdrawals, reqs)
 	if err := rawdb.WriteHeader(tx, wBlock.HeaderNoCopy()); err != nil {
 		t.Fatalf("Could not write body: %v", err)
 	}
@@ -647,6 +673,28 @@ func TestBlockWithdrawalsStorage(t *testing.T) {
 	require.Equal(libcommon.Address{0: 0xff}, rw2.Address)
 	require.Equal(uint64(1001), rw2.Amount)
 
+	readRequests := entry.Requests
+	require.True(len(entry.Requests) == 2)
+	rd1 := readRequests[0]
+	rd2 := readRequests[1]
+	require.True(rd1.Type() == types.DepositRequestType)
+	require.True(rd2.Type() == types.DepositRequestType)
+
+	readDeposits := (types.Requests)(readRequests).Deposits()
+	d1 := readDeposits[0]
+	d2 := readDeposits[1]
+	require.Equal(d1.Pubkey, r1.Pubkey)
+	require.Equal(d1.Amount, r1.Amount)
+	require.Equal(d1.Signature, r1.Signature)
+	require.Equal(d1.WithdrawalCredentials, r1.WithdrawalCredentials)
+	require.Equal(d1.Index, r1.Index)
+
+	require.Equal(d2.Pubkey, r2.Pubkey)
+	require.Equal(d2.Amount, r2.Amount)
+	require.Equal(d2.Signature, r2.Signature)
+	require.Equal(d2.WithdrawalCredentials, r2.WithdrawalCredentials)
+	require.Equal(d2.Index, r2.Index)
+
 	// Delete the block and verify the execution
 	if err := rawdb.TruncateBlocks(context.Background(), tx, block.NumberU64()); err != nil {
 		t.Fatal(err)
diff --git a/core/rawdb/accessors_indexes_test.go b/core/rawdb/accessors_indexes_test.go
index 6901a6c5eb3..2845724b61e 100644
--- a/core/rawdb/accessors_indexes_test.go
+++ b/core/rawdb/accessors_indexes_test.go
@@ -63,7 +63,7 @@ func TestLookupStorage(t *testing.T) {
 		tx3 := types.NewTransaction(3, libcommon.BytesToAddress([]byte{0x33}), uint256.NewInt(333), 3333, uint256.NewInt(33333), []byte{0x33, 0x33, 0x33})
 		txs := []types.Transaction{tx1, tx2, tx3}
 
-		block := types.NewBlock(&types.Header{Number: big.NewInt(314)}, txs, nil, nil, nil)
+		block := types.NewBlock(&types.Header{Number: big.NewInt(314)}, txs, nil, nil, nil, nil /*requests*/)
 
 		// Check that no transactions entries are in a pristine database
 		for i, txn := range txs {
diff --git a/core/state/txtask.go b/core/state/txtask.go
index 616f2094b36..c78f84684c3 100644
--- a/core/state/txtask.go
+++ b/core/state/txtask.go
@@ -62,6 +62,8 @@ type TxTask struct {
 	// Need investigate if we can pass here - only limited amount of receipts
 	// And remove this field if possible - because it will make problems for parallel-execution
 	BlockReceipts types.Receipts
+
+	Requests types.Requests
 }
 
 func (t *TxTask) Reset() {
diff --git a/core/types/block.go b/core/types/block.go
index f35ef67901f..fb0282e67d2 100644
--- a/core/types/block.go
+++ b/core/types/block.go
@@ -105,6 +105,8 @@ type Header struct {
 
 	ParentBeaconBlockRoot *libcommon.Hash `json:"parentBeaconBlockRoot"` // EIP-4788
 
+	RequestsRoot *libcommon.Hash `json:"requestsRoot"` // EIP-7685
+
 	// The verkle proof is ignored in legacy headers
 	Verkle      bool
 	VerkleProof []byte
@@ -161,6 +163,10 @@ func (h *Header) EncodingSize() int {
 		encodingSize += 33
 	}
 
+	if h.RequestsRoot != nil {
+		encodingSize += 33
+	}
+
 	if h.Verkle {
 		// Encoding of Verkle Proof
 		encodingSize += rlp2.StringLen(h.VerkleProof)
@@ -310,6 +316,16 @@ func (h *Header) EncodeRLP(w io.Writer) error {
 		}
 	}
 
+	if h.RequestsRoot != nil {
+		b[0] = 128 + 32
+		if _, err := w.Write(b[:1]); err != nil {
+			return err
+		}
+		if _, err := w.Write(h.RequestsRoot.Bytes()); err != nil {
+			return err
+		}
+	}
+
 	if h.Verkle {
 		if err := rlp.EncodeString(h.VerkleProof, w, b[:]); err != nil {
 			return err
@@ -498,6 +514,23 @@ func (h *Header) DecodeRLP(s *rlp.Stream) error {
 	h.ParentBeaconBlockRoot = new(libcommon.Hash)
 	h.ParentBeaconBlockRoot.SetBytes(b)
 
+	// RequestsRoot
+	if b, err = s.Bytes(); err != nil {
+		if errors.Is(err, rlp.EOL) {
+			h.RequestsRoot = nil
+			if err := s.ListEnd(); err != nil {
+				return fmt.Errorf("close header struct (no RequestsRoot): %w", err)
+			}
+			return nil
+		}
+		return fmt.Errorf("read RequestsRoot: %w", err)
+	}
+	if len(b) != 32 {
+		return fmt.Errorf("wrong size for RequestsRoot: %d", len(b))
+	}
+	h.RequestsRoot = new(libcommon.Hash)
+	h.RequestsRoot.SetBytes(b)
+
 	if h.Verkle {
 		if h.VerkleProof, err = s.Bytes(); err != nil {
 			return fmt.Errorf("read VerkleProof: %w", err)
@@ -557,6 +590,9 @@ func (h *Header) Size() common.StorageSize {
 	if h.ParentBeaconBlockRoot != nil {
 		s += common.StorageSize(32)
 	}
+	if h.RequestsRoot != nil {
+		s += common.StorageSize(32)
+	}
 	return s
 }
@@ -591,6 +627,7 @@ type Body struct {
 	Transactions []Transaction
 	Uncles       []*Header
 	Withdrawals  []*Withdrawal
+	Requests     []*Request
 }
 
 // RawBody is semi-parsed variant of Body, where transactions are still unparsed RLP strings
@@ -600,6 +637,7 @@ type RawBody struct {
 	Transactions [][]byte
 	Uncles       []*Header
 	Withdrawals  []*Withdrawal
+	Requests     []*Request
 }
 
 type BodyForStorage struct {
@@ -607,6 +645,7 @@ type BodyForStorage struct {
 	TxAmount    uint32
 	Uncles      []*Header
 	Withdrawals []*Withdrawal
+	Requests    []*Request
 }
 
 // Alternative representation of the Block.
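The header changes above reuse the optional-trailing-field idiom already applied to `WithdrawalsHash` and `ParentBeaconBlockRoot`: a non-nil `RequestsRoot` is encoded as a 32-byte RLP string (one prefix byte `0x80 + 32 = 0xa0` followed by the hash), and a nil one is simply not written, so decoders of pre-Prague headers hit `rlp.EOL` and stop cleanly. A byte-level sketch of just the encode step (illustrative only; the real method also propagates the error from each `Write`):

package main

import (
	"bytes"
	"fmt"
)

// appendOptionalHash mirrors the shape of the RequestsRoot branch in
// Header.EncodeRLP: emit a 32-byte RLP string only when the field is set.
func appendOptionalHash(buf *bytes.Buffer, h *[32]byte) {
	if h == nil {
		return // pre-Prague header: field omitted entirely; decoders see rlp.EOL here
	}
	buf.WriteByte(0x80 + 32) // 0xa0: RLP prefix for a 32-byte string
	buf.Write(h[:])
}

func main() {
	var buf bytes.Buffer
	root := [32]byte{0xde, 0xad, 0xbe, 0xef}
	appendOptionalHash(&buf, &root)
	fmt.Printf("prefix=%#x len=%d\n", buf.Bytes()[0], buf.Len()) // prefix=0xa0 len=33
}

Because the field is strictly append-only at the end of the header list, older encodings remain valid prefixes of newer ones, which is what keeps pre-Prague headers decodable without a format version bump.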
@@ -638,6 +677,7 @@ type Block struct { uncles []*Header transactions Transactions withdrawals []*Withdrawal + requests []*Request // caches hash atomic.Value @@ -666,11 +706,11 @@ func (b *Body) SendersFromTxs() []libcommon.Address { } func (rb RawBody) EncodingSize() int { - payloadSize, _, _, _ := rb.payloadSize() + payloadSize, _, _, _, _ := rb.payloadSize() return payloadSize } -func (rb RawBody) payloadSize() (payloadSize, txsLen, unclesLen, withdrawalsLen int) { +func (rb RawBody) payloadSize() (payloadSize, txsLen, unclesLen, withdrawalsLen, requestsLen int) { // size of Transactions for _, tx := range rb.Transactions { txsLen += len(tx) @@ -687,11 +727,17 @@ func (rb RawBody) payloadSize() (payloadSize, txsLen, unclesLen, withdrawalsLen payloadSize += rlp2.ListPrefixLen(withdrawalsLen) + withdrawalsLen } - return payloadSize, txsLen, unclesLen, withdrawalsLen + // size of requests + if rb.Requests != nil { + requestsLen += encodingSizeGeneric(rb.Requests) + payloadSize += rlp2.ListPrefixLen(requestsLen) + requestsLen + } + + return payloadSize, txsLen, unclesLen, withdrawalsLen, requestsLen } func (rb RawBody) EncodeRLP(w io.Writer) error { - payloadSize, txsLen, unclesLen, withdrawalsLen := rb.payloadSize() + payloadSize, txsLen, unclesLen, withdrawalsLen, requestsLen := rb.payloadSize() var b [33]byte // prefix if err := EncodeStructSizePrefix(payloadSize, w, b[:]); err != nil { @@ -716,6 +762,12 @@ func (rb RawBody) EncodeRLP(w io.Writer) error { return err } } + // encode Requests + if rb.Requests != nil { + if err := encodeRLPGeneric(rb.Requests, requestsLen, w, b[:]); err != nil { + return err + } + } return nil } @@ -751,11 +803,16 @@ func (rb *RawBody) DecodeRLP(s *rlp.Stream) error { if err := decodeWithdrawals(&rb.Withdrawals, s); err != nil { return err } + // decode Requests + rb.Requests = []*Request{} + if err := decodeRequests(&rb.Requests, s); err != nil { + return err + } return s.ListEnd() } -func (bfs BodyForStorage) payloadSize() (payloadSize, unclesLen, withdrawalsLen int) { +func (bfs BodyForStorage) payloadSize() (payloadSize, unclesLen, withdrawalsLen, requestsLen int) { baseTxIdLen := 1 + rlp.IntLenExcludingHead(bfs.BaseTxId) txAmountLen := 1 + rlp.IntLenExcludingHead(uint64(bfs.TxAmount)) @@ -772,11 +829,17 @@ func (bfs BodyForStorage) payloadSize() (payloadSize, unclesLen, withdrawalsLen payloadSize += rlp2.ListPrefixLen(withdrawalsLen) + withdrawalsLen } - return payloadSize, unclesLen, withdrawalsLen + // size of Requests + if bfs.Requests != nil { + requestsLen += encodingSizeGeneric(bfs.Requests) + payloadSize += rlp2.ListPrefixLen(requestsLen) + requestsLen + } + + return payloadSize, unclesLen, withdrawalsLen, requestsLen } func (bfs BodyForStorage) EncodeRLP(w io.Writer) error { - payloadSize, unclesLen, withdrawalsLen := bfs.payloadSize() + payloadSize, unclesLen, withdrawalsLen, requestsLen := bfs.payloadSize() var b [33]byte // prefix @@ -805,6 +868,12 @@ func (bfs BodyForStorage) EncodeRLP(w io.Writer) error { return err } } + // encode Requests + if bfs.Requests != nil { + if err := encodeRLPGeneric(bfs.Requests, requestsLen, w, b[:]); err != nil { + return err + } + } return nil } @@ -831,16 +900,20 @@ func (bfs *BodyForStorage) DecodeRLP(s *rlp.Stream) error { if err := decodeWithdrawals(&bfs.Withdrawals, s); err != nil { return err } - + // decode Requests + bfs.Requests = []*Request{} + if err := decodeRequests(&bfs.Requests, s); err != nil { + return err + } return s.ListEnd() } func (bb Body) EncodingSize() int { - payloadSize, _, _, _ 
:= bb.payloadSize() + payloadSize, _, _, _, _ := bb.payloadSize() return payloadSize } -func (bb Body) payloadSize() (payloadSize int, txsLen, unclesLen, withdrawalsLen int) { +func (bb Body) payloadSize() (payloadSize int, txsLen, unclesLen, withdrawalsLen, requestsLen int) { // size of Transactions txsLen += encodingSizeGeneric(bb.Transactions) payloadSize += rlp2.ListPrefixLen(txsLen) + txsLen @@ -855,11 +928,17 @@ func (bb Body) payloadSize() (payloadSize int, txsLen, unclesLen, withdrawalsLen payloadSize += rlp2.ListPrefixLen(withdrawalsLen) + withdrawalsLen } - return payloadSize, txsLen, unclesLen, withdrawalsLen + // size of Requests + if bb.Requests != nil { + requestsLen += encodingSizeGeneric(bb.Requests) + payloadSize += rlp2.ListPrefixLen(requestsLen) + requestsLen + } + + return payloadSize, txsLen, unclesLen, withdrawalsLen, requestsLen } func (bb Body) EncodeRLP(w io.Writer) error { - payloadSize, txsLen, unclesLen, withdrawalsLen := bb.payloadSize() + payloadSize, txsLen, unclesLen, withdrawalsLen, requestsLen := bb.payloadSize() var b [33]byte // prefix if err := EncodeStructSizePrefix(payloadSize, w, b[:]); err != nil { @@ -879,6 +958,12 @@ func (bb Body) EncodeRLP(w io.Writer) error { return err } } + // encode Requests + if bb.Requests != nil { + if err := encodeRLPGeneric(bb.Requests, requestsLen, w, b[:]); err != nil { + return err + } + } return nil } @@ -900,6 +985,10 @@ func (bb *Body) DecodeRLP(s *rlp.Stream) error { if err := decodeWithdrawals(&bb.Withdrawals, s); err != nil { return err } + // decode Requests + if err := decodeRequests(&bb.Requests, s); err != nil { + return err + } return s.ListEnd() } @@ -910,7 +999,7 @@ func (bb *Body) DecodeRLP(s *rlp.Stream) error { // The values of TxHash, UncleHash, ReceiptHash, Bloom, and WithdrawalHash // in the header are ignored and set to the values derived from // the given txs, uncles, receipts, and withdrawals. -func NewBlock(header *Header, txs []Transaction, uncles []*Header, receipts []*Receipt, withdrawals []*Withdrawal) *Block { +func NewBlock(header *Header, txs []Transaction, uncles []*Header, receipts []*Receipt, withdrawals []*Withdrawal, requests []*Request) *Block { b := &Block{header: CopyHeader(header)} // TODO: panic if len(txs) != len(receipts) @@ -957,13 +1046,28 @@ func NewBlock(header *Header, txs []Transaction, uncles []*Header, receipts []*R b.header.ParentBeaconBlockRoot = header.ParentBeaconBlockRoot + if requests == nil { + b.header.RequestsRoot = nil + } else if len(requests) == 0 { + b.header.RequestsRoot = &EmptyRootHash // TODO(racytech): is this correct? 
+ b.requests = make(Requests, len(requests)) + } else { + h := DeriveSha(Requests(requests)) + b.header.RequestsRoot = &h + b.requests = make(Requests, len(requests)) + for i, r := range requests { + rCopy := *r + b.requests[i] = &rCopy + } + } + return b } // NewBlockFromStorage like NewBlock but used to create Block object when read it from DB // in this case no reason to copy parts, or re-calculate headers fields - they are all stored in DB -func NewBlockFromStorage(hash libcommon.Hash, header *Header, txs []Transaction, uncles []*Header, withdrawals []*Withdrawal) *Block { - b := &Block{header: header, transactions: txs, uncles: uncles, withdrawals: withdrawals} +func NewBlockFromStorage(hash libcommon.Hash, header *Header, txs []Transaction, uncles []*Header, withdrawals []*Withdrawal, requests []*Request) *Block { + b := &Block{header: header, transactions: txs, uncles: uncles, withdrawals: withdrawals, requests: requests} b.hash.Store(hash) return b } @@ -983,6 +1087,7 @@ func NewBlockFromNetwork(header *Header, body *Body) *Block { transactions: body.Transactions, uncles: body.Uncles, withdrawals: body.Withdrawals, + requests: body.Requests, } } @@ -1024,6 +1129,10 @@ func CopyHeader(h *Header) *Header { cpy.ParentBeaconBlockRoot = new(libcommon.Hash) cpy.ParentBeaconBlockRoot.SetBytes(h.ParentBeaconBlockRoot.Bytes()) } + if h.RequestsRoot != nil { + cpy.RequestsRoot = new(libcommon.Hash) + cpy.RequestsRoot.SetBytes(h.RequestsRoot.Bytes()) + } return &cpy } @@ -1055,11 +1164,16 @@ func (bb *Block) DecodeRLP(s *rlp.Stream) error { if err := decodeWithdrawals(&bb.withdrawals, s); err != nil { return err } + // decode Requests + bb.requests = []*Request{} + if err := decodeRequests(&bb.requests, s); err != nil { + return err + } return s.ListEnd() } -func (bb Block) payloadSize() (payloadSize int, txsLen, unclesLen, withdrawalsLen int) { +func (bb Block) payloadSize() (payloadSize int, txsLen, unclesLen, withdrawalsLen, requestsLen int) { // size of Header headerLen := bb.header.EncodingSize() payloadSize += rlp2.ListPrefixLen(headerLen) + headerLen @@ -1078,17 +1192,23 @@ func (bb Block) payloadSize() (payloadSize int, txsLen, unclesLen, withdrawalsLe payloadSize += rlp2.ListPrefixLen(withdrawalsLen) + withdrawalsLen } - return payloadSize, txsLen, unclesLen, withdrawalsLen + // size of Requests + if bb.requests != nil { + requestsLen += encodingSizeGeneric(bb.requests) + payloadSize += rlp2.ListPrefixLen(requestsLen) + requestsLen + } + + return payloadSize, txsLen, unclesLen, withdrawalsLen, requestsLen } func (bb Block) EncodingSize() int { - payloadSize, _, _, _ := bb.payloadSize() + payloadSize, _, _, _, _ := bb.payloadSize() return payloadSize } // EncodeRLP serializes b into the Ethereum RLP block format. 
func (bb Block) EncodeRLP(w io.Writer) error { - payloadSize, txsLen, unclesLen, withdrawalsLen := bb.payloadSize() + payloadSize, txsLen, unclesLen, withdrawalsLen, requestsLen := bb.payloadSize() var b [33]byte // prefix if err := EncodeStructSizePrefix(payloadSize, w, b[:]); err != nil { @@ -1112,6 +1232,12 @@ func (bb Block) EncodeRLP(w io.Writer) error { return err } } + // encode Requests + if bb.requests != nil { + if err := encodeRLPGeneric(bb.requests, requestsLen, w, b[:]); err != nil { + return err + } + } return nil } @@ -1154,6 +1280,8 @@ func (b *Block) BaseFee() *big.Int { func (b *Block) WithdrawalsHash() *libcommon.Hash { return b.header.WithdrawalsHash } func (b *Block) Withdrawals() Withdrawals { return b.withdrawals } func (b *Block) ParentBeaconBlockRoot() *libcommon.Hash { return b.header.ParentBeaconBlockRoot } +func (b *Block) RequestsRoot() *libcommon.Hash { return b.header.RequestsRoot } +func (b *Block) Requests() Requests { return b.requests } // Header returns a deep-copy of the entire block header using CopyHeader() func (b *Block) Header() *Header { return CopyHeader(b.header) } @@ -1161,7 +1289,7 @@ func (b *Block) HeaderNoCopy() *Header { return b.header } // Body returns the non-header content of the block. func (b *Block) Body() *Body { - bd := &Body{Transactions: b.transactions, Uncles: b.uncles, Withdrawals: b.withdrawals} + bd := &Body{Transactions: b.transactions, Uncles: b.uncles, Withdrawals: b.withdrawals, Requests: b.requests} bd.SendersFromTxs() return bd } @@ -1446,6 +1574,25 @@ func decodeWithdrawals(appendList *[]*Withdrawal, s *rlp.Stream) error { return checkErrListEnd(s, err) } +func decodeRequests(appendList *[]*Request, s *rlp.Stream) error { + var err error + if _, err = s.List(); err != nil { + if errors.Is(err, rlp.EOL) { + *appendList = nil + return nil + } + return fmt.Errorf("read requests: %v", err) + } + for err == nil { + var r Request + if err = r.DecodeRLP(s); err != nil { + break + } + *appendList = append(*appendList, &r) + } + return checkErrListEnd(s, err) +} + func checkErrListEnd(s *rlp.Stream, err error) error { if !errors.Is(err, rlp.EOL) { return err diff --git a/core/types/block_test.go b/core/types/block_test.go index 9db421134ac..7d7ac4a4da3 100644 --- a/core/types/block_test.go +++ b/core/types/block_test.go @@ -358,7 +358,7 @@ func makeBenchBlock() *Block { Extra: []byte("benchmark uncle"), } } - return NewBlock(header, txs, uncles, receipts, nil /* withdrawals */) + return NewBlock(header, txs, uncles, receipts, nil /* withdrawals */, nil /*requests*/) } func TestCanEncodeAndDecodeRawBody(t *testing.T) { @@ -506,7 +506,7 @@ func TestWithdrawalsEncoding(t *testing.T) { Amount: 5_000_000_000, } - block := NewBlock(&header, nil, nil, nil, withdrawals) + block := NewBlock(&header, nil, nil, nil, withdrawals, nil /*requests*/) _ = block.Size() encoded, err := rlp.EncodeToBytes(block) @@ -518,7 +518,7 @@ func TestWithdrawalsEncoding(t *testing.T) { assert.Equal(t, block, &decoded) // Now test with empty withdrawals - block2 := NewBlock(&header, nil, nil, nil, []*Withdrawal{}) + block2 := NewBlock(&header, nil, nil, nil, []*Withdrawal{}, nil /*requests*/) _ = block2.Size() encoded2, err := rlp.EncodeToBytes(block2) diff --git a/core/types/deposit.go b/core/types/deposit.go new file mode 100644 index 00000000000..00b25a66080 --- /dev/null +++ b/core/types/deposit.go @@ -0,0 +1,111 @@ +package types + +import ( + "bytes" + "encoding/binary" + "fmt" + + libcommon "github.com/ledgerwatch/erigon-lib/common" + 
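+	// abi is used below to decode the DepositEvent log emitted by the
+	// beacon chain deposit contract (see DepositABI).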
"github.com/ledgerwatch/erigon/accounts/abi" + "github.com/ledgerwatch/erigon/rlp" +) + +const ( + pLen = 48 // pubkey size + wLen = 32 // withdrawalCredentials size + sLen = 96 // signature size +) + +var ( + // DepositABI is an ABI instance of beacon chain deposit events. + DepositABI = abi.ABI{Events: map[string]abi.Event{"DepositEvent": depositEvent}} + bytesT, _ = abi.NewType("bytes", "", nil) + depositEvent = abi.NewEvent("DepositEvent", "DepositEvent", false, abi.Arguments{ + {Name: "pubkey", Type: bytesT, Indexed: false}, + {Name: "withdrawal_credentials", Type: bytesT, Indexed: false}, + {Name: "amount", Type: bytesT, Indexed: false}, + {Name: "signature", Type: bytesT, Indexed: false}, + {Name: "index", Type: bytesT, Indexed: false}}, + ) +) + +type Deposit struct { + Pubkey [pLen]byte `json:"pubkey"` // public key of validator + WithdrawalCredentials libcommon.Hash `json:"withdrawalCredentials"` // beneficiary of the validator + Amount uint64 `json:"amount"` // deposit size in Gwei + Signature [sLen]byte `json:"signature"` // signature over deposit msg + Index uint64 `json:"index"` // deposit count value +} + +func (d *Deposit) requestType() byte { return DepositRequestType } +func (d *Deposit) encodeRLP(w *bytes.Buffer) error { return rlp.Encode(w, d) } +func (d *Deposit) decodeRLP(data []byte) error { return rlp.DecodeBytes(data, d) } +func (d *Deposit) copy() RequestData { + return &Deposit{ + Pubkey: d.Pubkey, + WithdrawalCredentials: d.WithdrawalCredentials, + Amount: d.Amount, + Signature: d.Signature, + Index: d.Index, + } +} + +func (d *Deposit) encodingSize() (encodingSize int) { + encodingSize++ + encodingSize += rlp.IntLenExcludingHead(d.Amount) + encodingSize++ + encodingSize += rlp.IntLenExcludingHead(d.Index) + + encodingSize += 180 // 1 + 48 + 1 + 32 + 1 + 1 + 96 (0x80 + pLen, 0x80 + wLen, 0xb8 + 2 + sLen) + return encodingSize +} + +// field type overrides for abi upacking +type depositUnpacking struct { + Pubkey []byte + WithdrawalCredentials []byte + Amount []byte + Signature []byte + Index []byte +} + +// unpackIntoDeposit unpacks a serialized DepositEvent. +func unpackIntoDeposit(data []byte) (*Deposit, error) { + var du depositUnpacking + if err := DepositABI.UnpackIntoInterface(&du, "DepositEvent", data); err != nil { + return nil, err + } + var d Deposit + copy(d.Pubkey[:], du.Pubkey) + copy(d.WithdrawalCredentials[:], du.WithdrawalCredentials) + d.Amount = binary.LittleEndian.Uint64(du.Amount) + copy(d.Signature[:], du.Signature) + d.Index = binary.LittleEndian.Uint64(du.Index) + + return &d, nil +} + +// ParseDepositLogs extracts the EIP-6110 deposit values from logs emitted by +// BeaconDepositContract. 
+func ParseDepositLogs(logs []*Log, depositContractAddress *libcommon.Address) (Requests, error) { + var deposits Requests + for _, log := range logs { + if log.Address == *depositContractAddress { + d, err := unpackIntoDeposit(log.Data) + if err != nil { + return nil, fmt.Errorf("unable to parse deposit data: %v", err) + } + deposits = append(deposits, NewRequest(d)) + } + } + return deposits, nil +} + +type Deposits []*Deposit + +func (ds Deposits) ToRequests() (reqs Requests) { + for _, d := range ds { + reqs = append(reqs, NewRequest(d)) + } + return +} diff --git a/core/types/encdec_test.go b/core/types/encdec_test.go index f446939d285..082bc8245cf 100644 --- a/core/types/encdec_test.go +++ b/core/types/encdec_test.go @@ -69,6 +69,23 @@ func (tr *TRand) RandWithdrawal() *Withdrawal { } } +func (tr *TRand) RandDeposit() *Deposit { + return &Deposit{ + Pubkey: [48]byte(tr.RandBytes(48)), + WithdrawalCredentials: tr.RandHash(), + Amount: *tr.RandUint64(), + Signature: [96]byte(tr.RandBytes(96)), + Index: *tr.RandUint64(), + } +} + +func (tr *TRand) RandRequest() *Request { + d := tr.RandDeposit() + var r Request + r.inner = d.copy() + return &r +} + func (tr *TRand) RandHeader() *Header { wHash := tr.RandHash() pHash := tr.RandHash() @@ -210,11 +227,21 @@ func (tr *TRand) RandWithdrawals(size int) []*Withdrawal { } return withdrawals } + +func (tr *TRand) RandRequests(size int) []*Request { + requests := make([]*Request, size) + for i := 0; i < size; i++ { + requests[i] = tr.RandRequest() + } + return requests +} + func (tr *TRand) RandRawBody() *RawBody { return &RawBody{ Transactions: tr.RandRawTransactions(tr.RandIntInRange(1, 6)), Uncles: tr.RandHeaders(tr.RandIntInRange(1, 6)), Withdrawals: tr.RandWithdrawals(tr.RandIntInRange(1, 6)), + Requests: tr.RandRequests(tr.RandIntInRange(1, 6)), } } @@ -241,6 +268,7 @@ func (tr *TRand) RandBody() *Body { Transactions: tr.RandTransactions(tr.RandIntInRange(1, 6)), Uncles: tr.RandHeaders(tr.RandIntInRange(1, 6)), Withdrawals: tr.RandWithdrawals(tr.RandIntInRange(1, 6)), + Requests: tr.RandRequests(tr.RandIntInRange(1, 6)), } } @@ -254,13 +282,13 @@ func isEqualBytes(a, b []byte) bool { return true } -func check(t *testing.T, f string, got, want interface{}) { - if !reflect.DeepEqual(got, want) { - t.Errorf("%s mismatch: got %v, want %v", f, got, want) +func check(t *testing.T, f string, want, got interface{}) { + if !reflect.DeepEqual(want, got) { + t.Errorf("%s mismatch: want %v, got %v", f, want, got) } } -func compareHeaders(t *testing.T, a, b *Header) { +func checkHeaders(t *testing.T, a, b *Header) { check(t, "Header.ParentHash", a.ParentHash, b.ParentHash) check(t, "Header.UncleHash", a.UncleHash, b.UncleHash) check(t, "Header.Coinbase", a.Coinbase, b.Coinbase) @@ -283,7 +311,7 @@ func compareHeaders(t *testing.T, a, b *Header) { check(t, "Header.ParentBeaconBlockRoot", a.ParentBeaconBlockRoot, b.ParentBeaconBlockRoot) } -func compareWithdrawals(t *testing.T, a, b *Withdrawal) { +func checkWithdrawals(t *testing.T, a, b *Withdrawal) { check(t, "Withdrawal.Index", a.Index, b.Index) check(t, "Withdrawal.Validator", a.Validator, b.Validator) check(t, "Withdrawal.Address", a.Address, b.Address) @@ -311,85 +339,99 @@ func compareTransactions(t *testing.T, a, b Transaction) { check(t, "Tx.S", s1, s2) } -// func compareDeposits(t *testing.T, a, b *Deposit) { -// check(t, "Deposit.Pubkey", a.Index, b.Index) -// check(t, "Deposit.WithdrawalCredentials", a.WithdrawalCredentials, b.WithdrawalCredentials) -// check(t, "Deposit.Amount", a.Amount, 
b.Amount)
-// 	check(t, "Deposit.Signature", a.Signature, b.Signature)
-// 	check(t, "Deposit.Index", a.Index, b.Index)
-// }
-
-func compareRawBodies(t *testing.T, a, b *RawBody) error {
+func compareDeposits(t *testing.T, a, b *Deposit) {
+	check(t, "Deposit.Pubkey", a.Pubkey, b.Pubkey)
+	check(t, "Deposit.WithdrawalCredentials", a.WithdrawalCredentials, b.WithdrawalCredentials)
+	check(t, "Deposit.Amount", a.Amount, b.Amount)
+	check(t, "Deposit.Signature", a.Signature, b.Signature)
+	check(t, "Deposit.Index", a.Index, b.Index)
+}
 
-	atLen, btLen := len(a.Transactions), len(b.Transactions)
-	if atLen != btLen {
-		return fmt.Errorf("transactions len mismatch: expected: %v, got: %v", atLen, btLen)
+func checkRequests(t *testing.T, a, b *Request) {
+	if a.Type() != b.Type() {
+		t.Errorf("request type mismatch: request-a: %v, request-b: %v", a.Type(), b.Type())
+		return // the type assertions below would panic on mismatched types
 	}
-	for i := 0; i < atLen; i++ {
-		if !isEqualBytes(a.Transactions[i], b.Transactions[i]) {
-			return fmt.Errorf("byte transactions are not equal")
-		}
+	switch a.Type() {
+	case DepositRequestType:
+		c := a.inner.(*Deposit)
+		d := b.inner.(*Deposit)
+		compareDeposits(t, c, d)
+	default:
+		t.Errorf("unknown request type: %v", a.Type())
 	}
+}
 
-	auLen, buLen := len(a.Uncles), len(b.Uncles)
+func compareHeaders(t *testing.T, a, b []*Header) error {
+	auLen, buLen := len(a), len(b)
 	if auLen != buLen {
 		return fmt.Errorf("uncles len mismatch: expected: %v, got: %v", auLen, buLen)
 	}
 	for i := 0; i < auLen; i++ {
-		compareHeaders(t, a.Uncles[i], b.Uncles[i])
+		checkHeaders(t, a[i], b[i])
 	}
+	return nil
+}
 
-	awLen, bwLen := len(a.Withdrawals), len(b.Withdrawals)
+func compareWithdrawals(t *testing.T, a, b []*Withdrawal) error {
+	awLen, bwLen := len(a), len(b)
 	if awLen != bwLen {
-		return fmt.Errorf("withdrawals len mismatch: expected: %v, got: %v", auLen, buLen)
+		return fmt.Errorf("withdrawals len mismatch: expected: %v, got: %v", awLen, bwLen)
 	}
 	for i := 0; i < awLen; i++ {
-		compareWithdrawals(t, a.Withdrawals[i], b.Withdrawals[i])
+		checkWithdrawals(t, a[i], b[i])
+	}
+	return nil
+}
+
+func compareRequests(t *testing.T, a, b []*Request) error {
+	arLen, brLen := len(a), len(b)
+	if arLen != brLen {
+		return fmt.Errorf("requests len mismatch: expected: %v, got: %v", arLen, brLen)
 	}
+	for i := 0; i < arLen; i++ {
+		checkRequests(t, a[i], b[i])
+	}
 	return nil
 }
 
-func compareBodies(t *testing.T, a, b *Body) error {
+func compareRawBodies(t *testing.T, a, b *RawBody) error {
 
 	atLen, btLen := len(a.Transactions), len(b.Transactions)
 	if atLen != btLen {
-		return fmt.Errorf("txns len mismatch: expected: %v, got: %v", atLen, btLen)
+		return fmt.Errorf("transactions len mismatch: expected: %v, got: %v", atLen, btLen)
 	}
 
 	for i := 0; i < atLen; i++ {
-		compareTransactions(t, a.Transactions[i], b.Transactions[i])
+		if !isEqualBytes(a.Transactions[i], b.Transactions[i]) {
+			return fmt.Errorf("byte transactions are not equal")
+		}
 	}
 
-	auLen, buLen := len(a.Uncles), len(b.Uncles)
-	if auLen != buLen {
-		return fmt.Errorf("uncles len mismatch: expected: %v, got: %v", auLen, buLen)
-	}
+	if err := compareHeaders(t, a.Uncles, b.Uncles); err != nil {
+		return err
+	}
+	if err := compareWithdrawals(t, a.Withdrawals, b.Withdrawals); err != nil {
+		return err
+	}
+	if err := compareRequests(t, a.Requests, b.Requests); err != nil {
+		return err
+	}
 
-	for i := 0; i < auLen; i++ {
-		compareHeaders(t, a.Uncles[i], b.Uncles[i])
-	}
+	return nil
+}
 
-	awLen, bwLen := len(a.Withdrawals), len(b.Withdrawals)
-	if awLen != bwLen {
-		return fmt.Errorf("withdrawals len mismatch: expected: %v, got: %v", awLen, bwLen)
-	}
+func compareBodies(t *testing.T, a, b *Body) error {
 
-	for i := 0; i < awLen; i++ {
-		compareWithdrawals(t, a.Withdrawals[i], b.Withdrawals[i])
+	atLen, btLen := len(a.Transactions), len(b.Transactions)
+	if atLen != btLen {
+		return fmt.Errorf("txns len mismatch: expected: %v, got: %v", atLen, btLen)
 	}
 
-	// adLen, bdLen := len(a.deposits), len(b.deposits)
-	// if adLen != bdLen {
-	// 	return fmt.Errorf("deposits len mismatch: expected: %v, got: %v", adLen, bdLen)
-	// }
+	for i := 0; i < atLen; i++ {
+		compareTransactions(t, a.Transactions[i], b.Transactions[i])
+	}
 
-	// for i := 0; i < adLen; i++ {
-	// 	compareDeposits(t, a.deposits[i], b.deposits[i])
-	// }
+	if err := compareHeaders(t, a.Uncles, b.Uncles); err != nil {
+		return err
+	}
+	if err := compareWithdrawals(t, a.Withdrawals, b.Withdrawals); err != nil {
+		return err
+	}
+	if err := compareRequests(t, a.Requests, b.Requests); err != nil {
+		return err
+	}
 
 	return nil
 }
@@ -440,3 +482,23 @@ func TestBodyEncodeDecodeRLP(t *testing.T) {
 		}
 	}
 }
+
+func TestDepositEncodeDecode(t *testing.T) {
+	tr := NewTRand()
+	var buf bytes.Buffer
+	for i := 0; i < RUNS; i++ {
+		enc := tr.RandRequest()
+		buf.Reset()
+		if err := enc.EncodeRLP(&buf); err != nil {
+			t.Errorf("error: Request.EncodeRLP(): %v", err)
+		}
+		s := rlp.NewStream(bytes.NewReader(buf.Bytes()), 0)
+		dec := &Request{}
+		if err := dec.DecodeRLP(s); err != nil {
+			t.Errorf("error: Request.DecodeRLP(): %v", err)
+		}
+		a := enc.inner.(*Deposit)
+		b := dec.inner.(*Deposit)
+		compareDeposits(t, a, b)
+	}
+}
diff --git a/core/types/request.go b/core/types/request.go
new file mode 100644
index 00000000000..1423be32c62
--- /dev/null
+++ b/core/types/request.go
@@ -0,0 +1,114 @@
+package types
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+
+	rlp2 "github.com/ledgerwatch/erigon-lib/rlp"
+	"github.com/ledgerwatch/erigon/rlp"
+)
+
+const (
+	DepositRequestType byte = 0x00
+)
+
+type Request struct {
+	inner RequestData
+}
+
+type RequestData interface {
+	encodeRLP(*bytes.Buffer) error
+	decodeRLP([]byte) error
+	requestType() byte
+	copy() RequestData
+	encodingSize() int
+}
+
+func (r *Request) Type() byte {
+	return r.inner.requestType()
+}
+
+func NewRequest(inner RequestData) *Request {
+	req := new(Request)
+	req.inner = inner.copy()
+	return req
+}
+
+func (r *Request) EncodingSize() int {
+	switch r.Type() {
+	case DepositRequestType:
+		total := r.inner.encodingSize() + 1 // +1 byte for the request type
+		return rlp2.ListPrefixLen(total) + total
+	default:
+		panic(fmt.Sprintf("Unknown request type: %d", r.Type()))
+	}
+}
+
+func (r *Request) EncodeRLP(w io.Writer) error {
+	var buf bytes.Buffer // TODO(racytech): find a solution to reuse the same buffer instead of recreating it
+	buf.WriteByte(r.Type()) // first write the type of the request, then encode the inner data
+	if err := r.inner.encodeRLP(&buf); err != nil {
+		return err
+	}
+	// the type byte plus payload is itself wrapped as an RLP string (EIP-2718-style envelope)
+	return rlp.Encode(w, buf.Bytes())
+}
+
+func (r *Request) DecodeRLP(s *rlp.Stream) error {
+	kind, _, err := s.Kind()
+	switch {
+	case err != nil:
+		return err
+	case kind == rlp.List:
+		return fmt.Errorf("error: untyped request (unexpected list)")
+	case kind == rlp.Byte:
+		return fmt.Errorf("error: too short request")
+	default:
+		var buf []byte
+		if buf, err = s.Bytes(); err != nil {
+			return err
+		}
+		return r.decode(buf)
+	}
+}
+
+func (r *Request) decode(data []byte) error {
+	if len(data) <= 1 {
+		return fmt.Errorf("error: typed request too short")
+	}
+	var inner RequestData
+	switch data[0] {
+	case DepositRequestType:
+		inner = new(Deposit)
+	default:
+		return fmt.Errorf("unknown request type - %d", data[0])
+	}
+
+	if err := inner.decodeRLP(data[1:]); err != nil {
+		return err
+	}
+	r.inner = inner
+	return nil
+}
+
+func (r Requests) Deposits() Deposits {
+	deposits := make(Deposits, 0, len(r))
+	for _,
req := range r { + if req.Type() == DepositRequestType { + deposits = append(deposits, req.inner.(*Deposit)) + } + } + return deposits +} + +type Requests []*Request + +func (r Requests) Len() int { return len(r) } + +// EncodeIndex encodes the i'th request to w. Note that this does not check for errors +// because we assume that *request will only ever contain valid requests that were either +// constructed by decoding or via public API in this package. +func (r Requests) EncodeIndex(i int, w *bytes.Buffer) { + rlp.Encode(w, r[i]) +} diff --git a/eth/stagedsync/stage_mining_create_block.go b/eth/stagedsync/stage_mining_create_block.go index a2edf156ded..ea1a5b3e032 100644 --- a/eth/stagedsync/stage_mining_create_block.go +++ b/eth/stagedsync/stage_mining_create_block.go @@ -32,6 +32,7 @@ type MiningBlock struct { Receipts types.Receipts Withdrawals []*types.Withdrawal PreparedTxs types.TransactionsStream + Requests []*types.Request } type MiningState struct { diff --git a/eth/stagedsync/stage_mining_exec.go b/eth/stagedsync/stage_mining_exec.go index 8cc2da1e96f..f6a21970149 100644 --- a/eth/stagedsync/stage_mining_exec.go +++ b/eth/stagedsync/stage_mining_exec.go @@ -176,12 +176,12 @@ func SpawnMiningExecStage(s *StageState, txc wrap.TxContainer, cfg MiningExecCfg } var err error - _, current.Txs, current.Receipts, err = core.FinalizeBlockExecution(cfg.engine, stateReader, current.Header, current.Txs, current.Uncles, &state.NoopWriter{}, &cfg.chainConfig, ibs, current.Receipts, current.Withdrawals, ChainReaderImpl{config: &cfg.chainConfig, tx: txc.Tx, blockReader: cfg.blockReader, logger: logger}, true, logger) + _, current.Txs, current.Receipts, err = core.FinalizeBlockExecution(cfg.engine, stateReader, current.Header, current.Txs, current.Uncles, &state.NoopWriter{}, &cfg.chainConfig, ibs, current.Receipts, current.Withdrawals, current.Requests, ChainReaderImpl{config: &cfg.chainConfig, tx: txc.Tx, blockReader: cfg.blockReader, logger: logger}, true, logger) if err != nil { return err } - block := types.NewBlock(current.Header, current.Txs, current.Uncles, current.Receipts, current.Withdrawals) + block := types.NewBlock(current.Header, current.Txs, current.Uncles, current.Receipts, current.Withdrawals, current.Requests) // Simulate the block execution to get the final state root if err := rawdb.WriteHeader(txc.Tx, block.Header()); err != nil { return fmt.Errorf("cannot write header: %s", err) diff --git a/eth/stagedsync/stage_mining_finish.go b/eth/stagedsync/stage_mining_finish.go index d3d36dfbab6..408a7990e71 100644 --- a/eth/stagedsync/stage_mining_finish.go +++ b/eth/stagedsync/stage_mining_finish.go @@ -52,7 +52,7 @@ func SpawnMiningFinishStage(s *StageState, tx kv.RwTx, cfg MiningFinishCfg, quit // continue //} - block := types.NewBlock(current.Header, current.Txs, current.Uncles, current.Receipts, current.Withdrawals) + block := types.NewBlock(current.Header, current.Txs, current.Uncles, current.Receipts, current.Withdrawals, current.Requests) blockWithReceipts := &types.BlockWithReceipts{Block: block, Receipts: current.Receipts} *current = MiningBlock{} // hack to clean global data diff --git a/polygon/bor/bor.go b/polygon/bor/bor.go index 504d6d1ec7f..05525941386 100644 --- a/polygon/bor/bor.go +++ b/polygon/bor/bor.go @@ -974,7 +974,7 @@ func (c *Bor) CalculateRewards(config *chain.Config, header *types.Header, uncle // Finalize implements consensus.Engine, ensuring no uncles are set, nor block // rewards given. 
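+// The requests parameter below only exists to satisfy the consensus.Engine
+// interface after this change; Bor is not expected to produce or verify
+// EIP-7685 requests (an assumption: this diff only threads the argument through).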
func (c *Bor) Finalize(config *chain.Config, header *types.Header, state *state.IntraBlockState, - txs types.Transactions, uncles []*types.Header, r types.Receipts, withdrawals []*types.Withdrawal, + txs types.Transactions, uncles []*types.Header, r types.Receipts, withdrawals []*types.Withdrawal, requests []*types.Request, chain consensus.ChainReader, syscall consensus.SystemCall, logger log.Logger, ) (types.Transactions, types.Receipts, error) { headerNumber := header.Number.Uint64() @@ -1038,7 +1038,7 @@ func (c *Bor) changeContractCodeIfNeeded(headerNumber uint64, state *state.Intra // FinalizeAndAssemble implements consensus.Engine, ensuring no uncles are set, // nor block rewards given, and returns the final block. func (c *Bor) FinalizeAndAssemble(chainConfig *chain.Config, header *types.Header, state *state.IntraBlockState, - txs types.Transactions, uncles []*types.Header, receipts types.Receipts, withdrawals []*types.Withdrawal, + txs types.Transactions, uncles []*types.Header, receipts types.Receipts, withdrawals []*types.Withdrawal, requests []*types.Request, chain consensus.ChainReader, syscall consensus.SystemCall, call consensus.Call, logger log.Logger, ) (*types.Block, types.Transactions, types.Receipts, error) { // stateSyncData := []*types.StateSyncData{} @@ -1078,7 +1078,7 @@ func (c *Bor) FinalizeAndAssemble(chainConfig *chain.Config, header *types.Heade header.UncleHash = types.CalcUncleHash(nil) // Assemble block - block := types.NewBlock(header, txs, nil, receipts, withdrawals) + block := types.NewBlock(header, txs, nil, receipts, withdrawals, requests) // set state sync // bc := chain.(*core.BlockChain) diff --git a/polygon/bor/fake.go b/polygon/bor/fake.go index fb79b7642da..fc3485eef54 100644 --- a/polygon/bor/fake.go +++ b/polygon/bor/fake.go @@ -21,8 +21,8 @@ func NewFaker() *FakeBor { } func (f *FakeBor) Finalize(config *chain.Config, header *types.Header, state *state.IntraBlockState, - txs types.Transactions, uncles []*types.Header, r types.Receipts, withdrawals []*types.Withdrawal, + txs types.Transactions, uncles []*types.Header, r types.Receipts, withdrawals []*types.Withdrawal, requests []*types.Request, chain consensus.ChainReader, syscall consensus.SystemCall, logger log.Logger, ) (types.Transactions, types.Receipts, error) { - return f.FakeEthash.Finalize(config, header, state, txs, uncles, r, withdrawals, chain, syscall, logger) + return f.FakeEthash.Finalize(config, header, state, txs, uncles, r, withdrawals, requests, chain, syscall, logger) } diff --git a/polygon/p2p/message_listener_test.go b/polygon/p2p/message_listener_test.go index 601fd38fe47..dc55a209d61 100644 --- a/polygon/p2p/message_listener_test.go +++ b/polygon/p2p/message_listener_test.go @@ -416,7 +416,7 @@ func blockHeadersPacket66Bytes(t *testing.T, requestId uint64, headers []*types. 
func newMockNewBlockPacketBytes(t *testing.T) []byte { newBlockPacket := eth.NewBlockPacket{ - Block: types.NewBlock(newMockBlockHeaders(1)[0], nil, nil, nil, nil), + Block: types.NewBlock(newMockBlockHeaders(1)[0], nil, nil, nil, nil, nil), } newBlockPacketBytes, err := rlp.EncodeToBytes(&newBlockPacket) require.NoError(t, err) diff --git a/tests/statedb_insert_chain_transaction_test.go b/tests/statedb_insert_chain_transaction_test.go index 63edc94e617..e4499feace1 100644 --- a/tests/statedb_insert_chain_transaction_test.go +++ b/tests/statedb_insert_chain_transaction_test.go @@ -52,7 +52,7 @@ func TestInsertIncorrectStateRootDifferentAccounts(t *testing.T) { t.Fatal("roots are the same") } - incorrectBlock := types.NewBlock(&incorrectHeader, chain.Blocks[0].Transactions(), chain.Blocks[0].Uncles(), chain.Receipts[0], nil) + incorrectBlock := types.NewBlock(&incorrectHeader, chain.Blocks[0].Transactions(), chain.Blocks[0].Uncles(), chain.Receipts[0], nil, nil) incorrectChain := &core.ChainPack{Blocks: []*types.Block{incorrectBlock}, Headers: []*types.Header{&incorrectHeader}, TopBlock: incorrectBlock} if err = m.InsertChain(incorrectChain); err == nil { t.Fatal("should fail") @@ -119,7 +119,7 @@ func TestInsertIncorrectStateRootSameAccount(t *testing.T) { t.Fatal("roots are the same") } - incorrectBlock := types.NewBlock(&incorrectHeader, chain.Blocks[0].Transactions(), chain.Blocks[0].Uncles(), chain.Receipts[0], nil) + incorrectBlock := types.NewBlock(&incorrectHeader, chain.Blocks[0].Transactions(), chain.Blocks[0].Uncles(), chain.Receipts[0], nil, nil) incorrectChain := &core.ChainPack{Blocks: []*types.Block{incorrectBlock}, Headers: []*types.Header{&incorrectHeader}, TopBlock: incorrectBlock} if err = m.InsertChain(incorrectChain); err == nil { t.Fatal("should fail") @@ -181,7 +181,7 @@ func TestInsertIncorrectStateRootSameAccountSameAmount(t *testing.T) { incorrectHeader := *chain.Headers[0] // Copy header, not just pointer incorrectHeader.Root = chain.Headers[1].Root - incorrectBlock := types.NewBlock(&incorrectHeader, chain.Blocks[0].Transactions(), chain.Blocks[0].Uncles(), chain.Receipts[0], nil) + incorrectBlock := types.NewBlock(&incorrectHeader, chain.Blocks[0].Transactions(), chain.Blocks[0].Uncles(), chain.Receipts[0], nil, nil) incorrectChain := &core.ChainPack{Blocks: []*types.Block{incorrectBlock}, Headers: []*types.Header{&incorrectHeader}, TopBlock: incorrectBlock} if err = m.InsertChain(incorrectChain); err == nil { t.Fatal("should fail") @@ -243,7 +243,7 @@ func TestInsertIncorrectStateRootAllFundsRoot(t *testing.T) { incorrectHeader := *chain.Headers[0] // Copy header, not just pointer incorrectHeader.Root = chain.Headers[1].Root - incorrectBlock := types.NewBlock(&incorrectHeader, chain.Blocks[0].Transactions(), chain.Blocks[0].Uncles(), chain.Receipts[0], nil) + incorrectBlock := types.NewBlock(&incorrectHeader, chain.Blocks[0].Transactions(), chain.Blocks[0].Uncles(), chain.Receipts[0], nil, nil) incorrectChain := &core.ChainPack{Blocks: []*types.Block{incorrectBlock}, Headers: []*types.Header{&incorrectHeader}, TopBlock: incorrectBlock} if err = m.InsertChain(incorrectChain); err == nil { t.Fatal("should fail") @@ -304,7 +304,7 @@ func TestInsertIncorrectStateRootAllFunds(t *testing.T) { // BLOCK 1 incorrectHeader := *chain.Headers[0] // Copy header, not just pointer incorrectHeader.Root = chain.Headers[1].Root - incorrectBlock := types.NewBlock(&incorrectHeader, chain.Blocks[0].Transactions(), chain.Blocks[0].Uncles(), chain.Receipts[0], nil) + incorrectBlock 
:= types.NewBlock(&incorrectHeader, chain.Blocks[0].Transactions(), chain.Blocks[0].Uncles(), chain.Receipts[0], nil, nil) incorrectChain := &core.ChainPack{Blocks: []*types.Block{incorrectBlock}, Headers: []*types.Header{&incorrectHeader}, TopBlock: incorrectBlock} if err = m.InsertChain(incorrectChain); err == nil { @@ -385,7 +385,7 @@ func TestAccountDeployIncorrectRoot(t *testing.T) { incorrectHeader := *chain.Headers[1] // Copy header, not just pointer incorrectHeader.Root = chain.Headers[0].Root - incorrectBlock := types.NewBlock(&incorrectHeader, chain.Blocks[1].Transactions(), chain.Blocks[1].Uncles(), chain.Receipts[1], nil) + incorrectBlock := types.NewBlock(&incorrectHeader, chain.Blocks[1].Transactions(), chain.Blocks[1].Uncles(), chain.Receipts[1], nil, nil) incorrectChain := &core.ChainPack{Blocks: []*types.Block{incorrectBlock}, Headers: []*types.Header{&incorrectHeader}, TopBlock: incorrectBlock} // BLOCK 2 - INCORRECT @@ -492,7 +492,7 @@ func TestAccountCreateIncorrectRoot(t *testing.T) { // BLOCK 3 - INCORRECT incorrectHeader := *chain.Headers[2] // Copy header, not just pointer incorrectHeader.Root = chain.Headers[1].Root - incorrectBlock := types.NewBlock(&incorrectHeader, chain.Blocks[2].Transactions(), chain.Blocks[2].Uncles(), chain.Receipts[2], nil) + incorrectBlock := types.NewBlock(&incorrectHeader, chain.Blocks[2].Transactions(), chain.Blocks[2].Uncles(), chain.Receipts[2], nil, nil) incorrectChain := &core.ChainPack{Blocks: []*types.Block{incorrectBlock}, Headers: []*types.Header{&incorrectHeader}, TopBlock: incorrectBlock} if err = m.InsertChain(incorrectChain); err == nil { @@ -581,7 +581,7 @@ func TestAccountUpdateIncorrectRoot(t *testing.T) { // BLOCK 4 - INCORRECT incorrectHeader := *chain.Headers[3] // Copy header, not just pointer incorrectHeader.Root = chain.Headers[1].Root - incorrectBlock := types.NewBlock(&incorrectHeader, chain.Blocks[3].Transactions(), chain.Blocks[3].Uncles(), chain.Receipts[3], nil) + incorrectBlock := types.NewBlock(&incorrectHeader, chain.Blocks[3].Transactions(), chain.Blocks[3].Uncles(), chain.Receipts[3], nil, nil) incorrectChain := &core.ChainPack{Blocks: []*types.Block{incorrectBlock}, Headers: []*types.Header{&incorrectHeader}, TopBlock: incorrectBlock} if err = m.InsertChain(incorrectChain); err == nil { @@ -669,7 +669,7 @@ func TestAccountDeleteIncorrectRoot(t *testing.T) { // BLOCK 4 - INCORRECT incorrectHeader := *chain.Headers[3] // Copy header, not just pointer incorrectHeader.Root = chain.Headers[1].Root - incorrectBlock := types.NewBlock(&incorrectHeader, chain.Blocks[3].Transactions(), chain.Blocks[3].Uncles(), chain.Receipts[3], nil) + incorrectBlock := types.NewBlock(&incorrectHeader, chain.Blocks[3].Transactions(), chain.Blocks[3].Uncles(), chain.Receipts[3], nil, nil) incorrectChain := &core.ChainPack{Blocks: []*types.Block{incorrectBlock}, Headers: []*types.Header{&incorrectHeader}, TopBlock: incorrectBlock} if err = m.InsertChain(incorrectChain); err == nil { t.Fatal("should fail") diff --git a/turbo/engineapi/engine_block_downloader/block_downloader.go b/turbo/engineapi/engine_block_downloader/block_downloader.go index 25e975b60d9..510c5cf4fa4 100644 --- a/turbo/engineapi/engine_block_downloader/block_downloader.go +++ b/turbo/engineapi/engine_block_downloader/block_downloader.go @@ -264,7 +264,7 @@ func (e *EngineBlockDownloader) insertHeadersAndBodies(ctx context.Context, tx k if body == nil { return fmt.Errorf("missing body at block=%d", number) } - blocksBatch = append(blocksBatch, 
types.NewBlockFromStorage(hash, header, body.Transactions, body.Uncles, body.Withdrawals)) + blocksBatch = append(blocksBatch, types.NewBlockFromStorage(hash, header, body.Transactions, body.Uncles, body.Withdrawals, body.Requests)) if number%uint64(blockWrittenLogSize) == 0 { e.logger.Info("[insertHeadersAndBodies] Written blocks", "progress", number, "to", toBlock) } diff --git a/turbo/engineapi/engine_server.go b/turbo/engineapi/engine_server.go index eb0fdde6b04..328cff188b0 100644 --- a/turbo/engineapi/engine_server.go +++ b/turbo/engineapi/engine_server.go @@ -115,9 +115,9 @@ func (e *EngineServer) Start( } } -func (s *EngineServer) checkWithdrawalsPresence(time uint64, withdrawals []*types.Withdrawal) error { +func (s *EngineServer) checkWithdrawalsPresence(time uint64, withdrawals types.Withdrawals) error { if !s.config.IsShanghai(time) && withdrawals != nil { - return &rpc.InvalidParamsError{Message: "withdrawals before shanghai"} + return &rpc.InvalidParamsError{Message: "withdrawals before Shanghai"} } if s.config.IsShanghai(time) && withdrawals == nil { return &rpc.InvalidParamsError{Message: "missing withdrawals list"} @@ -125,6 +125,16 @@ func (s *EngineServer) checkWithdrawalsPresence(time uint64, withdrawals []*type return nil } +func (s *EngineServer) checkRequestsPresence(time uint64, requests types.Requests) error { + if !s.config.IsPrague(time) && requests != nil { + return &rpc.InvalidParamsError{Message: "requests before Prague"} + } + if s.config.IsPrague(time) && requests == nil { + return &rpc.InvalidParamsError{Message: "missing requests list"} + } + return nil +} + // EngineNewPayload validates and possibly executes payload func (s *EngineServer) newPayload(ctx context.Context, req *engine_types.ExecutionPayload, expectedBlobHashes []libcommon.Hash, parentBeaconBlockRoot *libcommon.Hash, version clparams.StateVersion, @@ -159,19 +169,30 @@ func (s *EngineServer) newPayload(ctx context.Context, req *engine_types.Executi ReceiptHash: req.ReceiptsRoot, TxHash: types.DeriveSha(types.BinaryTransactions(txs)), } - var withdrawals []*types.Withdrawal + + var withdrawals types.Withdrawals if version >= clparams.CapellaVersion { withdrawals = req.Withdrawals } - + if err := s.checkWithdrawalsPresence(header.Time, withdrawals); err != nil { + return nil, err + } if withdrawals != nil { - wh := types.DeriveSha(types.Withdrawals(withdrawals)) + wh := types.DeriveSha(withdrawals) header.WithdrawalsHash = &wh } - if err := s.checkWithdrawalsPresence(header.Time, withdrawals); err != nil { + var requests types.Requests + if version >= clparams.ElectraVersion && req.DepositRequests != nil { + requests = req.DepositRequests.ToRequests() + } + if err := s.checkRequestsPresence(header.Time, requests); err != nil { return nil, err } + if requests != nil { + rh := types.DeriveSha(requests) + header.RequestsRoot = &rh + } if version <= clparams.CapellaVersion { if req.BlobGasUsed != nil { @@ -260,7 +281,7 @@ func (s *EngineServer) newPayload(ctx context.Context, req *engine_types.Executi defer s.lock.Unlock() s.logger.Debug("[NewPayload] sending block", "height", header.Number, "hash", blockHash) - block := types.NewBlockFromStorage(blockHash, &header, transactions, nil /* uncles */, withdrawals) + block := types.NewBlockFromStorage(blockHash, &header, transactions, nil /* uncles */, withdrawals, requests) payloadStatus, err := s.HandleNewPayload(ctx, "NewPayload", block, expectedBlobHashes) if err != nil { @@ -617,6 +638,14 @@ func (e *EngineServer) GetPayloadV3(ctx context.Context, 
payloadID hexutility.By
 	return e.getPayload(ctx, decodedPayloadId, clparams.DenebVersion)
 }
 
+// Same as [GetPayloadV3], but returning ExecutionPayloadV4 (= ExecutionPayloadV3 + requests)
+// See https://github.com/ethereum/execution-apis/blob/main/src/engine/prague.md#engine_getpayloadv4
+func (e *EngineServer) GetPayloadV4(ctx context.Context, payloadID hexutility.Bytes) (*engine_types.GetPayloadResponse, error) {
+	decodedPayloadId := binary.BigEndian.Uint64(payloadID)
+	e.logger.Info("Received GetPayloadV4", "payloadId", decodedPayloadId)
+	return e.getPayload(ctx, decodedPayloadId, clparams.ElectraVersion)
+}
+
 // Updates the forkchoice state after validating the headBlockHash
 // Additionally, builds and returns a unique identifier for an initial version of a payload
 // (asynchronously updated with transactions), if payloadAttributes is not nil and passes validation
@@ -656,6 +685,15 @@ func (e *EngineServer) NewPayloadV3(ctx context.Context, payload *engine_types.E
 	return e.newPayload(ctx, payload, expectedBlobHashes, parentBeaconBlockRoot, clparams.DenebVersion)
 }
 
+// NewPayloadV4 processes new payloads (blocks) from the beacon chain with withdrawals, blob gas and requests.
+// See https://github.com/ethereum/execution-apis/blob/main/src/engine/prague.md#engine_newpayloadv4
+func (e *EngineServer) NewPayloadV4(ctx context.Context, payload *engine_types.ExecutionPayload,
+	expectedBlobHashes []libcommon.Hash, parentBeaconBlockRoot *libcommon.Hash) (*engine_types.PayloadStatus, error) {
+	// TODO(racytech): add a proper version check or refactor this part;
+	// all version-related checks should live here so that newPayload doesn't have to deal with them
+	return e.newPayload(ctx, payload, expectedBlobHashes, parentBeaconBlockRoot, clparams.ElectraVersion)
+}
+
 // Receives consensus layer's transition configuration and checks if the execution layer has the correct configuration.
 // Can also be used to ping the execution layer (heartbeats).
// See https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.1/src/engine/specification.md#engine_exchangetransitionconfigurationv1 @@ -710,9 +748,11 @@ var ourCapabilities = []string{ "engine_newPayloadV1", "engine_newPayloadV2", "engine_newPayloadV3", + "engine_newPayloadV4", "engine_getPayloadV1", "engine_getPayloadV2", "engine_getPayloadV3", + "engine_getPayloadV4", "engine_exchangeTransitionConfigurationV1", "engine_getPayloadBodiesByHashV1", "engine_getPayloadBodiesByRangeV1", diff --git a/turbo/engineapi/engine_types/jsonrpc.go b/turbo/engineapi/engine_types/jsonrpc.go index 1eb40d177bf..2cf3334c8b9 100644 --- a/turbo/engineapi/engine_types/jsonrpc.go +++ b/turbo/engineapi/engine_types/jsonrpc.go @@ -17,23 +17,24 @@ import ( // ExecutionPayload represents an execution payload (aka block) type ExecutionPayload struct { - ParentHash common.Hash `json:"parentHash" gencodec:"required"` - FeeRecipient common.Address `json:"feeRecipient" gencodec:"required"` - StateRoot common.Hash `json:"stateRoot" gencodec:"required"` - ReceiptsRoot common.Hash `json:"receiptsRoot" gencodec:"required"` - LogsBloom hexutility.Bytes `json:"logsBloom" gencodec:"required"` - PrevRandao common.Hash `json:"prevRandao" gencodec:"required"` - BlockNumber hexutil.Uint64 `json:"blockNumber" gencodec:"required"` - GasLimit hexutil.Uint64 `json:"gasLimit" gencodec:"required"` - GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"` - Timestamp hexutil.Uint64 `json:"timestamp" gencodec:"required"` - ExtraData hexutility.Bytes `json:"extraData" gencodec:"required"` - BaseFeePerGas *hexutil.Big `json:"baseFeePerGas" gencodec:"required"` - BlockHash common.Hash `json:"blockHash" gencodec:"required"` - Transactions []hexutility.Bytes `json:"transactions" gencodec:"required"` - Withdrawals []*types.Withdrawal `json:"withdrawals"` - BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed"` - ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas"` + ParentHash common.Hash `json:"parentHash" gencodec:"required"` + FeeRecipient common.Address `json:"feeRecipient" gencodec:"required"` + StateRoot common.Hash `json:"stateRoot" gencodec:"required"` + ReceiptsRoot common.Hash `json:"receiptsRoot" gencodec:"required"` + LogsBloom hexutility.Bytes `json:"logsBloom" gencodec:"required"` + PrevRandao common.Hash `json:"prevRandao" gencodec:"required"` + BlockNumber hexutil.Uint64 `json:"blockNumber" gencodec:"required"` + GasLimit hexutil.Uint64 `json:"gasLimit" gencodec:"required"` + GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"` + Timestamp hexutil.Uint64 `json:"timestamp" gencodec:"required"` + ExtraData hexutility.Bytes `json:"extraData" gencodec:"required"` + BaseFeePerGas *hexutil.Big `json:"baseFeePerGas" gencodec:"required"` + BlockHash common.Hash `json:"blockHash" gencodec:"required"` + Transactions []hexutility.Bytes `json:"transactions" gencodec:"required"` + Withdrawals []*types.Withdrawal `json:"withdrawals"` + BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed"` + ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas"` + DepositRequests types.Deposits `json:"depositRequests"` // do not forget to add it into erigon-lib/gointerfaces/types if needed } // PayloadAttributes represent the attributes required to start assembling a payload diff --git a/turbo/engineapi/interface.go b/turbo/engineapi/interface.go index a17068b817b..ea26d7cd5d7 100644 --- a/turbo/engineapi/interface.go +++ b/turbo/engineapi/interface.go @@ -14,12 +14,14 @@ type EngineAPI interface { NewPayloadV1(context.Context, 
*engine_types.ExecutionPayload) (*engine_types.PayloadStatus, error) NewPayloadV2(context.Context, *engine_types.ExecutionPayload) (*engine_types.PayloadStatus, error) NewPayloadV3(ctx context.Context, executionPayload *engine_types.ExecutionPayload, expectedBlobHashes []common.Hash, parentBeaconBlockRoot *common.Hash) (*engine_types.PayloadStatus, error) + NewPayloadV4(ctx context.Context, executionPayload *engine_types.ExecutionPayload, expectedBlobHashes []common.Hash, parentBeaconBlockRoot *common.Hash) (*engine_types.PayloadStatus, error) ForkchoiceUpdatedV1(ctx context.Context, forkChoiceState *engine_types.ForkChoiceState, payloadAttributes *engine_types.PayloadAttributes) (*engine_types.ForkChoiceUpdatedResponse, error) ForkchoiceUpdatedV2(ctx context.Context, forkChoiceState *engine_types.ForkChoiceState, payloadAttributes *engine_types.PayloadAttributes) (*engine_types.ForkChoiceUpdatedResponse, error) ForkchoiceUpdatedV3(ctx context.Context, forkChoiceState *engine_types.ForkChoiceState, payloadAttributes *engine_types.PayloadAttributes) (*engine_types.ForkChoiceUpdatedResponse, error) GetPayloadV1(ctx context.Context, payloadID hexutility.Bytes) (*engine_types.ExecutionPayload, error) GetPayloadV2(ctx context.Context, payloadID hexutility.Bytes) (*engine_types.GetPayloadResponse, error) GetPayloadV3(ctx context.Context, payloadID hexutility.Bytes) (*engine_types.GetPayloadResponse, error) + GetPayloadV4(ctx context.Context, payloadID hexutility.Bytes) (*engine_types.GetPayloadResponse, error) ExchangeTransitionConfigurationV1(ctx context.Context, transitionConfiguration *engine_types.TransitionConfiguration) (*engine_types.TransitionConfiguration, error) GetPayloadBodiesByHashV1(ctx context.Context, hashes []common.Hash) ([]*engine_types.ExecutionPayloadBodyV1, error) GetPayloadBodiesByRangeV1(ctx context.Context, start, count hexutil.Uint64) ([]*engine_types.ExecutionPayloadBodyV1, error) diff --git a/turbo/execution/eth1/block_building.go b/turbo/execution/eth1/block_building.go index ec9cc6e9e5e..21460e5956f 100644 --- a/turbo/execution/eth1/block_building.go +++ b/turbo/execution/eth1/block_building.go @@ -65,6 +65,8 @@ func (e *EthereumExecutionModule) AssembleBlock(ctx context.Context, req *execut param.ParentBeaconBlockRoot = &pbbr } + // TODO(racytech): add requests (Pectra) + // First check if we're already building a block with the requested parameters if e.lastParameters != nil { param.PayloadId = e.lastParameters.PayloadId diff --git a/turbo/execution/eth1/eth1_chain_reader.go/chain_reader.go b/turbo/execution/eth1/eth1_chain_reader.go/chain_reader.go index 5132f58b271..14b8b0db66f 100644 --- a/turbo/execution/eth1/eth1_chain_reader.go/chain_reader.go +++ b/turbo/execution/eth1/eth1_chain_reader.go/chain_reader.go @@ -107,7 +107,7 @@ func (c ChainReaderWriterEth1) GetBlockByHash(ctx context.Context, hash libcommo log.Warn("[engine] GetBlockByHash", "err", err) return nil } - return types.NewBlock(header, txs, nil, nil, body.Withdrawals) + return types.NewBlock(header, txs, nil, nil, body.Withdrawals, body.Requests) } func (c ChainReaderWriterEth1) GetBlockByNumber(ctx context.Context, number uint64) *types.Block { @@ -136,7 +136,7 @@ func (c ChainReaderWriterEth1) GetBlockByNumber(ctx context.Context, number uint log.Warn("[engine] GetBlockByNumber", "err", err) return nil } - return types.NewBlock(header, txs, nil, nil, body.Withdrawals) + return types.NewBlock(header, txs, nil, nil, body.Withdrawals, body.Requests) } func (c ChainReaderWriterEth1) 
GetHeaderByHash(ctx context.Context, hash libcommon.Hash) *types.Header {
diff --git a/turbo/execution/eth1/eth1_utils/grpc_test.go b/turbo/execution/eth1/eth1_utils/grpc_test.go
index eeb684d5062..3c593337c34 100644
--- a/turbo/execution/eth1/eth1_utils/grpc_test.go
+++ b/turbo/execution/eth1/eth1_utils/grpc_test.go
@@ -58,7 +58,7 @@ func makeBlock(txCount, uncleCount, withdrawalCount int) *types.Block {
 			Amount:    uint64(10 * i),
 		}
 	}
-	return types.NewBlock(header, txs, uncles, receipts, withdrawals)
+	return types.NewBlock(header, txs, uncles, receipts, withdrawals, nil) // TODO(racytech): add requests
 }
 
 func TestBlockRpcConversion(t *testing.T) {
diff --git a/turbo/snapshotsync/freezeblocks/block_reader.go b/turbo/snapshotsync/freezeblocks/block_reader.go
index d9f62adab31..a18b348655f 100644
--- a/turbo/snapshotsync/freezeblocks/block_reader.go
+++ b/turbo/snapshotsync/freezeblocks/block_reader.go
@@ -520,7 +520,6 @@ func (r *BlockReader) BodyWithTransactions(ctx context.Context, tx kv.Getter, ha
 			log.Info(dbgPrefix + "found in db=false")
 		}
 	}
-
 	view := r.sn.View()
 	defer view.Close()
 
@@ -703,7 +702,7 @@ func (r *BlockReader) blockWithSenders(ctx context.Context, tx kv.Getter, hash c
 		return
 	}
 	if txsAmount == 0 {
-		block = types.NewBlockFromStorage(hash, h, nil, b.Uncles, b.Withdrawals)
+		block = types.NewBlockFromStorage(hash, h, nil, b.Uncles, b.Withdrawals, b.Requests)
 		if len(senders) != block.Transactions().Len() {
 			if dbgLogs {
 				log.Info(dbgPrefix + fmt.Sprintf("found block with %d transactions, but %d senders", block.Transactions().Len(), len(senders)))
@@ -726,7 +725,7 @@ func (r *BlockReader) blockWithSenders(ctx context.Context, tx kv.Getter, hash c
 	if err != nil {
 		return nil, nil, err
 	}
-	block = types.NewBlockFromStorage(hash, h, txs, b.Uncles, b.Withdrawals)
+	block = types.NewBlockFromStorage(hash, h, txs, b.Uncles, b.Withdrawals, b.Requests)
 	if len(senders) != block.Transactions().Len() {
 		if dbgLogs {
 			log.Info(dbgPrefix + fmt.Sprintf("found block with %d transactions, but %d senders", block.Transactions().Len(), len(senders)))
@@ -811,10 +810,10 @@ func (r *BlockReader) bodyFromSnapshot(blockHeight uint64, sn *Segment, buf []by
 	if b == nil {
 		return nil, 0, 0, buf, nil
 	}
-
 	body := new(types.Body)
 	body.Uncles = b.Uncles
 	body.Withdrawals = b.Withdrawals
+	body.Requests = b.Requests
 	var txsAmount uint32
 	if b.TxAmount >= 2 {
 		txsAmount = b.TxAmount - 2