diff --git a/cmd/geth/snapshot.go b/cmd/geth/snapshot.go
index 8dc65d6656a6..846ca169be74 100644
--- a/cmd/geth/snapshot.go
+++ b/cmd/geth/snapshot.go
@@ -109,16 +109,9 @@ data, and verifies that all snapshot storage data has a corresponding account.
 				ArgsUsage: "<address | hash>",
 				Action:    utils.MigrateFlags(checkAccount),
 				Category:  "MISCELLANEOUS COMMANDS",
-				Flags: []cli.Flag{
-					utils.DataDirFlag,
-					utils.AncientFlag,
-					utils.RopstenFlag,
-					utils.SepoliaFlag,
-					utils.RinkebyFlag,
-					utils.GoerliFlag,
-				},
+				Flags: utils.GroupFlags(utils.NetworkFlags, utils.DatabasePathFlags),
 				Description: `
-geth snapshot inspect-account <address | hash> checks all snapshot layers and prints out
+geth snapshot inspect-account <address | hash> checks all snapshot layers and prints out
 information about the specified address.
 `,
 			},
@@ -543,8 +536,10 @@ func checkAccount(ctx *cli.Context) error {
 	if ctx.NArg() != 1 {
 		return errors.New("need <address|hash> arg")
 	}
-	var addr common.Address
-	var hash common.Hash
+	var (
+		hash common.Hash
+		addr common.Address
+	)
 	switch len(ctx.Args()[0]) {
 	case 40, 42:
 		addr = common.HexToAddress(ctx.Args()[0])
diff --git a/core/state/snapshot/journal.go b/core/state/snapshot/journal.go
index c97db05a7ca9..50594371ad2d 100644
--- a/core/state/snapshot/journal.go
+++ b/core/state/snapshot/journal.go
@@ -35,6 +35,9 @@ import (
 
 const journalVersion uint64 = 0
 
+// errNoJournal is returned if there is no snapshot journal on disk.
+var errNoJournal = errors.New("snapshot journal is non-existent")
+
 // journalGenerator is a disk layer entry containing the generator progress marker.
 type journalGenerator struct {
 	// Indicator that whether the database was in progress of being wiped.
@@ -108,16 +111,15 @@ func loadAndParseJournal(db ethdb.KeyValueStore, base *diskLayer) (snapshot, jou
 	// So if there is no journal, or the journal is invalid(e.g. the journal
 	// is not matched with disk layer; or the it's the legacy-format journal,
 	// etc.), we just discard all diffs and try to recover them later.
-	var parentLayer snapshot
-	parentLayer = base
+	var current snapshot = base
 	err := iterateJournal(db, func(parent common.Hash, root common.Hash, destructSet map[common.Hash]struct{}, accountData map[common.Hash][]byte, storageData map[common.Hash]map[common.Hash][]byte) error {
-		parentLayer = newDiffLayer(parentLayer, root, destructSet, accountData, storageData)
+		current = newDiffLayer(current, root, destructSet, accountData, storageData)
 		return nil
 	})
 	if err != nil {
 		return base, generator, nil
 	}
-	return parentLayer, generator, nil
+	return current, generator, nil
 }
 
 // loadSnapshot loads a pre-existing state snapshot backed by a key-value store.
@@ -272,7 +274,7 @@ func (dl *diffLayer) Journal(buffer *bytes.Buffer) (common.Hash, error) {
 type journalCallback = func(parent common.Hash, root common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) error
 
 // iterateJournal iterates through the journalled difflayers, loading them from
-// the datababase, and invoking the callback for each loaded layer.
+// the database, and invoking the callback for each loaded layer.
 // The order is incremental; starting with the bottom-most difflayer, going towards
 // the most recent layer.
 // This method returns error either if there was some error reading from disk,
@@ -281,7 +283,7 @@ func iterateJournal(db ethdb.KeyValueReader, callback journalCallback) error {
 	journal := rawdb.ReadSnapshotJournal(db)
 	if len(journal) == 0 {
 		log.Warn("Loaded snapshot journal", "diffs", "missing")
-		return errors.New("missing journal")
+		return errNoJournal
 	}
 	r := rlp.NewStream(bytes.NewReader(journal), 0)
 	// Firstly, resolve the first element as the journal version
@@ -359,63 +361,3 @@ func iterateJournal(db ethdb.KeyValueReader, callback journalCallback) error {
 		parent = root
 	}
 }
-
-// CheckJournalAccount shows information about an account, from the disk layer and
-// up through the diff layers.
-func CheckJournalAccount(db ethdb.KeyValueStore, hash common.Hash) error {
-	// Look up the disk layer first
-	baseRoot := rawdb.ReadSnapshotRoot(db)
-	fmt.Printf("Disklayer: Root: %x\n", baseRoot)
-	if data := rawdb.ReadAccountSnapshot(db, hash); data != nil {
-		account := new(Account)
-		if err := rlp.DecodeBytes(data, account); err != nil {
-			panic(err)
-		}
-		fmt.Printf("\taccount.nonce: %d\n", account.Nonce)
-		fmt.Printf("\taccount.balance: %x\n", account.Balance)
-		fmt.Printf("\taccount.root: %x\n", account.Root)
-		fmt.Printf("\taccount.codehash: %x\n", account.CodeHash)
-	}
-	// Check storage
-	{
-		it := rawdb.NewKeyLengthIterator(db.NewIterator(append(rawdb.SnapshotStoragePrefix, hash.Bytes()...), nil), 1+2*common.HashLength)
-		fmt.Printf("\tStorage:\n")
-		for it.Next() {
-			slot := it.Key()[33:]
-			fmt.Printf("\t\t%x: %x\n", slot, it.Value())
-		}
-		it.Release()
-	}
-	var depth = 0
-
-	return iterateJournal(db, func(pRoot, root common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) error {
-		_, a := accounts[hash]
-		_, b := destructs[hash]
-		_, c := storage[hash]
-		depth++
-		if !a && !b && !c {
-			return nil
-		}
-		fmt.Printf("Disklayer+%d: Root: %x, parent %x\n", depth, root, pRoot)
-		if data, ok := accounts[hash]; ok {
-			account := new(Account)
-			if err := rlp.DecodeBytes(data, account); err != nil {
-				panic(err)
-			}
-			fmt.Printf("\taccount.nonce: %d\n", account.Nonce)
-			fmt.Printf("\taccount.balance: %x\n", account.Balance)
-			fmt.Printf("\taccount.root: %x\n", account.Root)
-			fmt.Printf("\taccount.codehash: %x\n", account.CodeHash)
-		}
-		if _, ok := destructs[hash]; ok {
-			fmt.Printf("\t Destructed!")
-		}
-		if data, ok := storage[hash]; ok {
-			fmt.Printf("\tStorage\n")
-			for k, v := range data {
-				fmt.Printf("\t\t%x: %x\n", k, v)
-			}
-		}
-		return nil
-	})
-}
diff --git a/core/state/snapshot/dangling.go b/core/state/snapshot/utils.go
similarity index 56%
rename from core/state/snapshot/dangling.go
rename to core/state/snapshot/utils.go
index b4f2dc3bb733..505ad15a99f8 100644
--- a/core/state/snapshot/dangling.go
+++ b/core/state/snapshot/utils.go
@@ -18,6 +18,7 @@ package snapshot
 
 import (
 	"bytes"
+	"errors"
 	"fmt"
 	"time"
 
@@ -25,6 +26,7 @@ import (
 	"github.com/ethereum/go-ethereum/core/rawdb"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/log"
+	"github.com/ethereum/go-ethereum/rlp"
 )
 
 // CheckDanglingStorage iterates the snap storage data, and verifies that all
@@ -72,9 +74,9 @@ func checkDanglingDiskStorage(chaindb ethdb.KeyValueStore) error {
 // checkDanglingMemStorage checks if there is any 'dangling' storage in the journalled
 // snapshot difflayers.
 func checkDanglingMemStorage(db ethdb.KeyValueStore) error {
-	log.Info("Checking dangling journalled storage")
 	start := time.Now()
-	iterateJournal(db, func(pRoot, root common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) error {
+	log.Info("Checking dangling journalled storage")
+	err := iterateJournal(db, func(pRoot, root common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) error {
 		for accHash := range storage {
 			if _, ok := accounts[accHash]; !ok {
 				log.Error("Dangling storage - missing account", "account", fmt.Sprintf("%#x", accHash), "root", root)
@@ -82,6 +84,74 @@ func checkDanglingMemStorage(db ethdb.KeyValueStore) error {
 		}
 		return nil
 	})
+	if err != nil && !errors.Is(err, errNoJournal) {
+		log.Info("Failed to resolve snapshot journal", "err", err)
+		return err
+	}
 	log.Info("Verified the snapshot journalled storage", "time", common.PrettyDuration(time.Since(start)))
 	return nil
 }
+
+// CheckJournalAccount shows information about an account, from the disk layer and
+// up through the diff layers.
+func CheckJournalAccount(db ethdb.KeyValueStore, hash common.Hash) error {
+	// Look up the disk layer first
+	baseRoot := rawdb.ReadSnapshotRoot(db)
+	fmt.Printf("Disklayer: Root: %x\n", baseRoot)
+	if data := rawdb.ReadAccountSnapshot(db, hash); data != nil {
+		account := new(Account)
+		if err := rlp.DecodeBytes(data, account); err != nil {
+			panic(err)
+		}
+		fmt.Printf("\taccount.nonce: %d\n", account.Nonce)
+		fmt.Printf("\taccount.balance: %x\n", account.Balance)
+		fmt.Printf("\taccount.root: %x\n", account.Root)
+		fmt.Printf("\taccount.codehash: %x\n", account.CodeHash)
+	}
+	// Check storage
+	{
+		it := rawdb.NewKeyLengthIterator(db.NewIterator(append(rawdb.SnapshotStoragePrefix, hash.Bytes()...), nil), 1+2*common.HashLength)
+		fmt.Printf("\tStorage:\n")
+		for it.Next() {
+			slot := it.Key()[33:]
+			fmt.Printf("\t\t%x: %x\n", slot, it.Value())
+		}
+		it.Release()
+	}
+	var depth = 0
+
+	err := iterateJournal(db, func(pRoot, root common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) error {
+		_, a := accounts[hash]
+		_, b := destructs[hash]
+		_, c := storage[hash]
+		depth++
+		if !a && !b && !c {
+			return nil
+		}
+		fmt.Printf("Disklayer+%d: Root: %x, parent %x\n", depth, root, pRoot)
+		if data, ok := accounts[hash]; ok {
+			account := new(Account)
+			if err := rlp.DecodeBytes(data, account); err != nil {
+				panic(err)
+			}
+			fmt.Printf("\taccount.nonce: %d\n", account.Nonce)
+			fmt.Printf("\taccount.balance: %x\n", account.Balance)
+			fmt.Printf("\taccount.root: %x\n", account.Root)
+			fmt.Printf("\taccount.codehash: %x\n", account.CodeHash)
+		}
+		if _, ok := destructs[hash]; ok {
+			fmt.Printf("\t Destructed!")
+		}
+		if data, ok := storage[hash]; ok {
+			fmt.Printf("\tStorage\n")
+			for k, v := range data {
+				fmt.Printf("\t\t%x: %x\n", k, v)
+			}
+		}
+		return nil
+	})
+	if errors.Is(err, errNoJournal) {
+		return nil
+	}
+	return err
+}
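Note on the error handling above: errNoJournal and iterateJournal are unexported, so nothing outside core/state/snapshot ever sees them; the change simply classifies a missing journal with a sentinel error and errors.Is, so "no journal on disk" is treated as nothing-to-check rather than a failure. The following is a minimal, self-contained sketch of that pattern using stand-in names (walkJournal and a byte-slice journal are hypothetical, not the real package API):

package main

import (
	"errors"
	"fmt"
)

// errNoJournal mirrors the sentinel introduced in journal.go: a missing
// journal is an expected condition, distinct from a read or decode failure.
var errNoJournal = errors.New("snapshot journal is non-existent")

// walkJournal is a stand-in for iterateJournal: it reports a missing journal
// via the sentinel, otherwise invokes the callback once per journalled layer.
func walkJournal(layers [][]byte, callback func(layer []byte) error) error {
	if len(layers) == 0 {
		return errNoJournal
	}
	for _, layer := range layers {
		if err := callback(layer); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	err := walkJournal(nil, func(layer []byte) error {
		// Per-layer checks (e.g. dangling-storage detection) would go here.
		return nil
	})
	switch {
	case err == nil:
		fmt.Println("journal verified")
	case errors.Is(err, errNoJournal):
		// No journalled diff layers on disk: nothing to check, not a failure.
		fmt.Println("no snapshot journal, skipping check")
	default:
		fmt.Println("failed to resolve snapshot journal:", err)
	}
}

With nil layers as above it prints "no snapshot journal, skipping check"; checkDanglingMemStorage and CheckJournalAccount in the diff make the same three-way distinction between success, a missing journal, and a genuine journal error.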