diff --git a/cmd/geth/dbcmd.go b/cmd/geth/dbcmd.go
index 4deb081ed8f6..b409b19260e0 100644
--- a/cmd/geth/dbcmd.go
+++ b/cmd/geth/dbcmd.go
@@ -694,41 +694,19 @@ func showMetaData(ctx *cli.Context) error {
 	if err != nil {
 		fmt.Fprintf(os.Stderr, "Error accessing ancients: %v", err)
 	}
-	pp := func(val *uint64) string {
-		if val == nil {
-			return "<nil>"
-		}
-		return fmt.Sprintf("%d (%#x)", *val, *val)
-	}
-	data := [][]string{
-		{"databaseVersion", pp(rawdb.ReadDatabaseVersion(db))},
-		{"headBlockHash", fmt.Sprintf("%v", rawdb.ReadHeadBlockHash(db))},
-		{"headFastBlockHash", fmt.Sprintf("%v", rawdb.ReadHeadFastBlockHash(db))},
-		{"headHeaderHash", fmt.Sprintf("%v", rawdb.ReadHeadHeaderHash(db))}}
+	data := rawdb.ReadChainMetadata(db)
+	data = append(data, []string{"frozen", fmt.Sprintf("%d items", ancients)})
+	data = append(data, []string{"snapshotGenerator", snapshot.ParseGeneratorStatus(rawdb.ReadSnapshotGenerator(db))})
 	if b := rawdb.ReadHeadBlock(db); b != nil {
 		data = append(data, []string{"headBlock.Hash", fmt.Sprintf("%v", b.Hash())})
 		data = append(data, []string{"headBlock.Root", fmt.Sprintf("%v", b.Root())})
 		data = append(data, []string{"headBlock.Number", fmt.Sprintf("%d (%#x)", b.Number(), b.Number())})
 	}
-	if b := rawdb.ReadSkeletonSyncStatus(db); b != nil {
-		data = append(data, []string{"SkeletonSyncStatus", string(b)})
-	}
 	if h := rawdb.ReadHeadHeader(db); h != nil {
 		data = append(data, []string{"headHeader.Hash", fmt.Sprintf("%v", h.Hash())})
 		data = append(data, []string{"headHeader.Root", fmt.Sprintf("%v", h.Root)})
 		data = append(data, []string{"headHeader.Number", fmt.Sprintf("%d (%#x)", h.Number, h.Number)})
 	}
-	data = append(data, [][]string{{"frozen", fmt.Sprintf("%d items", ancients)},
-		{"lastPivotNumber", pp(rawdb.ReadLastPivotNumber(db))},
-		{"len(snapshotSyncStatus)", fmt.Sprintf("%d bytes", len(rawdb.ReadSnapshotSyncStatus(db)))},
-		{"snapshotGenerator", snapshot.ParseGeneratorStatus(rawdb.ReadSnapshotGenerator(db))},
-		{"snapshotDisabled", fmt.Sprintf("%v", rawdb.ReadSnapshotDisabled(db))},
-		{"snapshotJournal", fmt.Sprintf("%d bytes", len(rawdb.ReadSnapshotJournal(db)))},
-		{"snapshotRecoveryNumber", pp(rawdb.ReadSnapshotRecoveryNumber(db))},
-		{"snapshotRoot", fmt.Sprintf("%v", rawdb.ReadSnapshotRoot(db))},
-		{"txIndexTail", pp(rawdb.ReadTxIndexTail(db))},
-		{"fastTxLookupLimit", pp(rawdb.ReadFastTxLookupLimit(db))},
-	}...)
 	table := tablewriter.NewWriter(os.Stdout)
 	table.SetHeader([]string{"Field", "Value"})
 	table.AppendBulk(data)
diff --git a/core/rawdb/database.go b/core/rawdb/database.go
index ef80c251a457..4804dd86ff28 100644
--- a/core/rawdb/database.go
+++ b/core/rawdb/database.go
@@ -202,6 +202,7 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, ancient string, namespace st
 	// Create the idle freezer instance
 	frdb, err := newChainFreezer(resolveChainFreezerDir(ancient), namespace, readonly, freezerTableSize, chainFreezerNoSnappy)
 	if err != nil {
+		printChainMetadata(db)
 		return nil, err
 	}
 	// Since the freezer can be stored separately from the user's key-value database,
@@ -233,8 +234,10 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, ancient string, namespace st
 			// the freezer and the key-value store.
 			frgenesis, err := frdb.Ancient(chainFreezerHashTable, 0)
 			if err != nil {
+				printChainMetadata(db)
 				return nil, fmt.Errorf("failed to retrieve genesis from ancient %v", err)
 			} else if !bytes.Equal(kvgenesis, frgenesis) {
+				printChainMetadata(db)
 				return nil, fmt.Errorf("genesis mismatch: %#x (leveldb) != %#x (ancients)", kvgenesis, frgenesis)
 			}
 			// Key-value store and freezer belong to the same network. Ensure that they
@@ -242,8 +245,19 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, ancient string, namespace st
 			if kvhash, _ := db.Get(headerHashKey(frozen)); len(kvhash) == 0 {
 				// Subsequent header after the freezer limit is missing from the database.
 				// Reject startup if the database has a more recent head.
-				if ldbNum := *ReadHeaderNumber(db, ReadHeadHeaderHash(db)); ldbNum > frozen-1 {
-					return nil, fmt.Errorf("gap in the chain between ancients (#%d) and leveldb (#%d) ", frozen, ldbNum)
+				if head := *ReadHeaderNumber(db, ReadHeadHeaderHash(db)); head > frozen-1 {
+					// Find the smallest block stored in the key-value store
+					// in range of [frozen, head]
+					var number uint64
+					for number = frozen; number <= head; number++ {
+						if present, _ := db.Has(headerHashKey(number)); present {
+							break
+						}
+					}
+					// We are about to exit on error. Print database metadata before exiting
+					printChainMetadata(db)
+					return nil, fmt.Errorf("gap in the chain between ancients [0 - #%d] and leveldb [#%d - #%d] ",
+						frozen-1, number, head)
 				}
 				// Database contains only older data than the freezer, this happens if the
 				// state was wiped and reinited from an existing freezer.
@@ -260,6 +274,7 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, ancient string, namespace st
 				// Key-value store contains more data than the genesis block, make sure we
 				// didn't freeze anything yet.
 				if kvblob, _ := db.Get(headerHashKey(1)); len(kvblob) == 0 {
+					printChainMetadata(db)
 					return nil, errors.New("ancient chain segments already extracted, please set --datadir.ancient to the correct path")
 				}
 				// Block #1 is still in the database, we're allowed to init a new freezer
@@ -581,3 +596,42 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
 	}
 	return nil
 }
+
+// printChainMetadata prints out chain metadata to stderr.
+func printChainMetadata(db ethdb.KeyValueStore) {
+	fmt.Fprintf(os.Stderr, "Chain metadata\n")
+	for _, v := range ReadChainMetadata(db) {
+		fmt.Fprintf(os.Stderr, "  %s\n", strings.Join(v, ": "))
+	}
+	fmt.Fprintf(os.Stderr, "\n\n")
+}
+
+// ReadChainMetadata returns a set of key/value pairs that contains information
+// about the database chain status. This can be used for diagnostic purposes
+// when investigating the state of the node.
+func ReadChainMetadata(db ethdb.KeyValueStore) [][]string {
+	pp := func(val *uint64) string {
+		if val == nil {
+			return "<nil>"
+		}
+		return fmt.Sprintf("%d (%#x)", *val, *val)
+	}
+	data := [][]string{
+		{"databaseVersion", pp(ReadDatabaseVersion(db))},
+		{"headBlockHash", fmt.Sprintf("%v", ReadHeadBlockHash(db))},
+		{"headFastBlockHash", fmt.Sprintf("%v", ReadHeadFastBlockHash(db))},
+		{"headHeaderHash", fmt.Sprintf("%v", ReadHeadHeaderHash(db))},
+		{"lastPivotNumber", pp(ReadLastPivotNumber(db))},
+		{"len(snapshotSyncStatus)", fmt.Sprintf("%d bytes", len(ReadSnapshotSyncStatus(db)))},
+		{"snapshotDisabled", fmt.Sprintf("%v", ReadSnapshotDisabled(db))},
+		{"snapshotJournal", fmt.Sprintf("%d bytes", len(ReadSnapshotJournal(db)))},
+		{"snapshotRecoveryNumber", pp(ReadSnapshotRecoveryNumber(db))},
+		{"snapshotRoot", fmt.Sprintf("%v", ReadSnapshotRoot(db))},
+		{"txIndexTail", pp(ReadTxIndexTail(db))},
+		{"fastTxLookupLimit", pp(ReadFastTxLookupLimit(db))},
+	}
+	if b := ReadSkeletonSyncStatus(db); b != nil {
+		data = append(data, []string{"SkeletonSyncStatus", string(b)})
+	}
+	return data
+}
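
Not part of the patch: a minimal, hypothetical sketch of how the new rawdb.ReadChainMetadata helper can be driven from a standalone program. It runs against an empty in-memory database purely for illustration; a node would pass its real key-value store, as showMetaData and printChainMetadata above do.

package main

import (
	"fmt"
	"strings"

	"github.com/ethereum/go-ethereum/core/rawdb"
)

func main() {
	// In-memory stand-in for the node's key-value store; most fields will
	// read as empty/<nil> here since no chain data has been written to it.
	db := rawdb.NewMemoryDatabase()
	for _, kv := range rawdb.ReadChainMetadata(db) {
		fmt.Println(strings.Join(kv, ": "))
	}
}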