Skip to content

Commit

Permalink
update
Browse files Browse the repository at this point in the history
  • Loading branch information
Mercybudda committed Dec 7, 2021
1 parent ec56f6d commit b019eb7
Show file tree
Hide file tree
Showing 7 changed files with 33 additions and 128 deletions.
2 changes: 2 additions & 0 deletions cmd/geth/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -160,6 +160,8 @@ var (
utils.MinerNotifyFullFlag,
configFileFlag,
utils.CatalystFlag,
utils.AncientBackUpFlag,
utils.GenesisFlag,
}

rpcFlags = []cli.Flag{
Expand Down
37 changes: 9 additions & 28 deletions cmd/geth/snapshot.go
Original file line number Diff line number Diff line change
Expand Up @@ -82,21 +82,6 @@ WARNING: It's necessary to delete the trie clean cache after the pruning.
If you specify another directory for the trie clean cache via "--cache.trie.journal"
during the use of Geth, please also specify it here for correct deletion. Otherwise
the trie clean cache with default directory will be deleted.
`,
},
{
Name: "prune-block-pre-backup",
Usage: "Back up the ancient block data",
ArgsUsage: "<root>",
Action: utils.MigrateFlags(pruneBlockPreBackUp),
Category: "MISCELLANEOUS COMMANDS",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.AncientFlag,
utils.AncientBackUpFlag,
},
Description: `
Back up the ancient block data offline before prune block started.
`,
},
{
Expand All @@ -109,6 +94,7 @@ Back up the ancient block data offline before prune block started.
utils.DataDirFlag,
utils.AncientFlag,
utils.AncientBackUpFlag,
utils.GenesisFlag,
},
Description: `
Offline prune for block data.
Expand Down Expand Up @@ -183,9 +169,12 @@ It's also usable without snapshot enabled.
}
)

func pruneBlockPreBackUp(ctx *cli.Context) error {
func pruneBlock(ctx *cli.Context) error {
stack, config := makeConfigNode(ctx)
//defer stack.Close()
chaindb := utils.MakeChainDatabase(ctx, stack, false)
// Make sure we have a valid genesis JSON
genesisPath := ctx.Args().First()
genesisPath := ctx.GlobalString(utils.GenesisFlag.Name)
if len(genesisPath) == 0 {
utils.Fatalf("Must supply path to genesis JSON file")
}
Expand All @@ -199,14 +188,10 @@ func pruneBlockPreBackUp(ctx *cli.Context) error {
if err := json.NewDecoder(file).Decode(genesis); err != nil {
utils.Fatalf("invalid genesis file: %v", err)
}

stack, config := makeConfigNode(ctx)
defer stack.Close()
freezer := config.Eth.DatabaseFreezer
chaindb := utils.MakeChainDatabase(ctx, stack, false)
if err != nil {
utils.Fatalf("Failed to open ancient database: %v", err)
utils.Fatalf("Failed to decode genesis: %v", err)
}
freezer := config.Eth.DatabaseFreezer

for _, name := range []string{"chaindata"} {
root := stack.ResolvePath(name) // /Users/user/storage/Private_BSC_Storage/build/bin/node/geth/chaindata
Expand All @@ -221,16 +206,12 @@ func pruneBlockPreBackUp(ctx *cli.Context) error {
utils.Fatalf("Failed to create block pruner", err)
}
backfreezer := filepath.Join(root, "ancient_back_up")
if err := pruner.BlockPruneBackUp(name, config.Eth.DatabaseCache, config.Eth.DatabaseHandles, backfreezer, "eth/db/chaindata/", false); err != nil {
if err := pruner.BlockPruneBackUp(name, config.Eth.DatabaseCache, utils.MakeDatabaseHandles(), backfreezer, "", false); err != nil {
log.Error("Failed to back up block", "err", err)
return err
}
}
log.Info("geth block offline pruning backup successfully")
return nil
}

func pruneBlock(ctx *cli.Context) error {
oldAncientPath := ctx.GlobalString(utils.AncientFlag.Name)
newAncientPath := ctx.GlobalString(utils.AncientBackUpFlag.Name)
if err := pruner.BlockPrune(oldAncientPath, newAncientPath); err != nil {
Expand Down
8 changes: 6 additions & 2 deletions cmd/utils/flags.go
Original file line number Diff line number Diff line change
Expand Up @@ -135,8 +135,12 @@ var (
Usage: "Data directory for ancient chain segments (default = inside chaindata)",
}
AncientBackUpFlag = DirectoryFlag{
Name: "datadir.ancient",
Usage: "Data directory for ancient directory backup (default = inside chaindata)",
Name: "datadir.backup",
Usage: "Data directory for ancient directory backup",
}
GenesisFlag = DirectoryFlag{
Name: "datadir.genesis",
Usage: "Data directory for genesis file",
}
DiffFlag = DirectoryFlag{
Name: "datadir.diff",
Expand Down
5 changes: 5 additions & 0 deletions core/rawdb/database.go
Original file line number Diff line number Diff line change
Expand Up @@ -122,6 +122,11 @@ func (db *nofreezedb) AppendAncient(number uint64, hash, header, body, receipts,
return errNotSupported
}

// AppendAncientNoBody returns an error as we don't have a backing chain freezer.
// It is the no-freezer counterpart of the freezer's AppendAncientNoBody (which
// appends hash/header/receipts/td but no block body): with no ancient store
// behind nofreezedb, every append attempt is rejected with errNotSupported.
func (db *nofreezedb) AppendAncientNoBody(number uint64, hash, header, receipts, td []byte) error {
return errNotSupported
}

// TruncateAncients returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) TruncateAncients(items uint64) error {
return errNotSupported
Expand Down
47 changes: 0 additions & 47 deletions core/rawdb/freezer.go
Original file line number Diff line number Diff line change
Expand Up @@ -240,53 +240,6 @@ func (f *freezer) AppendAncient(number uint64, hash, header, body, receipts, td
return nil
}

// AppendAncientNoBody injects all binary blobs except for the block body at
// the end of the append-only immutable table files.
//
// Notably, this function is lock free but kind of thread-safe. All out-of-order
// injections will be rejected. But if two injections with the same number
// happen at the same time, we can get into trouble.
func (f *freezer) AppendAncientNoBody(number uint64, hash, header, receipts, td []byte) (err error) {
// Writes into a read-only freezer are rejected up front.
if f.readonly {
return errReadOnly
}
// Ensure the binary blobs we are appending is continuous with freezer.
if atomic.LoadUint64(&f.frozen) != number {
return errOutOrderInsertion
}
// Rollback all inserted data if any insertion below failed to ensure
// the tables won't out of sync.
defer func() {
if err != nil {
rerr := f.repair()
if rerr != nil {
log.Crit("Failed to repair freezer", "err", rerr)
}
log.Info("Append ancient failed", "number", number, "err", err)
}
}()
// Inject all the components into the relevant data tables
if err := f.tables[freezerHashTable].Append(f.frozen, hash[:]); err != nil {
log.Error("Failed to append ancient hash", "number", f.frozen, "hash", hash, "err", err)
return err
}
if err := f.tables[freezerHeaderTable].Append(f.frozen, header); err != nil {
log.Error("Failed to append ancient header", "number", f.frozen, "hash", hash, "err", err)
return err
}

// NOTE: the body table (freezerBodiesTable) is intentionally skipped here —
// that omission is the only difference from the regular AppendAncient.
if err := f.tables[freezerReceiptTable].Append(f.frozen, receipts); err != nil {
log.Error("Failed to append ancient receipts", "number", f.frozen, "hash", hash, "err", err)
return err
}
if err := f.tables[freezerDifficultyTable].Append(f.frozen, td); err != nil {
log.Error("Failed to append ancient difficulty", "number", f.frozen, "hash", hash, "err", err)
return err
}
atomic.AddUint64(&f.frozen, 1) // Only modify atomically
return nil
}

// TruncateAncients discards any recent data above the provided threshold number.
func (f *freezer) TruncateAncients(items uint64) error {
if f.readonly {
Expand Down
59 changes: 11 additions & 48 deletions core/state/pruner/pruner.go
Original file line number Diff line number Diff line change
Expand Up @@ -262,70 +262,33 @@ func (p *BlockPruner) BlockPruneBackUp(name string, cache, handles int, backFree
//Back-up the necessary data within original ancient directory, create new freezer backup directory backFreezer
//db, err = rawdb.NewLevelDBDatabaseWithFreezer(root, cache, handles, backFreezer, namespace, readonly)
start := time.Now()
chainDb := p.db
chainDbBack, err := p.n.OpenDatabaseWithFreezer(name, cache, handles, backFreezer, namespace, readonly)
if err != nil {
log.Error("Failed to open ancient database: %v", err)
return err
}

//write back-up data to new chainDb
// Restore the last known head block

//write genesis block firstly
genesis := p.genesis
if _, _, err := core.SetupGenesisBlock(chainDbBack, genesis); err != nil {
log.Error("Failed to write genesis block: %v", err)
return err
}

//write most recent 128 blocks data
headBlock := rawdb.ReadHeadBlock(chainDb)
if headBlock == nil {
return errors.New("Failed to load head block")
//write the latest 128 blocks data of the ancient db
// If we can't access the freezer or it's empty, abort
frozen, err := p.db.Ancients()
if err != nil || frozen == 0 {
return errors.New("Can't access the freezer or it's empty, abort")
}
lastBlockNumber := headBlock.NumberU64()

//For block number 1 to current block-128, only back-up receipts, difficulties, block number->hash but no body data anymore
for blockNumber := lastBlockNumber - 128; blockNumber >= 1; blockNumber-- {
blockHash := rawdb.ReadCanonicalHash(chainDb, blockNumber)
block := rawdb.ReadBlock(chainDb, blockHash, blockNumber)
receipts := rawdb.ReadRawReceipts(chainDb, blockHash, blockNumber)
// Calculate the total difficulty of the block
td := rawdb.ReadTd(chainDb, blockHash, blockNumber)
if td == nil {
return consensus.ErrUnknownAncestor
}
externTd := new(big.Int).Add(block.Difficulty(), td)
// Encode all block components to RLP format.
headerBlob, err := rlp.EncodeToBytes(block.Header())
if err != nil {
log.Crit("Failed to RLP encode block header", "err", err)
}

storageReceipts := make([]*types.ReceiptForStorage, len(receipts))
for i, receipt := range receipts {
storageReceipts[i] = (*types.ReceiptForStorage)(receipt)
}
receiptBlob, err := rlp.EncodeToBytes(storageReceipts)
if err != nil {
log.Crit("Failed to RLP encode block receipts", "err", err)
}
tdBlob, err := rlp.EncodeToBytes(externTd)
if err != nil {
log.Crit("Failed to RLP encode block total difficulty", "err", err)
}
// Write all blob to flatten files.
err = chainDbBack.AppendAncientNoBody(block.NumberU64(), block.Hash().Bytes(), headerBlob, receiptBlob, tdBlob)
if err != nil {
log.Crit("Failed to write block data to ancient store", "err", err)
}

return nil
start_index := frozen - 128
if start_index < 0 {
start_index = 0
}

//All ancient data within the most recent 128 blocks write into new ancient_back directory
for blockNumber := lastBlockNumber - 127; blockNumber <= lastBlockNumber; blockNumber++ {
chainDb := p.db
for blockNumber := start_index; blockNumber < frozen; blockNumber++ {
blockHash := rawdb.ReadCanonicalHash(chainDb, blockNumber)
block := rawdb.ReadBlock(chainDb, blockHash, blockNumber)
receipts := rawdb.ReadRawReceipts(chainDb, blockHash, blockNumber)
Expand All @@ -337,6 +300,7 @@ func (p *BlockPruner) BlockPruneBackUp(name string, cache, handles int, backFree
externTd := new(big.Int).Add(block.Difficulty(), td)
rawdb.WriteAncientBlock(chainDbBack, block, receipts, externTd)
}
//chainDb.TruncateAncients(start_index - 1)

chainDb.Close()
chainDbBack.Close()
Expand All @@ -359,7 +323,6 @@ func BlockPrune(oldAncientPath, newAncientPath string) error {
return err
}
return nil

}

// Prune deletes all historical state nodes except the nodes belong to the
Expand Down
3 changes: 0 additions & 3 deletions ethdb/database.go
Original file line number Diff line number Diff line change
Expand Up @@ -95,9 +95,6 @@ type AncientWriter interface {
// Sync flushes all in-memory ancient store data to disk.
Sync() error

// AppendAncient injects all binary blobs except for block body at the end of the
// append-only immutable table files.
AppendAncientNoBody(number uint64, hash, header, receipts, td []byte) error
}

// Reader contains the methods required to read data from both key-value as well as
Expand Down

0 comments on commit b019eb7

Please sign in to comment.