From 6498101403d53bf9591a7c5a3661c8a0481d6089 Mon Sep 17 00:00:00 2001 From: GalaIO Date: Mon, 4 Mar 2024 08:41:41 +0800 Subject: [PATCH] feat: support blob storage & miscs; (#2229) * chainconfig: use cancun fork for BSC; feat: fill WithdrawalsHash when BSC enable cancun fork; * rawdb: support to CRUD blobs; freezer: support to freeze block blobs; * freezer: support to freeze block blobs; * blockchain: add blob cache & blob query helper; * blockchain: add blob cache & blob query helper; * freezer: refactor addition table logic, add uts; * blobexpiry: add more extra expiry time, and logs; * ci: fix UT fails; * fix: fix some PR review comments; * parlia: implement IsDataAvailable function; * blob: refactor blob transfer logic; * blob: support config blob extra reserve; --- cmd/geth/main.go | 1 + cmd/utils/flags.go | 12 +++ consensus/consensus.go | 1 + consensus/parlia/blob_sidecar.go | 40 +++++++++ consensus/parlia/parlia.go | 60 ++++++++++--- core/blockchain.go | 31 +++++++ core/blockchain_reader.go | 17 ++++ core/chain_makers.go | 3 + core/genesis.go | 3 + core/rawdb/accessors_chain.go | 128 ++++++++++++++++++++++++++++ core/rawdb/accessors_chain_test.go | 122 ++++++++++++++++++++++++++- core/rawdb/ancient_scheme.go | 4 + core/rawdb/chain_freezer.go | 119 +++++++++++++++++++++++++- core/rawdb/database.go | 29 +++++-- core/rawdb/freezer.go | 131 +++++++++++++++++++++++++++-- core/rawdb/freezer_batch.go | 14 ++- core/rawdb/freezer_resettable.go | 7 ++ core/rawdb/freezer_table.go | 51 +++++++++++ core/rawdb/freezer_table_test.go | 95 +++++++++++++++++++++ core/rawdb/freezer_test.go | 66 +++++++++++++++ core/rawdb/prunedfreezer.go | 8 ++ core/rawdb/schema.go | 7 ++ core/rawdb/table.go | 8 ++ core/state/pruner/pruner.go | 18 +++- core/types/block.go | 18 ++++ core/types/tx_blob.go | 2 + core/types/tx_blob_test.go | 57 +++++++++++++ eth/backend.go | 8 ++ eth/ethconfig/config.go | 6 +- ethdb/database.go | 21 ++++- ethdb/remotedb/remotedb.go | 12 +++ miner/worker.go | 3 + params/config.go | 15 +++- params/protocol_params.go | 3 + 34 files changed, 1078 insertions(+), 42 deletions(-) create mode 100644 consensus/parlia/blob_sidecar.go diff --git a/cmd/geth/main.go b/cmd/geth/main.go index e1242912f2..12d164655e 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -172,6 +172,7 @@ var ( utils.VoteJournalDirFlag, utils.LogDebugFlag, utils.LogBacktraceAtFlag, + utils.BlobExtraReserveFlag, }, utils.NetworkFlags, utils.DatabaseFlags) rpcFlags = []cli.Flag{ diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 3514734c90..37a650f078 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -1094,6 +1094,13 @@ Please note that --` + MetricsHTTPFlag.Name + ` must be set to start the server. 
Usage: "Path for the voteJournal dir in fast finality feature (default = inside the datadir)", Category: flags.FastFinalityCategory, } + + // Blob setting + BlobExtraReserveFlag = &cli.Int64Flag{ + Name: "blob.extra-reserve", + Usage: "Extra reserve threshold for blob, blob never expires when -1 is set, default 28800", + Value: params.BlobExtraReserveThreshold, + } ) var ( @@ -2112,6 +2119,11 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) { if err := kzg4844.UseCKZG(ctx.String(CryptoKZGFlag.Name) == "ckzg"); err != nil { Fatalf("Failed to set KZG library implementation to %s: %v", ctx.String(CryptoKZGFlag.Name), err) } + + // blob setting + if ctx.IsSet(BlobExtraReserveFlag.Name) { + cfg.BlobExtraReserve = ctx.Int64(BlobExtraReserveFlag.Name) + } } // SetDNSDiscoveryDefaults configures DNS discovery with the given URL if diff --git a/consensus/consensus.go b/consensus/consensus.go index cb5f1841ae..e1b56e99d0 100644 --- a/consensus/consensus.go +++ b/consensus/consensus.go @@ -157,4 +157,5 @@ type PoSA interface { GetFinalizedHeader(chain ChainHeaderReader, header *types.Header) *types.Header VerifyVote(chain ChainHeaderReader, vote *types.VoteEnvelope) error IsActiveValidatorAt(chain ChainHeaderReader, header *types.Header, checkVoteKeyFn func(bLSPublicKey *types.BLSPublicKey) bool) bool + IsDataAvailable(chain ChainHeaderReader, block *types.Block) error } diff --git a/consensus/parlia/blob_sidecar.go b/consensus/parlia/blob_sidecar.go new file mode 100644 index 0000000000..5b7a87da24 --- /dev/null +++ b/consensus/parlia/blob_sidecar.go @@ -0,0 +1,40 @@ +package parlia + +import ( + "crypto/sha256" + "fmt" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto/kzg4844" +) + +// validateBlobSidecar it is same as validateBlobSidecar in core/txpool/validation.go +func validateBlobSidecar(hashes []common.Hash, sidecar *types.BlobTxSidecar) error { + if len(sidecar.Blobs) != len(hashes) { + return fmt.Errorf("invalid number of %d blobs compared to %d blob hashes", len(sidecar.Blobs), len(hashes)) + } + if len(sidecar.Commitments) != len(hashes) { + return fmt.Errorf("invalid number of %d blob commitments compared to %d blob hashes", len(sidecar.Commitments), len(hashes)) + } + if len(sidecar.Proofs) != len(hashes) { + return fmt.Errorf("invalid number of %d blob proofs compared to %d blob hashes", len(sidecar.Proofs), len(hashes)) + } + // Blob quantities match up, validate that the provers match with the + // transaction hash before getting to the cryptography + hasher := sha256.New() + for i, vhash := range hashes { + computed := kzg4844.CalcBlobHashV1(hasher, &sidecar.Commitments[i]) + if vhash != computed { + return fmt.Errorf("blob %d: computed hash %#x mismatches transaction one %#x", i, computed, vhash) + } + } + // Blob commitments match with the hashes in the transaction, verify the + // blobs themselves via KZG + for i := range sidecar.Blobs { + if err := kzg4844.VerifyBlobProof(sidecar.Blobs[i], sidecar.Commitments[i], sidecar.Proofs[i]); err != nil { + return fmt.Errorf("invalid blob %d: %v", i, err) + } + } + return nil +} diff --git a/consensus/parlia/parlia.go b/consensus/parlia/parlia.go index 78f7587810..1369459b6a 100644 --- a/consensus/parlia/parlia.go +++ b/consensus/parlia/parlia.go @@ -350,6 +350,46 @@ func (p *Parlia) VerifyHeaders(chain consensus.ChainHeaderReader, headers []*typ return abort, results } +// IsDataAvailable it checks that the blobTx block has 
available blob data +func (p *Parlia) IsDataAvailable(chain consensus.ChainHeaderReader, block *types.Block) error { + if !p.chainConfig.IsCancun(block.Number(), block.Time()) { + return nil + } + // only required to check within BlobReserveThreshold block's DA + currentHeader := chain.CurrentHeader() + if block.NumberU64() < currentHeader.Number.Uint64()-params.BlobReserveThreshold { + return nil + } + + // alloc block's versionedHashes + versionedHashes := make([][]common.Hash, 0, len(block.Transactions())) + for _, tx := range block.Transactions() { + versionedHashes = append(versionedHashes, tx.BlobHashes()) + } + blobs := block.Blobs() + if len(versionedHashes) != len(blobs) { + return errors.New("blobs do not match the versionedHashes length") + } + + // check blob amount + blobCnt := 0 + for _, h := range versionedHashes { + blobCnt += len(h) + } + if blobCnt > params.MaxBlobGasPerBlock/params.BlobTxBlobGasPerBlob { + return fmt.Errorf("too many blobs in block: have %d, permitted %d", blobCnt, params.MaxBlobGasPerBlock/params.BlobTxBlobGasPerBlob) + } + + // check blob and versioned hash + for i := range versionedHashes { + if err := validateBlobSidecar(versionedHashes[i], blobs[i]); err != nil { + return err + } + } + + return nil +} + // getValidatorBytesFromHeader returns the validators bytes extracted from the header's extra field if exists. // The validators bytes would be contained only in the epoch block's header, and its each validator bytes length is fixed. // On luban fork, we introduce vote attestation into the header's extra field, so extra format is different from before. @@ -598,20 +638,18 @@ func (p *Parlia) verifyHeader(chain consensus.ChainHeaderReader, header *types.H return fmt.Errorf("invalid excessBlobGas: have %d, expected nil", header.ExcessBlobGas) case header.BlobGasUsed != nil: return fmt.Errorf("invalid blobGasUsed: have %d, expected nil", header.BlobGasUsed) + case header.ParentBeaconRoot != nil: + return fmt.Errorf("invalid parentBeaconRoot, have %#x, expected nil", header.ParentBeaconRoot) + case header.WithdrawalsHash != nil: + return fmt.Errorf("invalid WithdrawalsHash, have %#x, expected nil", header.WithdrawalsHash) } } else { - if err := eip4844.VerifyEIP4844Header(parent, header); err != nil { - return err + switch { + case header.ParentBeaconRoot != nil: + return fmt.Errorf("invalid parentBeaconRoot, have %#x, expected nil", header.ParentBeaconRoot) + case *header.WithdrawalsHash != common.Hash{}: + return errors.New("header has wrong WithdrawalsHash") } - } - - if !cancun && header.ExcessBlobGas != nil { - return fmt.Errorf("invalid excessBlobGas: have %d, expected nil", header.ExcessBlobGas) - } - if !cancun && header.BlobGasUsed != nil { - return fmt.Errorf("invalid blobGasUsed: have %d, expected nil", header.BlobGasUsed) - } - if cancun { if err := eip4844.VerifyEIP4844Header(parent, header); err != nil { return err } diff --git a/core/blockchain.go b/core/blockchain.go index 839ef17726..8b79436f99 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -110,6 +110,7 @@ const ( blockCacheLimit = 256 diffLayerCacheLimit = 1024 receiptsCacheLimit = 10000 + blobsCacheLimit = 10000 txLookupCacheLimit = 1024 maxBadBlockLimit = 16 maxFutureBlocks = 256 @@ -277,6 +278,7 @@ type BlockChain struct { receiptsCache *lru.Cache[common.Hash, []*types.Receipt] blockCache *lru.Cache[common.Hash, *types.Block] txLookupCache *lru.Cache[common.Hash, txLookup] + blobsCache *lru.Cache[common.Hash, types.BlobTxSidecars] // future blocks are blocks added for 
later processing futureBlocks *lru.Cache[common.Hash, *types.Block] @@ -358,6 +360,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis bodyCache: lru.NewCache[common.Hash, *types.Body](bodyCacheLimit), bodyRLPCache: lru.NewCache[common.Hash, rlp.RawValue](bodyCacheLimit), receiptsCache: lru.NewCache[common.Hash, []*types.Receipt](receiptsCacheLimit), + blobsCache: lru.NewCache[common.Hash, types.BlobTxSidecars](blobsCacheLimit), blockCache: lru.NewCache[common.Hash, *types.Block](blockCacheLimit), txLookupCache: lru.NewCache[common.Hash, txLookup](txLookupCacheLimit), futureBlocks: lru.NewCache[common.Hash, *types.Block](maxFutureBlocks), @@ -989,6 +992,7 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, time uint64, root common.Ha bc.bodyCache.Purge() bc.bodyRLPCache.Purge() bc.receiptsCache.Purge() + bc.blobsCache.Purge() bc.blockCache.Purge() bc.txLookupCache.Purge() bc.futureBlocks.Purge() @@ -1376,6 +1380,10 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [ // Write all chain data to ancients. td := bc.GetTd(first.Hash(), first.NumberU64()) + // TODO(GalaIO): when sync the history block, it needs store blobs too. + //if isCancun() { + // writeSize, err := rawdb.WriteAncientBlocksWithBlobs(bc.db, blockChain, receiptChain, td, blobs) + //} writeSize, err := rawdb.WriteAncientBlocks(bc.db, blockChain, receiptChain, td) if err != nil { log.Error("Error importing chain data to ancients", "err", err) @@ -1454,6 +1462,10 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [ // Write all the data out into the database rawdb.WriteBody(batch, block.Hash(), block.NumberU64(), block.Body()) rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receiptChain[i]) + // TODO(GalaIO): if enable cancun, need write blobs + //if bc.chainConfig.IsCancun(block.Number(), block.Time()) { + // rawdb.WriteBlobs(batch, block.Hash(), block.NumberU64(), blobs) + //} // Write everything belongs to the blocks into the database. So that // we can ensure all components of body is completed(body, receipts) @@ -1523,6 +1535,10 @@ func (bc *BlockChain) writeBlockWithoutState(block *types.Block, td *big.Int) (e batch := bc.db.NewBatch() rawdb.WriteTd(batch, block.Hash(), block.NumberU64(), td) rawdb.WriteBlock(batch, block) + // if enable cancun, it needs to write blobs too + if bc.chainConfig.IsCancun(block.Number(), block.Time()) { + rawdb.WriteBlobs(batch, block.Hash(), block.NumberU64(), block.Blobs()) + } if err := batch.Write(); err != nil { log.Crit("Failed to write block into disk", "err", err) } @@ -1565,6 +1581,10 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types. rawdb.WriteTd(blockBatch, block.Hash(), block.NumberU64(), externTd) rawdb.WriteBlock(blockBatch, block) rawdb.WriteReceipts(blockBatch, block.Hash(), block.NumberU64(), receipts) + // if enable cancun, it needs to write blobs too + if bc.chainConfig.IsCancun(block.Number(), block.Time()) { + rawdb.WriteBlobs(blockBatch, block.Hash(), block.NumberU64(), block.Blobs()) + } rawdb.WritePreimages(blockBatch, state.Preimages()) if err := blockBatch.Write(); err != nil { log.Crit("Failed to write block into disk", "err", err) @@ -1802,6 +1822,7 @@ func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) { // racey behaviour. 
If a sidechain import is in progress, and the historic state // is imported, but then new canon-head is added before the actual sidechain // completes, then the historic state could be pruned again +// TODO(GalaIO): if enable cancun, it must set received blob cache for check, remove cache when failed func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error) { // If the chain is terminating, don't even bother starting up. if bc.insertStopped() { @@ -1984,6 +2005,16 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error) if parent == nil { parent = bc.GetHeader(block.ParentHash(), block.NumberU64()-1) } + + // check blob data available first + // TODO(GalaIO): move IsDataAvailable combine into verifyHeaders? + if bc.chainConfig.IsCancun(block.Number(), block.Time()) { + if posa, ok := bc.engine.(consensus.PoSA); ok { + if err = posa.IsDataAvailable(bc, block); err != nil { + return it.index, err + } + } + } statedb, err := state.NewWithSharedPool(parent.Root, bc.stateCache, bc.snaps) if err != nil { return it.index, err diff --git a/core/blockchain_reader.go b/core/blockchain_reader.go index b3cc434193..c55f9496c6 100644 --- a/core/blockchain_reader.go +++ b/core/blockchain_reader.go @@ -247,6 +247,23 @@ func (bc *BlockChain) GetReceiptsByHash(hash common.Hash) types.Receipts { return receipts } +// GetBlobsByHash retrieves the blobs for all transactions in a given block. +func (bc *BlockChain) GetBlobsByHash(hash common.Hash) types.BlobTxSidecars { + if blobs, ok := bc.blobsCache.Get(hash); ok { + return blobs + } + number := rawdb.ReadHeaderNumber(bc.db, hash) + if number == nil { + return nil + } + blobs := rawdb.ReadRawBlobs(bc.db, hash, *number) + if blobs == nil { + return nil + } + bc.blobsCache.Add(hash, blobs) + return blobs +} + // GetUnclesInChain retrieves all the uncles from a given block backwards until // a specific distance is reached. func (bc *BlockChain) GetUnclesInChain(block *types.Block, length int) []*types.Header { diff --git a/core/chain_makers.go b/core/chain_makers.go index 932addc396..20924368a6 100644 --- a/core/chain_makers.go +++ b/core/chain_makers.go @@ -457,6 +457,9 @@ func (cm *chainMaker) makeHeader(parent *types.Block, state *state.StateDB, engi header.ExcessBlobGas = &excessBlobGas header.BlobGasUsed = new(uint64) header.ParentBeaconRoot = new(common.Hash) + if cm.config.Parlia != nil { + header.WithdrawalsHash = new(common.Hash) + } } return header } diff --git a/core/genesis.go b/core/genesis.go index 1070f470e6..ad3d062b23 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -509,6 +509,9 @@ func (g *Genesis) ToBlock() *types.Block { if head.BlobGasUsed == nil { head.BlobGasUsed = new(uint64) } + if conf.Parlia != nil { + head.WithdrawalsHash = new(common.Hash) + } } } return types.NewBlock(head, nil, nil, nil, trie.NewStackTrie(nil)).WithWithdrawals(withdrawals) diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go index 1a7552d698..5e9ce1e33d 100644 --- a/core/rawdb/accessors_chain.go +++ b/core/rawdb/accessors_chain.go @@ -360,6 +360,20 @@ func ReadHeader(db ethdb.Reader, hash common.Hash, number uint64) *types.Header return header } +// ReadHeaderAndRaw retrieves the block header corresponding to the hash. 
+func ReadHeaderAndRaw(db ethdb.Reader, hash common.Hash, number uint64) (*types.Header, rlp.RawValue) { + data := ReadHeaderRLP(db, hash, number) + if len(data) == 0 { + return nil, nil + } + header := new(types.Header) + if err := rlp.DecodeBytes(data, header); err != nil { + log.Error("Invalid block header RLP", "hash", hash, "err", err) + return nil, nil + } + return header, data +} + // WriteHeader stores a block header into the database and also stores the hash- // to-number mapping. func WriteHeader(db ethdb.KeyValueWriter, header *types.Header) { @@ -809,6 +823,98 @@ func WriteAncientBlocks(db ethdb.AncientWriter, blocks []*types.Block, receipts }) } +// WriteAncientBlocksWithBlobs writes entire block data, including blobs, into the ancient store and returns the total written size. +// Attention: the caller must pass blobs for blocks after the cancun fork +func WriteAncientBlocksWithBlobs(db ethdb.AncientStore, blocks []*types.Block, receipts []types.Receipts, td *big.Int, blobs []types.BlobTxSidecars) (int64, error) { + if len(blocks) == 0 { + return 0, nil + } + + // do some sanity check + if len(blocks) != len(blobs) { + return 0, fmt.Errorf("the blobs count differs from the blocks count, %v:%v", len(blobs), len(blocks)) + } + if len(blocks) != len(receipts) { + return 0, fmt.Errorf("the receipts count differs from the blocks count, %v:%v", len(receipts), len(blocks)) + } + // try reset empty blob ancient table + if err := ResetEmptyBlobAncientTable(db, blocks[0].NumberU64()); err != nil { + return 0, err + } + + var ( + tdSum = new(big.Int).Set(td) + stReceipts []*types.ReceiptForStorage + ) + return db.ModifyAncients(func(op ethdb.AncientWriteOp) error { + for i, block := range blocks { + // Convert receipts to storage format and sum up total difficulty. + stReceipts = stReceipts[:0] + for _, receipt := range receipts[i] { + stReceipts = append(stReceipts, (*types.ReceiptForStorage)(receipt)) + } + header := block.Header() + if i > 0 { + tdSum.Add(tdSum, header.Difficulty) + } + if err := writeAncientBlockWithBlob(op, block, header, stReceipts, tdSum, blobs[i]); err != nil { + return err + } + } + return nil + }) +} + +// ReadBlobsRLP retrieves all the transaction blobs belonging to a block in RLP encoding. +func ReadBlobsRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue { + var data []byte + db.ReadAncients(func(reader ethdb.AncientReaderOp) error { + // Check if the data is in ancients + if isCanon(reader, number, hash) { + data, _ = reader.Ancient(ChainFreezerBlobTable, number) + return nil + } + // If not, try reading from leveldb + data, _ = db.Get(blockBlobsKey(number, hash)) + return nil + }) + return data +} + +// ReadRawBlobs retrieves all the transaction blobs belonging to a block. +func ReadRawBlobs(db ethdb.Reader, hash common.Hash, number uint64) types.BlobTxSidecars { + data := ReadBlobsRLP(db, hash, number) + if len(data) == 0 { + return nil + } + var ret types.BlobTxSidecars + if err := rlp.DecodeBytes(data, &ret); err != nil { + log.Error("Invalid blob array RLP", "hash", hash, "err", err) + return nil + } + return ret +} + +// WriteBlobs stores all the transaction blobs belonging to a block. +// Nil may be passed for empty blobs.
+func WriteBlobs(db ethdb.KeyValueWriter, hash common.Hash, number uint64, blobs types.BlobTxSidecars) { + data, err := rlp.EncodeToBytes(blobs) + if err != nil { + log.Crit("Failed to encode block blobs", "err", err) + } + // Store the flattened blob sidecar slice + if err := db.Put(blockBlobsKey(number, hash), data); err != nil { + log.Crit("Failed to store block blobs", "err", err) + } +} + +// DeleteBlobs removes all blob data associated with a block hash. +func DeleteBlobs(db ethdb.KeyValueWriter, hash common.Hash, number uint64) { + if err := db.Delete(blockBlobsKey(number, hash)); err != nil { + log.Crit("Failed to delete block blobs", "err", err) + } +} + func writeAncientBlock(op ethdb.AncientWriteOp, block *types.Block, header *types.Header, receipts []*types.ReceiptForStorage, td *big.Int) error { num := block.NumberU64() if err := op.AppendRaw(ChainFreezerHashTable, num, block.Hash().Bytes()); err != nil { @@ -829,12 +935,33 @@ func writeAncientBlock(op ethdb.AncientWriteOp, block *types.Block, header *type return nil } +func writeAncientBlob(op ethdb.AncientWriteOp, num uint64, blobs types.BlobTxSidecars) error { + if err := op.Append(ChainFreezerBlobTable, num, blobs); err != nil { + return fmt.Errorf("can't append block %d blobs: %v", num, err) + } + return nil +} + +// writeAncientBlockWithBlob writes entire block data, including blobs, into the ancient store. +// Attention: the caller must pass blobs for blocks after the cancun fork +func writeAncientBlockWithBlob(op ethdb.AncientWriteOp, block *types.Block, header *types.Header, receipts []*types.ReceiptForStorage, td *big.Int, blobs types.BlobTxSidecars) error { + num := block.NumberU64() + if err := writeAncientBlock(op, block, header, receipts, td); err != nil { + return err + } + if err := writeAncientBlob(op, num, blobs); err != nil { + return err + } + return nil +} + // DeleteBlock removes all block data associated with a hash.
func DeleteBlock(db ethdb.KeyValueWriter, hash common.Hash, number uint64) { DeleteReceipts(db, hash, number) DeleteHeader(db, hash, number) DeleteBody(db, hash, number) DeleteTd(db, hash, number) + DeleteBlobs(db, hash, number) // it is safe to delete non-exist blob } // DeleteBlockWithoutNumber removes all block data associated with a hash, except @@ -844,6 +971,7 @@ func DeleteBlockWithoutNumber(db ethdb.KeyValueWriter, hash common.Hash, number deleteHeaderWithoutNumber(db, hash, number) DeleteBody(db, hash, number) DeleteTd(db, hash, number) + DeleteBlobs(db, hash, number) } const badBlockToKeep = 10 diff --git a/core/rawdb/accessors_chain_test.go b/core/rawdb/accessors_chain_test.go index 6b93e3d66e..b4a0ca5253 100644 --- a/core/rawdb/accessors_chain_test.go +++ b/core/rawdb/accessors_chain_test.go @@ -18,14 +18,21 @@ package rawdb import ( "bytes" + rand2 "crypto/rand" "encoding/hex" "fmt" + "io" "math/big" "math/rand" "os" "reflect" "testing" + "github.com/stretchr/testify/require" + + "github.com/ethereum/go-ethereum/crypto/kzg4844" + "github.com/holiman/uint256" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" @@ -412,6 +419,62 @@ func TestBlockReceiptStorage(t *testing.T) { } } +func TestBlockBlobsStorage(t *testing.T) { + db := NewMemoryDatabase() + + // Create a live block since we need metadata to reconstruct the receipt + genBlobs := makeBlkBlobs(1, 2) + tx1 := types.NewTx(&types.BlobTx{ + ChainID: new(uint256.Int).SetUint64(1), + GasTipCap: new(uint256.Int), + GasFeeCap: new(uint256.Int), + Gas: 0, + Value: new(uint256.Int), + Data: nil, + BlobFeeCap: new(uint256.Int), + BlobHashes: []common.Hash{common.HexToHash("0x34ec6e64f9cda8fe0451a391e4798085a3ef51a65ed1bfb016e34fc1a2028f8f"), common.HexToHash("0xb9a412e875f29fac436acde234f954e91173c4cf79814f6dcf630d8a6345747f")}, + Sidecar: genBlobs[0], + V: new(uint256.Int), + R: new(uint256.Int), + S: new(uint256.Int), + }) + tx2 := types.NewTx(&types.DynamicFeeTx{ + ChainID: new(big.Int).SetUint64(1), + GasTipCap: new(big.Int), + GasFeeCap: new(big.Int), + Gas: 0, + Value: new(big.Int), + Data: nil, + V: new(big.Int), + R: new(big.Int), + S: new(big.Int), + }) + + blkHash := common.BytesToHash([]byte{0x03, 0x14}) + body := &types.Body{Transactions: types.Transactions{tx1, tx2}} + blobs := types.BlobTxSidecars{tx1.BlobTxSidecar()} + + // Check that no blobs entries are in a pristine database + if bs := ReadRawBlobs(db, blkHash, 0); len(bs) != 0 { + t.Fatalf("non existent blobs returned: %v", bs) + } + WriteBody(db, blkHash, 0, body) + WriteBlobs(db, blkHash, 0, blobs) + + if bs := ReadRawBlobs(db, blkHash, 0); len(bs) == 0 { + t.Fatalf("no blobs returned") + } else { + if err := checkBlobsRLP(bs, blobs); err != nil { + t.Fatalf(err.Error()) + } + } + + DeleteBlobs(db, blkHash, 0) + if bs := ReadRawBlobs(db, blkHash, 0); len(bs) != 0 { + t.Fatalf("deleted blobs returned: %v", bs) + } +} + func checkReceiptsRLP(have, want types.Receipts) error { if len(have) != len(want) { return fmt.Errorf("receipts sizes mismatch: have %d, want %d", len(have), len(want)) @@ -432,6 +495,26 @@ func checkReceiptsRLP(have, want types.Receipts) error { return nil } +func checkBlobsRLP(have, want types.BlobTxSidecars) error { + if len(have) != len(want) { + return fmt.Errorf("blobs sizes mismatch: have %d, want %d", len(have), len(want)) + } + for i := 0; i < len(want); i++ { + rlpHave, err := rlp.EncodeToBytes(have[i]) + if err != nil { + return err + } + rlpWant, err := 
rlp.EncodeToBytes(want[i]) + if err != nil { + return err + } + if !bytes.Equal(rlpHave, rlpWant) { + return fmt.Errorf("blob #%d: blob mismatch: have %s, want %s", i, hex.EncodeToString(rlpHave), hex.EncodeToString(rlpWant)) + } + } + return nil +} + func TestAncientStorage(t *testing.T) { // Freezer style fast import the chain. frdir := t.TempDir() @@ -465,7 +548,8 @@ func TestAncientStorage(t *testing.T) { } // Write and verify the header in the database - WriteAncientBlocks(db, []*types.Block{block}, []types.Receipts{nil}, big.NewInt(100)) + _, err = WriteAncientBlocks(db, []*types.Block{block}, []types.Receipts{nil}, big.NewInt(100)) + require.NoError(t, err) if blob := ReadHeaderRLP(db, hash, number); len(blob) == 0 { t.Fatalf("no header returned") @@ -586,6 +670,7 @@ func BenchmarkWriteAncientBlocks(b *testing.B) { const blockTxs = 20 allBlocks := makeTestBlocks(b.N, blockTxs) batchReceipts := makeTestReceipts(batchSize, blockTxs) + batchBlobs := makeTestBlobs(batchSize, blockTxs) b.ResetTimer() // The benchmark loop writes batches of blocks, but note that the total block count is @@ -601,7 +686,8 @@ func BenchmarkWriteAncientBlocks(b *testing.B) { blocks := allBlocks[i : i+length] receipts := batchReceipts[:length] - writeSize, err := WriteAncientBlocks(db, blocks, receipts, td) + blobs := batchBlobs[:length] + writeSize, err := WriteAncientBlocksWithBlobs(db, blocks, receipts, td, blobs) if err != nil { b.Fatal(err) } @@ -663,6 +749,38 @@ func makeTestReceipts(n int, nPerBlock int) []types.Receipts { return allReceipts } +func makeBlkBlobs(n, nPerTx int) types.BlobTxSidecars { + if n <= 0 { + return nil + } + ret := make(types.BlobTxSidecars, n) + for i := 0; i < n; i++ { + blobs := make([]kzg4844.Blob, nPerTx) + commitments := make([]kzg4844.Commitment, nPerTx) + proofs := make([]kzg4844.Proof, nPerTx) + for i := 0; i < nPerTx; i++ { + io.ReadFull(rand2.Reader, blobs[i][:]) + commitments[i], _ = kzg4844.BlobToCommitment(blobs[i]) + proofs[i], _ = kzg4844.ComputeBlobProof(blobs[i], commitments[i]) + } + ret[i] = &types.BlobTxSidecar{ + Blobs: blobs, + Commitments: commitments, + Proofs: proofs, + } + } + return ret +} + +// makeTestBlobs creates fake blobs for the ancient write benchmark. +func makeTestBlobs(n int, nPerBlock int) []types.BlobTxSidecars { + allBlobs := make([]types.BlobTxSidecars, n) + for i := 0; i < n; i++ { + allBlobs[i] = makeBlkBlobs(nPerBlock, i%3) + } + return allBlobs +} + type fullLogRLP struct { Address common.Address Topics []common.Hash diff --git a/core/rawdb/ancient_scheme.go b/core/rawdb/ancient_scheme.go index 2dc1eae24f..30a1cf948c 100644 --- a/core/rawdb/ancient_scheme.go +++ b/core/rawdb/ancient_scheme.go @@ -34,6 +34,9 @@ const ( // ChainFreezerDifficultyTable indicates the name of the freezer total difficulty table. ChainFreezerDifficultyTable = "diffs" + + // ChainFreezerBlobTable indicates the name of the freezer blob table. + ChainFreezerBlobTable = "blobs" ) // chainFreezerNoSnappy configures whether compression is disabled for the ancient-tables.
@@ -44,6 +47,7 @@ var chainFreezerNoSnappy = map[string]bool{ ChainFreezerBodiesTable: false, ChainFreezerReceiptTable: false, ChainFreezerDifficultyTable: true, + ChainFreezerBlobTable: false, } const ( diff --git a/core/rawdb/chain_freezer.go b/core/rawdb/chain_freezer.go index c0bd64ef08..d1735f31be 100644 --- a/core/rawdb/chain_freezer.go +++ b/core/rawdb/chain_freezer.go @@ -18,10 +18,13 @@ package rawdb import ( "fmt" + "math/big" "sync" "sync/atomic" "time" + "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" @@ -49,6 +52,8 @@ type chainFreezer struct { quit chan struct{} wg sync.WaitGroup trigger chan chan struct{} // Manual blocking freeze trigger, test determinism + + freezeEnv atomic.Value } // newChainFreezer initializes the freezer for ancient chain data. @@ -156,7 +161,9 @@ func (f *chainFreezer) freeze(db ethdb.KeyValueStore) { if limit-first > freezerBatchLimit { limit = first + freezerBatchLimit } - ancients, err := f.freezeRange(nfdb, first, limit) + var env *ethdb.FreezerEnv + env, _ = f.freezeEnv.Load().(*ethdb.FreezerEnv) + ancients, err := f.freezeRange(nfdb, first, limit, env) if err != nil { log.Error("Error in block freeze operation", "err", err) backoff = true @@ -243,6 +250,11 @@ func (f *chainFreezer) freeze(db ethdb.KeyValueStore) { } log.Debug("Deep froze chain segment", context...) + // try prune blob data after cancun fork + if isCancun(env, head.Number, head.Time) { + f.tryPruneBlobAncient(env, *number) + } + // Avoid database thrashing with tiny writes if frozen-first < freezerBatchLimit { backoff = true @@ -250,9 +262,44 @@ func (f *chainFreezer) freeze(db ethdb.KeyValueStore) { } } -func (f *chainFreezer) freezeRange(nfdb *nofreezedb, number, limit uint64) (hashes []common.Hash, err error) { +func (f *chainFreezer) tryPruneBlobAncient(env *ethdb.FreezerEnv, num uint64) { + extraReserve := getExtraReserveFromEnv(env) + // It means that there is no need for pruning + if extraReserve < 0 { + return + } + reserveThreshold := uint64(params.BlobReserveThreshold + extraReserve) + if num <= reserveThreshold { + return + } + expectTail := num - reserveThreshold + h, err := f.TableAncients(ChainFreezerBlobTable) + if err != nil { + log.Error("Cannot get blob ancient head when prune", "block", num) + return + } + start := time.Now() + if err = f.ResetTable(ChainFreezerBlobTable, expectTail, h, false); err != nil { + log.Error("Cannot prune blob ancient", "block", num, "expectTail", expectTail) + } + log.Info("Chain freezer prune useless blobs, now ancient data is", "from", expectTail, "to", num, "cost", common.PrettyDuration(time.Since(start))) +} + +func getExtraReserveFromEnv(env *ethdb.FreezerEnv) int64 { + if env == nil { + return params.BlobExtraReserveThreshold + } + return env.BlobExtraReserve +} + +func (f *chainFreezer) freezeRange(nfdb *nofreezedb, number, limit uint64, env *ethdb.FreezerEnv) (hashes []common.Hash, err error) { hashes = make([]common.Hash, 0, limit-number) + // try init blob ancient first + if err := f.tryInitBlobAncient(nfdb, number, limit, env); err != nil { + return nil, err + } + _, err = f.ModifyAncients(func(op ethdb.AncientWriteOp) error { for ; number <= limit; number++ { // Retrieve all the components of the canonical block. 
@@ -260,7 +307,7 @@ func (f *chainFreezer) freezeRange(nfdb *nofreezedb, number, limit uint64) (hash if hash == (common.Hash{}) { return fmt.Errorf("canonical hash missing, can't freeze block %d", number) } - header := ReadHeaderRLP(nfdb, hash, number) + h, header := ReadHeaderAndRaw(nfdb, hash, number) if len(header) == 0 { return fmt.Errorf("block header missing, can't freeze block %d", number) } @@ -276,6 +323,14 @@ func (f *chainFreezer) freezeRange(nfdb *nofreezedb, number, limit uint64) (hash if len(td) == 0 { return fmt.Errorf("total difficulty missing, can't freeze block %d", number) } + // blobs are nil before the cancun fork + var blobs rlp.RawValue + if isCancun(env, h.Number, h.Time) { + blobs = ReadBlobsRLP(nfdb, hash, number) + if len(blobs) == 0 { + return fmt.Errorf("block blobs missing, can't freeze block %d", number) + } + } // Write to the batch. if err := op.AppendRaw(ChainFreezerHashTable, number, hash[:]); err != nil { @@ -293,6 +348,11 @@ func (f *chainFreezer) freezeRange(nfdb *nofreezedb, number, limit uint64) (hash if err := op.AppendRaw(ChainFreezerDifficultyTable, number, td); err != nil { return fmt.Errorf("can't write td to Freezer: %v", err) } + if isCancun(env, h.Number, h.Time) { + if err := op.AppendRaw(ChainFreezerBlobTable, number, blobs); err != nil { + return fmt.Errorf("can't write blobs to Freezer: %v", err) + } + } hashes = append(hashes, hash) } @@ -301,3 +361,56 @@ func (f *chainFreezer) freezeRange(nfdb *nofreezedb, number, limit uint64) (hash return hashes, err } + +func (f *chainFreezer) tryInitBlobAncient(nfdb *nofreezedb, number uint64, limit uint64, env *ethdb.FreezerEnv) error { + emptyBlobs, err := EmptyBlobAncient(f) + if err != nil { + return err + } + if !emptyBlobs { + return nil + } + for ; number <= limit; number++ { + hash := ReadCanonicalHash(nfdb, number) + if hash == (common.Hash{}) { + return fmt.Errorf("canonical hash missing, can't freeze block %d", number) + } + h, header := ReadHeaderAndRaw(nfdb, hash, number) + if len(header) == 0 { + return fmt.Errorf("block header missing, can't freeze block %d", number) + } + if isCancun(env, h.Number, h.Time) { + if err = ResetEmptyBlobAncientTable(f, number); err != nil { + return err + } + break + } + } + return nil +} + +// EmptyBlobAncient checks whether the blob ancient table is empty; it is used to initialize the blob ancient table +func EmptyBlobAncient(f *chainFreezer) (bool, error) { + frozen, err := f.TableAncients(ChainFreezerBlobTable) + if err != nil { + return false, err + } + return frozen == 0, nil +} + +func (f *chainFreezer) SetupFreezerEnv(env *ethdb.FreezerEnv) error { + f.freezeEnv.Store(env) + return nil +} + +func isCancun(env *ethdb.FreezerEnv, num *big.Int, time uint64) bool { + if env == nil || env.ChainCfg == nil { + return false + } + + return env.ChainCfg.IsCancun(num, time) +} + +func ResetEmptyBlobAncientTable(db ethdb.AncientStore, next uint64) error { + return db.ResetTable(ChainFreezerBlobTable, next, next, true) +} diff --git a/core/rawdb/database.go b/core/rawdb/database.go index e7e53947f6..e6ceae34f5 100644 --- a/core/rawdb/database.go +++ b/core/rawdb/database.go @@ -41,6 +41,7 @@ type freezerdb struct { ancientRoot string ethdb.KeyValueStore ethdb.AncientStore + ethdb.AncientFreezer diffStore ethdb.KeyValueStore } @@ -101,6 +102,10 @@ func (frdb *freezerdb) Freeze(threshold uint64) error { return nil } +func (frdb *freezerdb) SetupFreezerEnv(env *ethdb.FreezerEnv) error { + return frdb.AncientFreezer.SetupFreezerEnv(env) +} + // nofreezedb is a database wrapper that disables freezer
data retrievals. type nofreezedb struct { ethdb.KeyValueStore @@ -147,6 +152,10 @@ func (db *nofreezedb) ModifyAncients(func(ethdb.AncientWriteOp) error) (int64, e return 0, errNotSupported } +func (db *nofreezedb) ResetTable(kind string, tail uint64, head uint64, onlyEmpty bool) error { + return errNotSupported +} + // TruncateHead returns an error as we don't have a backing chain freezer. func (db *nofreezedb) TruncateHead(items uint64) (uint64, error) { return 0, errNotSupported @@ -201,6 +210,10 @@ func (db *nofreezedb) AncientDatadir() (string, error) { return "", errNotSupported } +func (db *nofreezedb) SetupFreezerEnv(env *ethdb.FreezerEnv) error { + return nil +} + // NewDatabase creates a high level database on top of a given key-value data // store without a freezer moving immutable chain segments into cold storage. func NewDatabase(db ethdb.KeyValueStore) ethdb.Database { @@ -210,7 +223,7 @@ func NewDatabase(db ethdb.KeyValueStore) ethdb.Database { // NewFreezerDb only create a freezer without statedb. func NewFreezerDb(db ethdb.KeyValueStore, frz, namespace string, readonly bool, newOffSet uint64) (*Freezer, error) { // Create the idle freezer instance, this operation should be atomic to avoid mismatch between offset and acientDB. - frdb, err := NewFreezer(frz, namespace, readonly, newOffSet, freezerTableSize, chainFreezerNoSnappy) + frdb, err := NewFreezer(frz, namespace, readonly, newOffSet, freezerTableSize, chainFreezerNoSnappy, ChainFreezerBlobTable) if err != nil { return nil, err } @@ -267,9 +280,10 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, ancient string, namespace st WriteAncientType(db, PruneFreezerType) } return &freezerdb{ - ancientRoot: ancient, - KeyValueStore: db, - AncientStore: frdb, + ancientRoot: ancient, + KeyValueStore: db, + AncientStore: frdb, + AncientFreezer: frdb, }, nil } @@ -382,9 +396,10 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, ancient string, namespace st }() } return &freezerdb{ - ancientRoot: ancient, - KeyValueStore: db, - AncientStore: frdb, + ancientRoot: ancient, + KeyValueStore: db, + AncientStore: frdb, + AncientFreezer: frdb, }, nil } diff --git a/core/rawdb/freezer.go b/core/rawdb/freezer.go index 89ad12ffa9..6ab20430a2 100644 --- a/core/rawdb/freezer.go +++ b/core/rawdb/freezer.go @@ -26,6 +26,8 @@ import ( "sync/atomic" "time" + "golang.org/x/exp/slices" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" @@ -75,12 +77,14 @@ type Freezer struct { instanceLock *flock.Flock // File-system lock to prevent double opens closeOnce sync.Once offset uint64 // Starting BlockNumber in current freezer + + additionTableKinds []string // additionTableKinds are post-filled tables that start as empty } // NewChainFreezer is a small utility method around NewFreezer that sets the // default parameters for the chain storage. func NewChainFreezer(datadir string, namespace string, readonly bool, offset uint64) (*Freezer, error) { - return NewFreezer(datadir, namespace, readonly, offset, freezerTableSize, chainFreezerNoSnappy) + return NewFreezer(datadir, namespace, readonly, offset, freezerTableSize, chainFreezerNoSnappy, ChainFreezerBlobTable) } // NewFreezer creates a freezer instance for maintaining immutable ordered @@ -88,7 +92,8 @@ func NewChainFreezer(datadir string, namespace string, readonly bool, offset uin // // The 'tables' argument defines the data tables. If the value of a map // entry is true, snappy compression is disabled for the table. 
-func NewFreezer(datadir string, namespace string, readonly bool, offset uint64, maxTableSize uint32, tables map[string]bool) (*Freezer, error) { +// additionTables indicates the new add tables for freezerDB, it has some special rules. +func NewFreezer(datadir string, namespace string, readonly bool, offset uint64, maxTableSize uint32, tables map[string]bool, additionTables ...string) (*Freezer, error) { // Create the initial freezer object var ( readMeter = metrics.NewRegisteredMeter(namespace+"ancient/read", nil) @@ -120,15 +125,24 @@ func NewFreezer(datadir string, namespace string, readonly bool, offset uint64, } // Open all the supported data tables freezer := &Freezer{ - readonly: readonly, - tables: make(map[string]*freezerTable), - instanceLock: lock, - offset: offset, + readonly: readonly, + tables: make(map[string]*freezerTable), + instanceLock: lock, + offset: offset, + additionTableKinds: additionTables, } // Create the tables. for name, disableSnappy := range tables { - table, err := newTable(datadir, name, readMeter, writeMeter, sizeGauge, maxTableSize, disableSnappy, readonly) + var ( + table *freezerTable + err error + ) + if slices.Contains(additionTables, name) { + table, err = openAdditionTable(datadir, name, readMeter, writeMeter, sizeGauge, maxTableSize, disableSnappy, readonly) + } else { + table, err = newTable(datadir, name, readMeter, writeMeter, sizeGauge, maxTableSize, disableSnappy, readonly) + } if err != nil { for _, table := range freezer.tables { table.Close() @@ -167,6 +181,20 @@ func NewFreezer(datadir string, namespace string, readonly bool, offset uint64, return freezer, nil } +// openAdditionTable create table, it will auto create new files when it was first initialized +func openAdditionTable(datadir, name string, readMeter, writeMeter metrics.Meter, sizeGauge metrics.Gauge, maxTableSize uint32, disableSnappy, readonly bool) (*freezerTable, error) { + if readonly { + f, err := newTable(datadir, name, readMeter, writeMeter, sizeGauge, maxTableSize, disableSnappy, false) + if err != nil { + return nil, err + } + if err = f.Close(); err != nil { + return nil, err + } + } + return newTable(datadir, name, readMeter, writeMeter, sizeGauge, maxTableSize, disableSnappy, readonly) +} + // Close terminates the chain freezer, unmapping all the data files. func (f *Freezer) Close() error { f.writeLock.Lock() @@ -224,6 +252,12 @@ func (f *Freezer) Ancients() (uint64, error) { return f.frozen.Load(), nil } +func (f *Freezer) TableAncients(kind string) (uint64, error) { + f.writeLock.RLock() + defer f.writeLock.RUnlock() + return f.tables[kind].items.Load(), nil +} + // ItemAmountInAncient returns the actual length of current ancientDB. func (f *Freezer) ItemAmountInAncient() (uint64, error) { return f.frozen.Load() - atomic.LoadUint64(&f.offset), nil @@ -365,6 +399,10 @@ func (f *Freezer) validate() error { ) // Hack to get boundary of any table for kind, table := range f.tables { + // addition tables is special cases + if slices.Contains(f.additionTableKinds, kind) { + continue + } head = table.items.Load() tail = table.itemHidden.Load() name = kind @@ -372,6 +410,21 @@ func (f *Freezer) validate() error { } // Now check every table against those boundaries. 
for kind, table := range f.tables { + // check addition tables, try to align with exist tables + if slices.Contains(f.additionTableKinds, kind) { + // if the table is empty, just skip + if EmptyTable(table) { + continue + } + // otherwise, just align head + if head != table.items.Load() { + return fmt.Errorf("freezer tables %s and %s have differing head: %d != %d", kind, name, table.items.Load(), head) + } + if tail > table.itemHidden.Load() { + return fmt.Errorf("freezer tables %s and %s have differing tail: %d != %d", kind, name, table.itemHidden.Load(), tail) + } + continue + } if head != table.items.Load() { return fmt.Errorf("freezer tables %s and %s have differing head: %d != %d", kind, name, table.items.Load(), head) } @@ -390,7 +443,18 @@ func (f *Freezer) repair() error { head = uint64(math.MaxUint64) tail = uint64(0) ) - for _, table := range f.tables { + for kind, table := range f.tables { + // addition tables only align head + if slices.Contains(f.additionTableKinds, kind) { + if EmptyTable(table) { + continue + } + items := table.items.Load() + if head > items { + head = items + } + continue + } items := table.items.Load() if head > items { head = items @@ -400,7 +464,11 @@ func (f *Freezer) repair() error { tail = hidden } } - for _, table := range f.tables { + for kind, table := range f.tables { + // try to align with exist tables, skip empty table + if slices.Contains(f.additionTableKinds, kind) && EmptyTable(table) { + continue + } if err := table.truncateHead(head); err != nil { return err } @@ -602,3 +670,48 @@ func (f *Freezer) MigrateTable(kind string, convert convertLegacyFn) error { } return nil } + +func (f *Freezer) ResetTable(kind string, tail, head uint64, onlyEmpty bool) error { + if f.readonly { + return errReadOnly + } + if err := f.Sync(); err != nil { + return err + } + + f.writeLock.Lock() + defer f.writeLock.Unlock() + if tail < f.offset || head < f.offset { + return errors.New("the input tail&head is less than offset") + } + if _, exist := f.tables[kind]; !exist { + return errors.New("you reset a non-exist table") + } + // if you reset a non empty table just skip + if onlyEmpty && !EmptyTable(f.tables[kind]) { + return nil + } + + nt, err := f.tables[kind].resetItems(tail-f.offset, head-f.offset) + if err != nil { + return err + } + f.tables[kind] = nt + + if err := f.repair(); err != nil { + for _, table := range f.tables { + table.Close() + } + return err + } + + f.frozen.Add(f.offset) + f.tail.Add(f.offset) + f.writeBatch = newFreezerBatch(f) + log.Info("Reset Table", "tail", f.tail.Load(), "frozen", f.frozen.Load()) + return nil +} + +func EmptyTable(t *freezerTable) bool { + return t.items.Load() == 0 +} diff --git a/core/rawdb/freezer_batch.go b/core/rawdb/freezer_batch.go index 3e1a8143a6..bef9c70b33 100644 --- a/core/rawdb/freezer_batch.go +++ b/core/rawdb/freezer_batch.go @@ -20,6 +20,8 @@ import ( "fmt" "sync/atomic" + "golang.org/x/exp/slices" + "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/rlp" "github.com/golang/snappy" @@ -31,11 +33,15 @@ const freezerBatchBufferLimit = 2 * 1024 * 1024 // freezerBatch is a write operation of multiple items on a freezer. 
type freezerBatch struct { - tables map[string]*freezerTableBatch + tables map[string]*freezerTableBatch + additionTableKinds []string // additionTableKinds are post-filled tables that start as empty } func newFreezerBatch(f *Freezer) *freezerBatch { - batch := &freezerBatch{tables: make(map[string]*freezerTableBatch, len(f.tables))} + batch := &freezerBatch{ + tables: make(map[string]*freezerTableBatch, len(f.tables)), + additionTableKinds: f.additionTableKinds, + } for kind, table := range f.tables { batch.tables[kind] = table.newBatch(f.offset) } @@ -65,6 +71,10 @@ func (batch *freezerBatch) commit() (item uint64, writeSize int64, err error) { // Check that count agrees on all batches. item = uint64(math.MaxUint64) for name, tb := range batch.tables { + // skip empty addition tables + if slices.Contains(batch.additionTableKinds, name) && EmptyTable(tb.t) { + continue + } if item < math.MaxUint64 && tb.curItem != item { return 0, 0, fmt.Errorf("table %s is at item %d, want %d", name, tb.curItem, item) } diff --git a/core/rawdb/freezer_resettable.go b/core/rawdb/freezer_resettable.go index 67565530e1..1bbc39d7f5 100644 --- a/core/rawdb/freezer_resettable.go +++ b/core/rawdb/freezer_resettable.go @@ -187,6 +187,13 @@ func (f *ResettableFreezer) ModifyAncients(fn func(ethdb.AncientWriteOp) error) return f.freezer.ModifyAncients(fn) } +func (f *ResettableFreezer) ResetTable(kind string, tail uint64, head uint64, onlyEmpty bool) error { + f.lock.RLock() + defer f.lock.RUnlock() + + return f.freezer.ResetTable(kind, tail, head, onlyEmpty) +} + // TruncateHead discards any recent data above the provided threshold number. // It returns the previous head number. func (f *ResettableFreezer) TruncateHead(items uint64) (uint64, error) { diff --git a/core/rawdb/freezer_table.go b/core/rawdb/freezer_table.go index daa594ec90..04e1261acf 100644 --- a/core/rawdb/freezer_table.go +++ b/core/rawdb/freezer_table.go @@ -1026,3 +1026,54 @@ func (t *freezerTable) ResetItemsOffset(virtualTail uint64) error { return nil } + +// resetItems reset freezer table head & tail +func (t *freezerTable) resetItems(tail, head uint64) (*freezerTable, error) { + if t.readonly { + return nil, errors.New("resetItems in readonly mode") + } + itemHidden := t.itemHidden.Load() + items := t.items.Load() + if tail != head && (itemHidden > tail || items < head) { + return nil, errors.New("cannot reset to non-exist range") + } + + var err error + if tail != head { + if err = t.truncateHead(head); err != nil { + return nil, err + } + if err = t.truncateTail(tail); err != nil { + return nil, err + } + return t, nil + } + + // if tail == head, it means table reset to 0 item + t.releaseFilesAfter(t.tailId-1, true) + t.head.Close() + os.Remove(t.head.Name()) + t.index.Close() + os.Remove(t.index.Name()) + t.meta.Close() + os.Remove(t.meta.Name()) + + var idxName string + if t.noCompression { + idxName = fmt.Sprintf("%s.ridx", t.name) // raw index file + } else { + idxName = fmt.Sprintf("%s.cidx", t.name) // compressed index file + } + index, err := openFreezerFileForAppend(filepath.Join(t.path, idxName)) + if err != nil { + return nil, err + } + tailIndex := indexEntry{ + offset: uint32(tail), + } + if _, err = index.Write(tailIndex.append(nil)); err != nil { + return nil, err + } + + return newFreezerTable(t.path, t.name, t.noCompression, t.readonly) +} diff --git a/core/rawdb/freezer_table_test.go b/core/rawdb/freezer_table_test.go index d9a1aee595..5ef7619403 100644 --- a/core/rawdb/freezer_table_test.go +++ 
b/core/rawdb/freezer_table_test.go @@ -1370,3 +1370,98 @@ func TestRandom(t *testing.T) { t.Fatal(err) } } + +func TestResetItems(t *testing.T) { + t.Parallel() + rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge() + fname := fmt.Sprintf("truncate-tail-%d", rand.Uint64()) + + // Fill table + f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false) + if err != nil { + t.Fatal(err) + } + + // Write 7 x 20 bytes, splitting out into four files + batch := f.newBatch(0) + require.NoError(t, batch.AppendRaw(0, getChunk(20, 0xFF))) + require.NoError(t, batch.AppendRaw(1, getChunk(20, 0xEE))) + require.NoError(t, batch.AppendRaw(2, getChunk(20, 0xdd))) + require.NoError(t, batch.AppendRaw(3, getChunk(20, 0xcc))) + require.NoError(t, batch.AppendRaw(4, getChunk(20, 0xbb))) + require.NoError(t, batch.AppendRaw(5, getChunk(20, 0xaa))) + require.NoError(t, batch.AppendRaw(6, getChunk(20, 0x11))) + require.NoError(t, batch.commit()) + + // nothing to do, all the items should still be there. + f, err = f.resetItems(0, 7) + require.NoError(t, err) + fmt.Println(f.dumpIndexString(0, 1000)) + checkRetrieve(t, f, map[uint64][]byte{ + 0: getChunk(20, 0xFF), + 1: getChunk(20, 0xEE), + 2: getChunk(20, 0xdd), + 3: getChunk(20, 0xcc), + 4: getChunk(20, 0xbb), + 5: getChunk(20, 0xaa), + 6: getChunk(20, 0x11), + }) + + f, err = f.resetItems(1, 5) + require.NoError(t, err) + _, err = f.resetItems(0, 5) + require.Error(t, err) + _, err = f.resetItems(1, 6) + require.Error(t, err) + + fmt.Println(f.dumpIndexString(0, 1000)) + checkRetrieveError(t, f, map[uint64]error{ + 0: errOutOfBounds, + }) + checkRetrieve(t, f, map[uint64][]byte{ + 1: getChunk(20, 0xEE), + 2: getChunk(20, 0xdd), + 3: getChunk(20, 0xcc), + 4: getChunk(20, 0xbb), + }) + + f, err = f.resetItems(4, 4) + require.NoError(t, err) + checkRetrieveError(t, f, map[uint64]error{ + 4: errOutOfBounds, + }) + + batch = f.newBatch(0) + require.Error(t, batch.AppendRaw(0, getChunk(20, 0xa0))) + require.NoError(t, batch.AppendRaw(4, getChunk(20, 0xa4))) + require.NoError(t, batch.AppendRaw(5, getChunk(20, 0xa5))) + require.NoError(t, batch.commit()) + fmt.Println(f.dumpIndexString(0, 1000)) + + // Reopen the table, the deletion information should be persisted as well + f.Close() + f, err = newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false) + if err != nil { + t.Fatal(err) + } + fmt.Println(f.dumpIndexString(0, 1000)) + checkRetrieveError(t, f, map[uint64]error{ + 0: errOutOfBounds, + }) + checkRetrieve(t, f, map[uint64][]byte{ + 4: getChunk(20, 0xa4), + 5: getChunk(20, 0xa5), + }) + + // truncate all, the entire freezer should be deleted + f.truncateTail(6) + checkRetrieveError(t, f, map[uint64]error{ + 0: errOutOfBounds, + 1: errOutOfBounds, + 2: errOutOfBounds, + 3: errOutOfBounds, + 4: errOutOfBounds, + 5: errOutOfBounds, + 6: errOutOfBounds, + }) +} diff --git a/core/rawdb/freezer_test.go b/core/rawdb/freezer_test.go index 5be7f09330..5f68fff527 100644 --- a/core/rawdb/freezer_test.go +++ b/core/rawdb/freezer_test.go @@ -334,6 +334,72 @@ func TestFreezerConcurrentReadonly(t *testing.T) { } } +func TestFreezer_AdditionTables(t *testing.T) { + dir := t.TempDir() + // Open non-readonly freezer and fill individual tables + // with different amount of data. 
+ f, err := NewFreezer(dir, "", false, 0, 2049, map[string]bool{"o1": true, "o2": true}) + if err != nil { + t.Fatal("can't open freezer", err) + } + + var item = make([]byte, 1024) + _, err = f.ModifyAncients(func(op ethdb.AncientWriteOp) error { + if err := op.AppendRaw("o1", 0, item); err != nil { + return err + } + if err := op.AppendRaw("o1", 1, item); err != nil { + return err + } + if err := op.AppendRaw("o2", 0, item); err != nil { + return err + } + if err := op.AppendRaw("o2", 1, item); err != nil { + return err + } + return nil + }) + require.NoError(t, err) + require.NoError(t, f.Close()) + + // check read only + f, err = NewFreezer(dir, "", true, 0, 2049, map[string]bool{"o1": true, "o2": true, "a1": true}, "a1") + require.NoError(t, err) + require.NoError(t, f.Close()) + + f, err = NewFreezer(dir, "", false, 0, 2049, map[string]bool{"o1": true, "o2": true, "a1": true}, "a1") + require.NoError(t, err) + frozen, _ := f.Ancients() + f.ResetTable("a1", frozen, frozen, true) + _, err = f.ModifyAncients(func(op ethdb.AncientWriteOp) error { + if err := op.AppendRaw("o1", 2, item); err != nil { + return err + } + if err := op.AppendRaw("o2", 2, item); err != nil { + return err + } + if err := op.AppendRaw("a1", 2, item); err != nil { + return err + } + return nil + }) + require.NoError(t, err) + _, err = f.Ancient("a1", 1) + require.Error(t, err) + actual, err := f.Ancient("a1", 2) + require.NoError(t, err) + require.Equal(t, item, actual) + require.NoError(t, f.Close()) + + // reopen and read + f, err = NewFreezer(dir, "", true, 0, 2049, map[string]bool{"o1": true, "o2": true, "a1": true}, "a1") + require.NoError(t, err) + actual, err = f.Ancient("a1", 2) + require.NoError(t, err) + require.Equal(t, item, actual) + require.NoError(t, f.Close()) +} + func newFreezerForTesting(t *testing.T, tables map[string]bool) (*Freezer, string) { t.Helper() diff --git a/core/rawdb/prunedfreezer.go b/core/rawdb/prunedfreezer.go index ddc77cd49a..992d7802ed 100644 --- a/core/rawdb/prunedfreezer.go +++ b/core/rawdb/prunedfreezer.go @@ -312,6 +312,10 @@ func (f *prunedfreezer) freeze() { } } +func (f *prunedfreezer) SetupFreezerEnv(env *ethdb.FreezerEnv) error { + return nil +} + func (f *prunedfreezer) ReadAncients(fn func(ethdb.AncientReaderOp) error) (err error) { return fn(f) } @@ -323,3 +327,7 @@ func (f *prunedfreezer) AncientRange(kind string, start, count, maxBytes uint64) func (f *prunedfreezer) ModifyAncients(func(ethdb.AncientWriteOp) error) (int64, error) { return 0, errNotSupported } + +func (f *prunedfreezer) ResetTable(kind string, tail uint64, head uint64, onlyEmpty bool) error { + return errNotSupported +} diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go index 02a41c6903..0771a393ff 100644 --- a/core/rawdb/schema.go +++ b/core/rawdb/schema.go @@ -149,6 +149,8 @@ var ( CliqueSnapshotPrefix = []byte("clique-") ParliaSnapshotPrefix = []byte("parlia-") + BlockBlobsPrefix = []byte("blobs") + preimageCounter = metrics.NewRegisteredCounter("db/preimage/total", nil) preimageHitCounter = metrics.NewRegisteredCounter("db/preimage/hits", nil) ) @@ -203,6 +205,11 @@ func blockReceiptsKey(number uint64, hash common.Hash) []byte { return append(append(blockReceiptsPrefix, encodeBlockNumber(number)...), hash.Bytes()...) } +// blockBlobsKey = BlockBlobsPrefix + blockNumber (uint64 big endian) + blockHash +func blockBlobsKey(number uint64, hash common.Hash) []byte { + return append(append(BlockBlobsPrefix, encodeBlockNumber(number)...), hash.Bytes()...) 
+} + // diffLayerKey = diffLayerKeyPrefix + hash func diffLayerKey(hash common.Hash) []byte { return append(diffLayerPrefix, hash.Bytes()...) diff --git a/core/rawdb/table.go b/core/rawdb/table.go index 509bdbc94e..6915ae9d21 100644 --- a/core/rawdb/table.go +++ b/core/rawdb/table.go @@ -101,6 +101,10 @@ func (t *table) ModifyAncients(fn func(ethdb.AncientWriteOp) error) (int64, erro return t.db.ModifyAncients(fn) } +func (t *table) ResetTable(kind string, tail uint64, head uint64, onlyEmpty bool) error { + return t.db.ResetTable(kind, tail, head, onlyEmpty) +} + func (t *table) ReadAncients(fn func(reader ethdb.AncientReaderOp) error) (err error) { return t.db.ReadAncients(fn) } @@ -225,6 +229,10 @@ func (t *table) NewSnapshot() (ethdb.Snapshot, error) { return t.db.NewSnapshot() } +func (t *table) SetupFreezerEnv(env *ethdb.FreezerEnv) error { + return nil +} + // tableBatch is a wrapper around a database batch that prefixes each key access // with a pre-configured string. type tableBatch struct { diff --git a/core/state/pruner/pruner.go b/core/state/pruner/pruner.go index 3ba90f73de..0ef216a5d2 100644 --- a/core/state/pruner/pruner.go +++ b/core/state/pruner/pruner.go @@ -440,10 +440,20 @@ func (p *BlockPruner) backUpOldDb(name string, cache, handles int, namespace str if td == nil { return consensus.ErrUnknownAncestor } - // Write into new ancient_back db. - if _, err := rawdb.WriteAncientBlocks(frdbBack, []*types.Block{block}, []types.Receipts{receipts}, td); err != nil { - log.Error("failed to write new ancient", "error", err) - return err + // if there has blobs, it needs to back up too. + blobs := rawdb.ReadRawBlobs(chainDb, blockHash, blockNumber) + if blobs != nil { + // Write into new ancient_back db. + if _, err := rawdb.WriteAncientBlocksWithBlobs(frdbBack, []*types.Block{block}, []types.Receipts{receipts}, td, []types.BlobTxSidecars{blobs}); err != nil { + log.Error("failed to write new ancient", "error", err) + return err + } + } else { + // Write into new ancient_back db. + if _, err := rawdb.WriteAncientBlocks(frdbBack, []*types.Block{block}, []types.Receipts{receipts}, td); err != nil { + log.Error("failed to write new ancient", "error", err) + return err + } } // Print the log every 5s for better trace. if common.PrettyDuration(time.Since(start)) > common.PrettyDuration(5*time.Second) { diff --git a/core/types/block.go b/core/types/block.go index ac6066dd9b..995cf3c64f 100644 --- a/core/types/block.go +++ b/core/types/block.go @@ -232,6 +232,9 @@ type Block struct { // inter-peer block relay. ReceivedAt time.Time ReceivedFrom interface{} + + // blobs provides DA check + blobs BlobTxSidecars } // "external" block encoding. used for eth protocol, etc. @@ -451,6 +454,10 @@ func (b *Block) SanityCheck() error { return b.header.SanityCheck() } +func (b *Block) Blobs() BlobTxSidecars { + return b.blobs +} + type writeCounter uint64 func (c *writeCounter) Write(b []byte) (int, error) { @@ -512,6 +519,17 @@ func (b *Block) WithWithdrawals(withdrawals []*Withdrawal) *Block { return block } +// WithBlobs returns a block containing the given blobs. +func (b *Block) WithBlobs(blobs BlobTxSidecars) *Block { + block := &Block{ + header: b.header, + transactions: b.transactions, + uncles: b.uncles, + blobs: blobs, + } + return block +} + // Hash returns the keccak256 hash of b's header. // The hash is computed on the first call and cached thereafter. 
diff --git a/core/types/tx_blob.go b/core/types/tx_blob.go
index caede7cc53..1931d18f66 100644
--- a/core/types/tx_blob.go
+++ b/core/types/tx_blob.go
@@ -52,6 +52,8 @@ type BlobTx struct {
 	S *uint256.Int `json:"s" gencodec:"required"`
 }
 
+type BlobTxSidecars []*BlobTxSidecar
+
 // BlobTxSidecar contains the blobs of a blob transaction.
 type BlobTxSidecar struct {
 	Blobs []kzg4844.Blob // Blobs needed by the blob pool
diff --git a/core/types/tx_blob_test.go b/core/types/tx_blob_test.go
index 44ac48cc6f..3b0f4275f3 100644
--- a/core/types/tx_blob_test.go
+++ b/core/types/tx_blob_test.go
@@ -2,11 +2,15 @@ package types
 
 import (
 	"crypto/ecdsa"
+	"fmt"
 	"testing"
 
+	"github.com/stretchr/testify/require"
+
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/crypto/kzg4844"
+	"github.com/ethereum/go-ethereum/rlp"
 	"github.com/holiman/uint256"
 )
 
@@ -88,3 +92,56 @@ func createEmptyBlobTx(key *ecdsa.PrivateKey, withSidecar bool) *Transaction {
 	signer := NewCancunSigner(blobtx.ChainID.ToBig())
 	return MustSignNewTx(key, signer, blobtx)
 }
+
+func TestBlobTxSidecars_Encode(t *testing.T) {
+	tests := []struct {
+		raw BlobTxSidecars
+		err bool
+	}{
+		{
+			raw: BlobTxSidecars{
+				&BlobTxSidecar{
+					Blobs:       []kzg4844.Blob{emptyBlob},
+					Commitments: []kzg4844.Commitment{emptyBlobCommit},
+					Proofs:      []kzg4844.Proof{emptyBlobProof},
+				},
+				&BlobTxSidecar{
+					Blobs:       []kzg4844.Blob{emptyBlob},
+					Commitments: []kzg4844.Commitment{emptyBlobCommit},
+					Proofs:      []kzg4844.Proof{emptyBlobProof},
+				},
+			},
+			err: false,
+		},
+		{
+			raw: BlobTxSidecars{
+				&BlobTxSidecar{
+					Blobs:       []kzg4844.Blob{emptyBlob},
+					Commitments: []kzg4844.Commitment{emptyBlobCommit},
+					Proofs:      []kzg4844.Proof{emptyBlobProof},
+				},
+				nil,
+			},
+			err: true,
+		},
+		{
+			raw: BlobTxSidecars{},
+			err: false,
+		},
+	}
+
+	for i, item := range tests {
+		t.Run(fmt.Sprintf("case%d", i), func(t *testing.T) {
+			enc, err := rlp.EncodeToBytes(item.raw)
+			require.NoError(t, err)
+			var nbs BlobTxSidecars
+			err = rlp.DecodeBytes(enc, &nbs)
+			if item.err {
+				require.Error(t, err)
+				return
+			}
+			require.NoError(t, err)
+			require.Equal(t, item.raw, nbs)
+		})
+	}
+}
diff --git a/eth/backend.go b/eth/backend.go
index d60e24605c..a9e721a9e2 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -195,6 +195,14 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
 		overrides.OverrideFeynman = config.OverrideFeynman
 	}
 
+	// set up the ancient freezer environment
+	if err = chainDb.SetupFreezerEnv(&ethdb.FreezerEnv{
+		ChainCfg:         chainConfig,
+		BlobExtraReserve: config.BlobExtraReserve,
+	}); err != nil {
+		return nil, err
+	}
+
 	networkID := config.NetworkId
 	if networkID == 0 {
 		networkID = chainConfig.ChainID.Uint64()
diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go
index 7184f2dbe9..1d05dde034 100644
--- a/eth/ethconfig/config.go
+++ b/eth/ethconfig/config.go
@@ -70,7 +70,8 @@ var Defaults = Config{
 	RPCGasCap:      50000000,
 	RPCEVMTimeout:  5 * time.Second,
 	GPO:            FullNodeGPO,
-	RPCTxFeeCap:    1, // 1 ether
+	RPCTxFeeCap:      1, // 1 ether
+	BlobExtraReserve: params.BlobExtraReserveThreshold, // Extra reserve threshold for blobs; blobs never expire when set to -1 (default 28800)
 }
 
 //go:generate go run github.com/fjl/gencodec -type Config -formats toml -out gen_config.go
@@ -200,6 +201,9 @@ type Config struct {
 
 	// OverrideFeynman (TODO: remove after the fork)
 	OverrideFeynman *uint64 `toml:",omitempty"`
+
+	// BlobExtraReserve is the extra number of blocks for which expired blobs are kept available.
+	BlobExtraReserve int64
 }
 
 // CreateConsensusEngine creates a consensus engine for the given chain config.
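Note (illustration only, not part of the patch): how the new BlobExtraReserve setting is expected to reach the ancient store through the FreezerEnv type introduced in the ethdb changes below; params.BSCChainConfig is an assumption standing in for whatever chain config the node actually runs with.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/eth/ethconfig"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/params"
)

func main() {
	// Start from the defaults; the reserve can be overridden from the CLI.
	cfg := ethconfig.Defaults
	cfg.BlobExtraReserve = params.BlobExtraReserveThreshold

	// Mirrors what eth/backend.go now does at startup: hand the chain config
	// and the configured reserve to the freezer so it knows when blobs expire.
	env := &ethdb.FreezerEnv{
		ChainCfg:         params.BSCChainConfig, // assumption: BSC mainnet config
		BlobExtraReserve: cfg.BlobExtraReserve,
	}
	fmt.Println("blob extra reserve (blocks):", env.BlobExtraReserve)
}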
diff --git a/ethdb/database.go b/ethdb/database.go
index 5af19e3478..8781316614 100644
--- a/ethdb/database.go
+++ b/ethdb/database.go
@@ -17,7 +17,11 @@
 // Package ethdb defines the interfaces for an Ethereum data store.
 package ethdb
 
-import "io"
+import (
+	"io"
+
+	"github.com/ethereum/go-ethereum/params"
+)
 
 // KeyValueReader wraps the Has and Get method of a backing data store.
 type KeyValueReader interface {
@@ -136,6 +140,20 @@ type AncientWriter interface {
 	// The second argument is a function that takes a raw entry and returns it
 	// in the newest format.
 	MigrateTable(string, func([]byte) ([]byte, error)) error
+
+	// ResetTable resets the given table to the new tail/head boundary.
+	ResetTable(kind string, tail uint64, head uint64, onlyEmpty bool) error
+}
+
+type FreezerEnv struct {
+	ChainCfg         *params.ChainConfig
+	BlobExtraReserve int64
+}
+
+// AncientFreezer defines the helper functions for freezing ancient data.
+type AncientFreezer interface {
+	// SetupFreezerEnv provides params.ChainConfig for checking hard forks, like isCancun.
+	SetupFreezerEnv(env *FreezerEnv) error
 }
 
 // AncientWriteOp is given to the function argument of ModifyAncients.
@@ -200,5 +218,6 @@ type Database interface {
 	Stater
 	Compacter
 	Snapshotter
+	AncientFreezer
 	io.Closer
 }
diff --git a/ethdb/remotedb/remotedb.go b/ethdb/remotedb/remotedb.go
index babb625d88..75f63bbc6a 100644
--- a/ethdb/remotedb/remotedb.go
+++ b/ethdb/remotedb/remotedb.go
@@ -114,6 +114,14 @@ func (db *Database) ModifyAncients(f func(ethdb.AncientWriteOp) error) (int64, e
 	panic("not supported")
 }
 
+func (db *Database) ResetTable(kind string, tail uint64, head uint64, onlyEmpty bool) error {
+	panic("not supported")
+}
+
+func (db *Database) AncientReset(tail, head uint64) error {
+	panic("not supported")
+}
+
 func (db *Database) TruncateHead(n uint64) (uint64, error) {
 	panic("not supported")
 }
@@ -163,6 +171,10 @@ func (db *Database) Close() error {
 	return nil
 }
 
+func (db *Database) SetupFreezerEnv(env *ethdb.FreezerEnv) error {
+	panic("not supported")
+}
+
 func New(client *rpc.Client) ethdb.Database {
 	return &Database{
 		remote: client,
diff --git a/miner/worker.go b/miner/worker.go
index 63304be6d1..b960f8b565 100644
--- a/miner/worker.go
+++ b/miner/worker.go
@@ -959,6 +959,9 @@ func (w *worker) prepareWork(genParams *generateParams) (*environment, error) {
 		header.BlobGasUsed = new(uint64)
 		header.ExcessBlobGas = &excessBlobGas
 		header.ParentBeaconRoot = genParams.beaconRoot
+		if w.chainConfig.Parlia != nil {
+			header.WithdrawalsHash = new(common.Hash)
+		}
 	}
 	// Run the consensus preparation with the default or customized consensus engine.
 	if err := w.engine.Prepare(w.chain, header); err != nil {
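Note (illustration only, not part of the patch): the intended call pattern for the new ResetTable API, mirroring how the freezer test above aligns an additional table with the current ancient boundary; the helper name and the table kind argument are hypothetical.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/ethdb"
)

// alignExtraTable re-anchors a newly added ancient table (such as the blob
// table) at the store's current frozen head, so entries that were never
// written are not expected to exist. onlyEmpty=true leaves the table alone
// if it already holds data.
func alignExtraTable(store ethdb.AncientStore, kind string) error {
	frozen, err := store.Ancients() // current number of frozen items
	if err != nil {
		return err
	}
	return store.ResetTable(kind, frozen, frozen, true)
}

func main() {
	fmt.Println("sketch only; requires a concrete ethdb.AncientStore such as the chain freezer")
}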
diff --git a/params/config.go b/params/config.go
index 1e460f0745..4ff2a86d0b 100644
--- a/params/config.go
+++ b/params/config.go
@@ -149,6 +149,8 @@ var (
 		// UnixTime: 1705996800 is January 23, 2024 8:00:00 AM UTC
 		ShanghaiTime: newUint64(1705996800),
 		KeplerTime:   newUint64(1705996800),
+		// TODO(GalaIO): enable cancun fork time later
+		//CancunTime: newUint64(),
 
 		// TODO
 		FeynmanTime: nil,
@@ -189,6 +191,8 @@ var (
 		ShanghaiTime: newUint64(1702972800),
 		KeplerTime:   newUint64(1702972800),
 		FeynmanTime:  newUint64(1710136800),
+		// TODO(GalaIO): enable cancun fork time later
+		//CancunTime: newUint64(),
 
 		Parlia: &ParliaConfig{
 			Period: 3,
@@ -226,6 +230,7 @@ var (
 		ShanghaiTime: newUint64(0),
 		KeplerTime:   newUint64(0),
 		FeynmanTime:  newUint64(0),
+		CancunTime:   newUint64(0),
 
 		Parlia: &ParliaConfig{
 			Period: 3,
@@ -589,7 +594,12 @@ func (c *ChainConfig) String() string {
 		FeynmanTime = big.NewInt(0).SetUint64(*c.FeynmanTime)
 	}
 
-	return fmt.Sprintf("{ChainID: %v Homestead: %v DAO: %v DAOSupport: %v EIP150: %v EIP155: %v EIP158: %v Byzantium: %v Constantinople: %v Petersburg: %v Istanbul: %v, Muir Glacier: %v, Ramanujan: %v, Niels: %v, MirrorSync: %v, Bruno: %v, Berlin: %v, YOLO v3: %v, CatalystBlock: %v, London: %v, ArrowGlacier: %v, MergeFork:%v, Euler: %v, Gibbs: %v, Nano: %v, Moran: %v, Planck: %v,Luban: %v, Plato: %v, Hertz: %v, Hertzfix: %v, ShanghaiTime: %v, KeplerTime: %v, FeynmanTime: %v, Engine: %v}",
+	var CancunTime *big.Int
+	if c.CancunTime != nil {
+		CancunTime = big.NewInt(0).SetUint64(*c.CancunTime)
+	}
+
+	return fmt.Sprintf("{ChainID: %v Homestead: %v DAO: %v DAOSupport: %v EIP150: %v EIP155: %v EIP158: %v Byzantium: %v Constantinople: %v Petersburg: %v Istanbul: %v, Muir Glacier: %v, Ramanujan: %v, Niels: %v, MirrorSync: %v, Bruno: %v, Berlin: %v, YOLO v3: %v, CatalystBlock: %v, London: %v, ArrowGlacier: %v, MergeFork:%v, Euler: %v, Gibbs: %v, Nano: %v, Moran: %v, Planck: %v,Luban: %v, Plato: %v, Hertz: %v, Hertzfix: %v, ShanghaiTime: %v, KeplerTime: %v, FeynmanTime: %v, CancunTime: %v, Engine: %v}",
 		c.ChainID,
 		c.HomesteadBlock,
 		c.DAOForkBlock,
@@ -624,6 +634,7 @@
 		ShanghaiTime,
 		KeplerTime,
 		FeynmanTime,
+		CancunTime,
 		engine,
 	)
 }
@@ -938,7 +949,7 @@ func (c *ChainConfig) CheckConfigForkOrder() error {
 		{name: "hertzfixBlock", block: c.HertzfixBlock},
 		{name: "keplerTime", timestamp: c.KeplerTime},
 		{name: "feynmanTime", timestamp: c.FeynmanTime},
-		{name: "cancunTime", timestamp: c.CancunTime, optional: true},
+		{name: "cancunTime", timestamp: c.CancunTime},
 		{name: "pragueTime", timestamp: c.PragueTime, optional: true},
 		{name: "verkleTime", timestamp: c.VerkleTime, optional: true},
 	} {
diff --git a/params/protocol_params.go b/params/protocol_params.go
index b84fa148fc..4930f69efa 100644
--- a/params/protocol_params.go
+++ b/params/protocol_params.go
@@ -183,6 +183,9 @@ const (
 	BlobTxTargetBlobGasPerBlock = 3 * BlobTxBlobGasPerBlob // Target consumable blob gas for data blobs per block (for 1559-like pricing)
 	MaxBlobGasPerBlock          = 6 * BlobTxBlobGasPerBlob // Maximum consumable blob gas for data blobs per block
+
+	BlobReserveThreshold      = 18 * (24 * 3600) / 3 // keeps blob data available locally for 18 days (518400 blocks at a 3s block interval).
+	BlobExtraReserveThreshold = 1 * (24 * 3600) / 3  // extra retention for expired blobs to serve requests such as a remote peer that is still syncing; default 1 day (28800 blocks).
 )
 
 // Gas discount table for BLS12-381 G1 and G2 multi exponentiation operations