Merge branch 'develop'
Jolly23 committed May 20, 2024
2 parents 269de49 + 6603d6f commit 33fd4dd
Showing 8 changed files with 309 additions and 214 deletions.
21 changes: 18 additions & 3 deletions cmd/geth/dbcmd.go
@@ -106,12 +106,12 @@ Remove blockchain and state databases`,
dbInspectTrieCmd = &cli.Command{
Action: inspectTrie,
Name: "inspect-trie",
ArgsUsage: "<blocknum> <jobnum>",
ArgsUsage: "<blocknum> <jobnum> <topn>",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.SyncModeFlag,
},
Usage: "Inspect the MPT tree of the account and contract.",
Usage: "Inspect the MPT tree of the account and contract. 'blocknum' can be latest/snapshot/number. 'topn' means output the top N storage tries info ranked by the total number of TrieNodes",
Description: `This command iterates the entire WorldState.`,
}
dbCheckStateContentCmd = &cli.Command{
@@ -386,6 +386,7 @@ func inspectTrie(ctx *cli.Context) error {
blockNumber uint64
trieRootHash common.Hash
jobnum uint64
topN uint64
)

stack, _ := makeConfigNode(ctx)
@@ -411,12 +412,25 @@ func inspectTrie(ctx *cli.Context) error {

if ctx.NArg() == 1 {
jobnum = 1000
topN = 10
} else if ctx.NArg() == 2 {
var err error
jobnum, err = strconv.ParseUint(ctx.Args().Get(1), 10, 64)
if err != nil {
return fmt.Errorf("failed to Parse jobnum, Args[1]: %v, err: %v", ctx.Args().Get(1), err)
}
topN = 10
} else {
var err error
jobnum, err = strconv.ParseUint(ctx.Args().Get(1), 10, 64)
if err != nil {
return fmt.Errorf("failed to Parse jobnum, Args[1]: %v, err: %v", ctx.Args().Get(1), err)
}

topN, err = strconv.ParseUint(ctx.Args().Get(2), 10, 64)
if err != nil {
return fmt.Errorf("failed to Parse topn, Args[1]: %v, err: %v", ctx.Args().Get(1), err)
}
}

if blockNumber != math.MaxUint64 {
@@ -437,6 +451,7 @@ func inspectTrie(ctx *cli.Context) error {
if dbScheme == rawdb.PathScheme {
config = &triedb.Config{
PathDB: utils.PathDBConfigAddJournalFilePath(stack, pathdb.ReadOnly),
Cache: 0,
}
} else if dbScheme == rawdb.HashScheme {
config = triedb.HashDefaults
@@ -448,7 +463,7 @@ func inspectTrie(ctx *cli.Context) error {
fmt.Printf("fail to new trie tree, err: %v, rootHash: %v\n", err, trieRootHash.String())
return err
}
theInspect, err := trie.NewInspector(theTrie, triedb, trieRootHash, blockNumber, jobnum)
theInspect, err := trie.NewInspector(theTrie, triedb, trieRootHash, blockNumber, jobnum, int(topN))
if err != nil {
return err
}
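The argument handling above parses jobnum in two separate branches and only then the new topn value. A minimal sketch of an equivalent helper, assuming the same defaults (jobnum 1000, topn 10) and the cli/strconv/fmt imports already present in dbcmd.go; parseUintArg is hypothetical and not part of this commit:

// parseUintArg returns the positional argument at idx as a uint64,
// falling back to def when the argument is absent.
func parseUintArg(ctx *cli.Context, idx int, def uint64) (uint64, error) {
	if ctx.NArg() <= idx {
		return def, nil
	}
	v, err := strconv.ParseUint(ctx.Args().Get(idx), 10, 64)
	if err != nil {
		return 0, fmt.Errorf("failed to parse Args[%d]: %v, err: %v", idx, ctx.Args().Get(idx), err)
	}
	return v, nil
}

// Possible call sites inside inspectTrie:
//   jobnum, err := parseUintArg(ctx, 1, 1000)
//   topN, err := parseUintArg(ctx, 2, 10)

With all three positional arguments supplied, an invocation would look like "geth db inspect-trie latest 1000 10" (this assumes inspect-trie remains registered under the db command group, as the rest of this file suggests).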
51 changes: 51 additions & 0 deletions cmd/jsutils/check_blobtx.js
@@ -0,0 +1,51 @@
import { ethers } from "ethers";
import program from "commander";

// depends on ethers.js v6.11.0+ for 4844, https://github.com/ethers-io/ethers.js/releases/tag/v6.11.0
// BSC testnet enabled 4844 on block: 39539137
// Usage:
// nvm use 20
// node check_blobtx.js --rpc https://data-seed-prebsc-1-s1.binance.org:8545 --startNum 39539137
// node check_blobtx.js --rpc https://data-seed-prebsc-1-s1.binance.org:8545 --startNum 39539137 --endNum 40345994
program.option("--rpc <Rpc>", "Rpc Server URL");
program.option("--startNum <Num>", "start block", 0);
program.option("--endNum <Num>", "end block", 0);
program.parse(process.argv);

const provider = new ethers.JsonRpcProvider(program.rpc);
const main = async () => {
var startBlock = parseInt(program.startNum)
var endBlock = parseInt(program.endNum)
if (isNaN(endBlock) || isNaN(startBlock) || startBlock == 0) {
console.error("invalid input, --startNum", program.startNum, "--end", program.endNum)
return
}
// if --endNum is not specified, set it to the latest block number.
if (endBlock == 0) {
endBlock = await provider.getBlockNumber();
}
if (startBlock > endBlock) {
console.error("invalid input, startBlock:",startBlock, " endBlock:", endBlock);
return
}

for (let i = startBlock; i <= endBlock; i++) {
let blockData = await provider.getBlock(i);
console.log("startBlock:",startBlock, "endBlock:", endBlock, "curBlock", i, "blobGasUsed", blockData.blobGasUsed);
if (blockData.blobGasUsed == 0) {
continue
}
for (let txIndex = 0; txIndex<= blockData.transactions.length - 1; txIndex++) {
let txHash = blockData.transactions[txIndex]
let txData = await provider.getTransaction(txHash);
if (txData.type == 3) {
console.log("BlobTx in block:",i, " txIndex:", txIndex, " txHash:", txHash);
}
}
}
};
main().then(() => process.exit(0))
.catch((error) => {
console.error(error);
process.exit(1);
});
46 changes: 26 additions & 20 deletions core/block_validator.go
@@ -66,6 +66,31 @@ func NewBlockValidator(config *params.ChainConfig, blockchain *BlockChain, engin
return validator
}

// ValidateListsInBody validates that UncleHash, TxHash, and WithdrawalsHash correspond to the lists in the block body, respectively.
func ValidateListsInBody(block *types.Block) error {
header := block.Header()
if hash := types.CalcUncleHash(block.Uncles()); hash != header.UncleHash {
return fmt.Errorf("uncle root hash mismatch (header value %x, calculated %x)", header.UncleHash, hash)
}
if hash := types.DeriveSha(block.Transactions(), trie.NewStackTrie(nil)); hash != header.TxHash {
return fmt.Errorf("transaction root hash mismatch: have %x, want %x", hash, header.TxHash)
}
// Withdrawals are present after the Shanghai fork.
if header.WithdrawalsHash != nil {
// Withdrawals list must be present in body after Shanghai.
if block.Withdrawals() == nil {
return errors.New("missing withdrawals in block body")
}
if hash := types.DeriveSha(block.Withdrawals(), trie.NewStackTrie(nil)); hash != *header.WithdrawalsHash {
return fmt.Errorf("withdrawals root hash mismatch (header value %x, calculated %x)", *header.WithdrawalsHash, hash)
}
} else if block.Withdrawals() != nil { // Withdrawals turn from nil into an empty list when the BlockBody carries Sidecars
// Withdrawals are not allowed prior to the Shanghai fork
return errors.New("withdrawals present in block body")
}
return nil
}

// ValidateBody validates the given block's uncles and verifies the block
// header's transaction and uncle roots. The headers are assumed to be already
// validated at this point.
@@ -83,31 +108,12 @@ func (v *BlockValidator) ValidateBody(block *types.Block) error {
if err := v.engine.VerifyUncles(v.bc, block); err != nil {
return err
}
if hash := types.CalcUncleHash(block.Uncles()); hash != header.UncleHash {
return fmt.Errorf("uncle root hash mismatch (header value %x, calculated %x)", header.UncleHash, hash)
}

validateFuns := []func() error{
func() error {
if hash := types.DeriveSha(block.Transactions(), trie.NewStackTrie(nil)); hash != header.TxHash {
return fmt.Errorf("transaction root hash mismatch: have %x, want %x", hash, header.TxHash)
}
return nil
return ValidateListsInBody(block)
},
func() error {
// Withdrawals are present after the Shanghai fork.
if header.WithdrawalsHash != nil {
// Withdrawals list must be present in body after Shanghai.
if block.Withdrawals() == nil {
return errors.New("missing withdrawals in block body")
}
if hash := types.DeriveSha(block.Withdrawals(), trie.NewStackTrie(nil)); hash != *header.WithdrawalsHash {
return fmt.Errorf("withdrawals root hash mismatch (header value %x, calculated %x)", *header.WithdrawalsHash, hash)
}
} else if block.Withdrawals() != nil { // Withdrawals turn into empty from nil when BlockBody has Sidecars
// Withdrawals are not allowed prior to shanghai fork
return errors.New("withdrawals present in block body")
}
// Blob transactions may be present after the Cancun fork.
var blobs int
for i, tx := range block.Transactions() {
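ValidateListsInBody factors the uncle, transaction, and withdrawals commitments out of ValidateBody so the same checks can run on their own before a block is rebroadcast (the eth/handler.go change below does exactly that). A minimal sketch of such a standalone pre-broadcast check; the wrapper function is illustrative only and assumes the caller imports core, types, and log:

// rejectMismatchedBody reports whether a block should be dropped because its
// header commitments do not match the lists carried in its body.
func rejectMismatchedBody(block *types.Block) bool {
	if err := core.ValidateListsInBody(block); err != nil {
		log.Error("Block body does not match header", "number", block.Number(), "hash", block.Hash(), "err", err)
		return true
	}
	return false
}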
9 changes: 9 additions & 0 deletions core/blockchain_reader.go
@@ -511,3 +511,12 @@ func (bc *BlockChain) SubscribeBlockProcessingEvent(ch chan<- bool) event.Subscr
func (bc *BlockChain) SubscribeFinalizedHeaderEvent(ch chan<- FinalizedHeaderEvent) event.Subscription {
return bc.scope.Track(bc.finalizedHeaderFeed.Subscribe(ch))
}

// AncientTail retrieves the tail of the ancient blocks
func (bc *BlockChain) AncientTail() (uint64, error) {
tail, err := bc.db.Tail()
if err != nil {
return 0, err
}
return tail, nil
}
4 changes: 2 additions & 2 deletions core/rawdb/freezer.go
@@ -239,7 +239,7 @@ func (f *Freezer) Ancient(kind string, number uint64) ([]byte, error) {
// - if maxBytes is not specified, 'count' items will be returned if they are present.
func (f *Freezer) AncientRange(kind string, start, count, maxBytes uint64) ([][]byte, error) {
if table := f.tables[kind]; table != nil {
return table.RetrieveItems(start, count, maxBytes)
return table.RetrieveItems(start-f.offset, count, maxBytes)
}
return nil, errUnknownTable
}
@@ -252,7 +252,7 @@ func (f *Freezer) Ancients() (uint64, error) {
func (f *Freezer) TableAncients(kind string) (uint64, error) {
f.writeLock.RLock()
defer f.writeLock.RUnlock()
return f.tables[kind].items.Load(), nil
return f.tables[kind].items.Load() + f.offset, nil
}

// ItemAmountInAncient returns the actual length of current ancientDB.
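Both freezer fixes translate between chain-level item numbers and table-local indices, assuming f.offset records how many items have been pruned from the front of the freezer: AncientRange now converts the requested start into a local index before reading, and TableAncients converts the local item count back into a chain-level number. A sketch of that mapping as hypothetical helpers (they do not exist in this commit):

// toLocal maps a chain-level ancient number to a table-local index.
func (f *Freezer) toLocal(number uint64) uint64 {
	return number - f.offset
}

// toGlobal maps a table-local item count back to a chain-level number.
func (f *Freezer) toGlobal(items uint64) uint64 {
	return items + f.offset
}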
8 changes: 8 additions & 0 deletions eth/downloader/downloader.go
@@ -209,6 +209,9 @@ type BlockChain interface {

// UpdateChasingHead updates the remote best chain head, used by the DA check now.
UpdateChasingHead(head *types.Header)

// AncientTail retrieves the tail of the ancient blocks
AncientTail() (uint64, error)
}

type DownloadOption func(downloader *Downloader) *Downloader
@@ -797,6 +800,11 @@ func (d *Downloader) findAncestor(p *peerConnection, localHeight uint64, remoteH
// We're above the max reorg threshold, find the earliest fork point
floor = int64(localHeight - maxForkAncestry)
}
// if we have pruned too much history, reset the floor
if tail, err := d.blockchain.AncientTail(); err == nil && tail > uint64(floor) {
floor = int64(tail)
}

// If we're doing a light sync, ensure the floor doesn't go below the CHT, as
// all headers before that point will be missing.
if mode == LightSync {
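As a worked example of the new clamp (all numbers illustrative): with localHeight = 40,000,000 and maxForkAncestry = 90,000 the floor starts at 39,910,000; if the node has pruned its ancient store so that AncientTail returns 39,950,000, the floor is raised to 39,950,000, keeping the ancestor search inside the history the node still has on disk.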
34 changes: 15 additions & 19 deletions eth/handler.go
@@ -320,26 +320,22 @@ func newHandler(config *handlerConfig) (*handler, error) {
}

broadcastBlockWithCheck := func(block *types.Block, propagate bool) {
// All the block fetcher activities should be disabled
// after the transition. Print the warning log.
if h.merger.PoSFinalized() {
log.Warn("Unexpected validation activity", "hash", block.Hash(), "number", block.Number())
return
}
// Reject all the PoS style headers in the first place. No matter
// the chain has finished the transition or not, the PoS headers
// should only come from the trusted consensus layer instead of
// p2p network.
if beacon, ok := h.chain.Engine().(*beacon.Beacon); ok {
if beacon.IsPoSHeader(block.Header()) {
log.Warn("unexpected post-merge header")
return
}
}
if propagate {
if err := core.IsDataAvailable(h.chain, block); err != nil {
log.Error("Propagating block with invalid sidecars", "number", block.Number(), "hash", block.Hash(), "err", err)
return
checkErrs := make(chan error, 2)

go func() {
checkErrs <- core.ValidateListsInBody(block)
}()
go func() {
checkErrs <- core.IsDataAvailable(h.chain, block)
}()

for i := 0; i < cap(checkErrs); i++ {
err := <-checkErrs
if err != nil {
log.Error("Propagating invalid block", "number", block.Number(), "hash", block.Hash(), "err", err)
return
}
}
}
h.BroadcastBlock(block, propagate)
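The fan-out above runs both body checks concurrently and collects the results through a buffered channel. For comparison, a sketch of the same two checks using errgroup (golang.org/x/sync/errgroup); this is an alternative, not what the commit does:

// Run both validations concurrently and fail on the first error.
var g errgroup.Group
g.Go(func() error { return core.ValidateListsInBody(block) })
g.Go(func() error { return core.IsDataAvailable(h.chain, block) })
if err := g.Wait(); err != nil {
	log.Error("Propagating invalid block", "number", block.Number(), "hash", block.Hash(), "err", err)
	return
}

One behavioural difference: the channel version can return as soon as the first error arrives (the buffered channel lets the other goroutine finish in the background), while errgroup's Wait blocks until both checks have completed.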