Merge pull request bnb-chain#1061 from qinglin89/dev-merge
fix: upstream patches
unclezoro authored Aug 23, 2022
2 parents 2e22fe3 + 94885ae commit f2b1f3f
Showing 24 changed files with 159 additions and 127 deletions.
2 changes: 1 addition & 1 deletion appveyor.yml
@@ -13,7 +13,7 @@ environment:
GETH_MINGW: 'C:\msys64\mingw32'

install:
- git submodule update --init --depth 1
- git submodule update --init --depth 1 --recursive
- go version

for:
11 changes: 8 additions & 3 deletions build/ci.go
@@ -342,12 +342,17 @@ func downloadLinter(cachedir string) string {

csdb := build.MustLoadChecksums("build/checksums.txt")
arch := runtime.GOARCH
if arch == "arm" {
ext := ".tar.gz"

if runtime.GOOS == "windows" {
ext = ".zip"
}
if arch == "arm" {
arch += "v" + os.Getenv("GOARM")
}
base := fmt.Sprintf("golangci-lint-%s-%s-%s", version, runtime.GOOS, arch)
url := fmt.Sprintf("https://github.com/golangci/golangci-lint/releases/download/v%s/%s.tar.gz", version, base)
archivePath := filepath.Join(cachedir, base+".tar.gz")
url := fmt.Sprintf("https://github.com/golangci/golangci-lint/releases/download/v%s/%s%s", version, base, ext)
archivePath := filepath.Join(cachedir, base+ext)
if err := csdb.DownloadFile(url, archivePath); err != nil {
log.Fatal(err)
}
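The hunk above makes the linter download platform-aware: golangci-lint publishes .zip archives for Windows and .tar.gz everywhere else, and 32-bit ARM builds carry the GOARM variant in the archive name. Below is a minimal standalone sketch of that selection logic, outside the ci.go build tooling; the helper name and the version passed in main are illustrative only, not part of the patch.

// Sketch of the archive-selection logic introduced by this hunk.
package main

import (
	"fmt"
	"os"
	"runtime"
)

func linterArchive(version string) (url, file string) {
	arch := runtime.GOARCH
	ext := ".tar.gz"
	if runtime.GOOS == "windows" {
		ext = ".zip" // Windows releases ship as zip archives
	}
	if arch == "arm" {
		arch += "v" + os.Getenv("GOARM") // e.g. armv6, armv7
	}
	base := fmt.Sprintf("golangci-lint-%s-%s-%s", version, runtime.GOOS, arch)
	url = fmt.Sprintf("https://github.com/golangci/golangci-lint/releases/download/v%s/%s%s", version, base, ext)
	return url, base + ext
}

func main() {
	u, f := linterArchive("1.45.2") // version is only an example
	fmt.Println(u)
	fmt.Println(f)
}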
13 changes: 5 additions & 8 deletions cmd/geth/snapshot.go
@@ -601,8 +601,7 @@ func traverseState(ctx *cli.Context) error {
}
}
if !bytes.Equal(acc.CodeHash, emptyCode) {
code := rawdb.ReadCode(chaindb, common.BytesToHash(acc.CodeHash))
if len(code) == 0 {
if !rawdb.HasCode(chaindb, common.BytesToHash(acc.CodeHash)) {
log.Error("Code is missing", "hash", common.BytesToHash(acc.CodeHash))
return errors.New("missing code")
}
@@ -673,11 +672,10 @@ func traverseRawState(ctx *cli.Context) error {
nodes += 1
node := accIter.Hash()

// Check the present for non-empty hash node(embedded node doesn't
// have their own hash).
if node != (common.Hash{}) {
// Check the present for non-empty hash node(embedded node doesn't
// have their own hash).
blob := rawdb.ReadTrieNode(chaindb, node)
if len(blob) == 0 {
if !rawdb.HasTrieNode(chaindb, node) {
log.Error("Missing trie node(account)", "hash", node)
return errors.New("missing account")
}
@@ -721,8 +719,7 @@ func traverseRawState(ctx *cli.Context) error {
}
}
if !bytes.Equal(acc.CodeHash, emptyCode) {
code := rawdb.ReadCode(chaindb, common.BytesToHash(acc.CodeHash))
if len(code) == 0 {
if !rawdb.HasCode(chaindb, common.BytesToHash(acc.CodeHash)) {
log.Error("Code is missing", "account", common.BytesToHash(accIter.LeafKey()))
return errors.New("missing code")
}
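These hunks replace the "read the full blob, then test its length" pattern with a direct existence query (rawdb.HasCode / rawdb.HasTrieNode), so the traversal no longer copies contract code or trie nodes it only needs to know are present. A minimal sketch of that pattern over a toy key-value store follows; the interface and types are stand-ins, not the geth ethdb API.

// Sketch of the presence-check pattern adopted by this patch.
package main

import "fmt"

type kv interface {
	Get(key []byte) ([]byte, error)
	Has(key []byte) (bool, error)
}

// readThenCheck is the old shape: fetch the whole value just to test existence.
func readThenCheck(db kv, key []byte) bool {
	blob, _ := db.Get(key)
	return len(blob) != 0
}

// hasCheck is the new shape: ask the backend for existence only, so large
// code or trie-node blobs are never copied out of the database.
func hasCheck(db kv, key []byte) bool {
	ok, _ := db.Has(key)
	return ok
}

type memdb map[string][]byte

func (m memdb) Get(k []byte) ([]byte, error) { return m[string(k)], nil }
func (m memdb) Has(k []byte) (bool, error)   { _, ok := m[string(k)]; return ok, nil }

func main() {
	db := memdb{"code": {0x60, 0x00}}
	fmt.Println(readThenCheck(db, []byte("code")), hasCheck(db, []byte("code"))) // true true
	fmt.Println(readThenCheck(db, []byte("gone")), hasCheck(db, []byte("gone"))) // false false
}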
3 changes: 3 additions & 0 deletions core/blockchain.go
@@ -2919,6 +2919,9 @@ Error: %v
// of the header retrieval mechanisms already need to verify nonces, as well as
// because nonces can be verified sparsely, not needing to check each.
func (bc *BlockChain) InsertHeaderChain(chain []*types.Header, checkFreq int) (int, error) {
if len(chain) == 0 {
return 0, nil
}
start := time.Now()
if i, err := bc.hc.ValidateHeaderChain(chain, checkFreq); err != nil {
return i, err
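The new guard makes InsertHeaderChain a no-op for an empty slice, returning (0, nil) before the timer starts and before any validation or indexing can touch chain[0]. A hedged sketch of the same defensive shape, with simplified stand-in types:

// Sketch of the empty-input guard added above.
package main

import (
	"fmt"
	"time"
)

type header struct{ number uint64 }

// insertHeaderChain mirrors the guard: bail out before doing any timing,
// validation, or chain[0] / chain[len-1] indexing on empty input.
func insertHeaderChain(chain []*header) (int, error) {
	if len(chain) == 0 {
		return 0, nil
	}
	start := time.Now()
	first, last := chain[0].number, chain[len(chain)-1].number
	fmt.Printf("validating headers %d..%d (started %s)\n", first, last, start.Format(time.RFC3339))
	return 0, nil
}

func main() {
	fmt.Println(insertHeaderChain(nil))                     // 0 <nil>, nothing logged
	fmt.Println(insertHeaderChain([]*header{{100}, {101}})) // validates 100..101
}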
74 changes: 43 additions & 31 deletions core/rawdb/accessors_state.go
@@ -28,17 +28,6 @@ func ReadPreimage(db ethdb.KeyValueReader, hash common.Hash) []byte {
return data
}

// WritePreimages writes the provided set of preimages to the database.
func WritePreimages(db ethdb.KeyValueWriter, preimages map[common.Hash][]byte) {
for hash, preimage := range preimages {
if err := db.Put(preimageKey(hash), preimage); err != nil {
log.Crit("Failed to store trie preimage", "err", err)
}
}
preimageCounter.Inc(int64(len(preimages)))
preimageHitCounter.Inc(int64(len(preimages)))
}

// ReadCode retrieves the contract code of the provided code hash.
func ReadCode(db ethdb.KeyValueReader, hash common.Hash) []byte {
// Try with the prefixed code scheme first, if not then try with legacy
@@ -47,7 +36,7 @@ func ReadCode(db ethdb.KeyValueReader, hash common.Hash) []byte {
if len(data) != 0 {
return data
}
data, _ = db.Get(hash[:])
data, _ = db.Get(hash.Bytes())
return data
}

@@ -59,6 +48,24 @@ func ReadCodeWithPrefix(db ethdb.KeyValueReader, hash common.Hash) []byte {
return data
}

// ReadTrieNode retrieves the trie node of the provided hash.
func ReadTrieNode(db ethdb.KeyValueReader, hash common.Hash) []byte {
data, _ := db.Get(hash.Bytes())
return data
}

// HasCode checks if the contract code corresponding to the
// provided code hash is present in the db.
func HasCode(db ethdb.KeyValueReader, hash common.Hash) bool {
// Try with the prefixed code scheme first, if not then try with legacy
// scheme.
if ok := HasCodeWithPrefix(db, hash); ok {
return true
}
ok, _ := db.Has(hash.Bytes())
return ok
}

// HasCodeWithPrefix checks if the contract code corresponding to the
// provided code hash is present in the db. This function will only check
// presence using the prefix-scheme.
@@ -67,30 +74,28 @@ func HasCodeWithPrefix(db ethdb.KeyValueReader, hash common.Hash) bool {
return ok
}

// WriteCode writes the provided contract code database.
func WriteCode(db ethdb.KeyValueWriter, hash common.Hash, code []byte) {
if err := db.Put(codeKey(hash), code); err != nil {
log.Crit("Failed to store contract code", "err", err)
}
// HasTrieNode checks if the trie node with the provided hash is present in db.
func HasTrieNode(db ethdb.KeyValueReader, hash common.Hash) bool {
ok, _ := db.Has(hash.Bytes())
return ok
}

// DeleteCode deletes the specified contract code from the database.
func DeleteCode(db ethdb.KeyValueWriter, hash common.Hash) {
if err := db.Delete(codeKey(hash)); err != nil {
log.Crit("Failed to delete contract code", "err", err)
// WritePreimages writes the provided set of preimages to the database.
func WritePreimages(db ethdb.KeyValueWriter, preimages map[common.Hash][]byte) {
for hash, preimage := range preimages {
if err := db.Put(preimageKey(hash), preimage); err != nil {
log.Crit("Failed to store trie preimage", "err", err)
}
}
preimageCounter.Inc(int64(len(preimages)))
preimageHitCounter.Inc(int64(len(preimages)))
}

// ReadTrieNode retrieves the trie node of the provided hash.
func ReadTrieNode(db ethdb.KeyValueReader, hash common.Hash) []byte {
data, _ := db.Get(hash.Bytes())
return data
}

// HasTrieNode checks if the trie node with the provided hash is present in db.
func HasTrieNode(db ethdb.KeyValueReader, hash common.Hash) bool {
ok, _ := db.Has(hash.Bytes())
return ok
// WriteCode writes the provided contract code database.
func WriteCode(db ethdb.KeyValueWriter, hash common.Hash, code []byte) {
if err := db.Put(codeKey(hash), code); err != nil {
log.Crit("Failed to store contract code", "err", err)
}
}

// WriteTrieNode writes the provided trie node database.
@@ -100,6 +105,13 @@ func WriteTrieNode(db ethdb.KeyValueWriter, hash common.Hash, node []byte) {
}
}

// DeleteCode deletes the specified contract code from the database.
func DeleteCode(db ethdb.KeyValueWriter, hash common.Hash) {
if err := db.Delete(codeKey(hash)); err != nil {
log.Crit("Failed to delete contract code", "err", err)
}
}

// DeleteTrieNode deletes the specified trie node from the database.
func DeleteTrieNode(db ethdb.KeyValueWriter, hash common.Hash) {
if err := db.Delete(hash.Bytes()); err != nil {
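Besides grouping the read/has helpers together, this file adds HasCode, which mirrors ReadCode's two-scheme lookup: newer databases key contract code under a code prefix plus the hash, while legacy databases key it by the raw hash. A sketch of that prefixed-first, legacy-fallback lookup over a toy store; the prefix byte and helper names are illustrative, not the real rawdb schema constants.

// Sketch of the dual-scheme code lookup used by ReadCode/HasCode.
package main

import "fmt"

var codePrefix = []byte("c") // stand-in for the real code prefix

type store map[string][]byte

func (s store) Has(key []byte) (bool, error) { _, ok := s[string(key)]; return ok, nil }

func codeKey(hash []byte) []byte { return append(append([]byte{}, codePrefix...), hash...) }

// hasCode checks the prefixed scheme first, then falls back to the legacy
// layout where code was stored directly under its hash.
func hasCode(db store, hash []byte) bool {
	if ok, _ := db.Has(codeKey(hash)); ok {
		return true
	}
	ok, _ := db.Has(hash)
	return ok
}

func main() {
	hash := []byte("\xab\xcd")
	legacy := store{string(hash): {0x60}}          // code under the raw hash
	modern := store{string(codeKey(hash)): {0x60}} // code under the prefixed key
	fmt.Println(hasCode(legacy, hash), hasCode(modern, hash)) // true true
}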
6 changes: 3 additions & 3 deletions core/state/pruner/pruner.go
@@ -584,7 +584,7 @@ func (p *Pruner) Prune(root common.Hash) error {
// Ensure the root is really present. The weak assumption
// is the presence of root can indicate the presence of the
// entire trie.
if blob := rawdb.ReadTrieNode(p.db, root); len(blob) == 0 {
if !rawdb.HasTrieNode(p.db, root) {
// The special case is for clique based networks(rinkeby, goerli
// and some other private networks), it's possible that two
// consecutive blocks will have same root. In this case snapshot
@@ -598,7 +598,7 @@ func (p *Pruner) Prune(root common.Hash) error {
// as the pruning target.
var found bool
for i := len(layers) - 2; i >= 1; i-- {
if blob := rawdb.ReadTrieNode(p.db, layers[i].Root()); len(blob) != 0 {
if rawdb.HasTrieNode(p.db, layers[i].Root()) {
root = layers[i].Root()
found = true
log.Info("Selecting middle-layer as the pruning target", "root", root, "depth", i)
@@ -815,7 +815,7 @@ const warningLog = `
WARNING!
The clean trie cache is not found. Please delete it by yourself after the
The clean trie cache is not found. Please delete it by yourself after the
pruning. Remember don't start the Geth without deleting the clean trie cache
otherwise the entire database may be damaged!
38 changes: 13 additions & 25 deletions core/state/state_object.go
@@ -238,25 +238,10 @@ func (s *StateObject) GetCommittedState(db Database, key common.Hash) common.Has
}
// If no live objects are available, attempt to use snapshots
var (
enc []byte
err error
meter *time.Duration
enc []byte
err error
)
readStart := time.Now()
if metrics.EnabledExpensive {
// If the snap is 'under construction', the first lookup may fail. If that
// happens, we don't want to double-count the time elapsed. Thus this
// dance with the metering.
defer func() {
if meter != nil {
*meter += time.Since(readStart)
}
}()
}
if s.db.snap != nil {
if metrics.EnabledExpensive {
meter = &s.db.SnapshotStorageReads
}
// If the object was destructed in *this* block (and potentially resurrected),
// the storage has been cleared out, and we should *not* consult the previous
// snapshot about any storage values. The only possible alternatives are:
@@ -266,21 +251,24 @@ if _, destructed := s.db.snapDestructs[s.address]; destructed {
if _, destructed := s.db.snapDestructs[s.address]; destructed {
return common.Hash{}
}
start := time.Now()
enc, err = s.db.snap.Storage(s.addrHash, crypto.Keccak256Hash(key.Bytes()))
if metrics.EnabledExpensive {
s.db.SnapshotStorageReads += time.Since(start)
}
}

// If snapshot unavailable or reading from it failed, load from the database
if s.db.snap == nil || err != nil {
if meter != nil {
// If we already spent time checking the snapshot, account for it
// and reset the readStart
*meter += time.Since(readStart)
readStart = time.Now()
}
start := time.Now()
// if metrics.EnabledExpensive {
// meter = &s.db.StorageReads
// }
enc, err = s.getTrie(db).TryGet(key.Bytes())
if metrics.EnabledExpensive {
meter = &s.db.StorageReads
s.db.StorageReads += time.Since(start)
}
if enc, err = s.getTrie(db).TryGet(key.Bytes()); err != nil {
if err != nil {
s.setError(err)
return common.Hash{}
}
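This rewrite drops the deferred *time.Duration "meter" indirection: each read path now takes its own start timestamp and adds the elapsed time to the matching counter right after the call, which is simpler to follow and cannot double-count when the snapshot lookup falls through to the trie. A condensed sketch of the resulting shape; the counters and lookup functions are stand-ins, and the metrics.EnabledExpensive gate is omitted for brevity.

// Sketch of the inline metering shape adopted above.
package main

import (
	"errors"
	"fmt"
	"time"
)

type meters struct {
	snapshotReads time.Duration
	trieReads     time.Duration
}

func lookup(m *meters, snapAvailable bool) ([]byte, error) {
	var (
		enc []byte
		err error
	)
	if snapAvailable {
		start := time.Now()
		enc, err = snapStorage() // may fail while the snapshot is under construction
		m.snapshotReads += time.Since(start)
	}
	if !snapAvailable || err != nil {
		start := time.Now()
		enc, err = trieGet() // fallback read is metered on its own counter
		m.trieReads += time.Since(start)
	}
	return enc, err
}

func snapStorage() ([]byte, error) { return nil, errors.New("snapshot under construction") }
func trieGet() ([]byte, error)     { return []byte{0x01}, nil }

func main() {
	var m meters
	enc, err := lookup(&m, true)
	fmt.Println(enc, err, m.snapshotReads > 0, m.trieReads > 0)
}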
19 changes: 9 additions & 10 deletions core/state/statedb.go
@@ -657,16 +657,14 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *StateObject {
return obj
}
// If no live objects are available, attempt to use snapshots
var (
data *types.StateAccount
err error
)
var data *types.StateAccount
if s.snap != nil {
start := time.Now()
acc, err := s.snap.Account(crypto.HashData(s.hasher, addr.Bytes()))
if metrics.EnabledExpensive {
defer func(start time.Time) { s.SnapshotAccountReads += time.Since(start) }(time.Now())
s.SnapshotAccountReads += time.Since(start)
}
var acc *snapshot.Account
if acc, err = s.snap.Account(crypto.HashData(s.hasher, addr.Bytes())); err == nil {
if err == nil {
if acc == nil {
return nil
}
@@ -686,7 +684,7 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *StateObject {
}

// If snapshot unavailable or reading from it failed, load from the database
if s.snap == nil || err != nil {
if data == nil {
if s.trie == nil {
tr, err := s.db.OpenTrie(s.originalRoot)
if err != nil {
@@ -695,10 +693,11 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *StateObject {
}
s.trie = tr
}
start := time.Now()
enc, err := s.trie.TryGet(addr.Bytes())
if metrics.EnabledExpensive {
defer func(start time.Time) { s.AccountReads += time.Since(start) }(time.Now())
s.AccountReads += time.Since(start)
}
enc, err := s.trie.TryGet(addr.Bytes())
if err != nil {
s.setError(fmt.Errorf("getDeleteStateObject (%x) error: %v", addr.Bytes(), err))
return nil
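getDeletedStateObject gets the matching treatment for accounts: the error from the snapshot read is now scoped to the snapshot branch, and the trie fallback is keyed on whether account data was actually recovered (data == nil), with each path timed against its own counter. A compact sketch of that narrowed-scope fallback with toy types, not go-ethereum's:

// Sketch of the nil-keyed snapshot-then-trie fallback above.
package main

import (
	"errors"
	"fmt"
)

type account struct{ nonce uint64 }

func getAccount(haveSnap bool) *account {
	var data *account
	if haveSnap {
		acc, err := snapAccount() // err is scoped to the snapshot attempt only
		if err == nil {
			if acc == nil {
				return nil // snapshot is authoritative: the account does not exist
			}
			data = acc
		}
	}
	if data == nil { // no snapshot, or the snapshot read failed: use the trie
		data = trieAccount()
	}
	return data
}

func snapAccount() (*account, error) { return nil, errors.New("snapshot not generated yet") }
func trieAccount() *account          { return &account{nonce: 7} }

func main() {
	fmt.Println(getAccount(true).nonce, getAccount(false).nonce) // 7 7
}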
2 changes: 1 addition & 1 deletion core/vm/operations_acl.go
@@ -214,7 +214,7 @@ var (
// see gasSStoreEIP2200(...) in core/vm/gas_table.go for more info about how EIP 2200 is specified
gasSStoreEIP2929 = makeGasSStoreFunc(params.SstoreClearsScheduleRefundEIP2200)

// gasSStoreEIP2539 implements gas cost for SSTORE according to EPI-2539
// gasSStoreEIP2539 implements gas cost for SSTORE according to EIP-2539
// Replace `SSTORE_CLEARS_SCHEDULE` with `SSTORE_RESET_GAS + ACCESS_LIST_STORAGE_KEY_COST` (4,800)
gasSStoreEIP3529 = makeGasSStoreFunc(params.SstoreClearsScheduleRefundEIP3529)
)
32 changes: 17 additions & 15 deletions eth/downloader/downloader.go
@@ -1340,23 +1340,25 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
if chunkHeaders[len(chunkHeaders)-1].Number.Uint64()+uint64(fsHeaderForceVerify) > pivot {
frequency = 1
}
if n, err := d.lightchain.InsertHeaderChain(chunkHeaders, frequency); err != nil {
rollbackErr = err
if len(chunkHeaders) > 0 {
if n, err := d.lightchain.InsertHeaderChain(chunkHeaders, frequency); err != nil {
rollbackErr = err

// If some headers were inserted, track them as uncertain
if (mode == SnapSync || frequency > 1) && n > 0 && rollback == 0 {
rollback = chunkHeaders[0].Number.Uint64()
// If some headers were inserted, track them as uncertain
if (mode == SnapSync || frequency > 1) && n > 0 && rollback == 0 {
rollback = chunkHeaders[0].Number.Uint64()
}
log.Warn("Invalid header encountered", "number", chunkHeaders[n].Number, "hash", chunkHashes[n], "parent", chunkHeaders[n].ParentHash, "err", err)
return fmt.Errorf("%w: %v", errInvalidChain, err)
}
log.Warn("Invalid header encountered", "number", chunkHeaders[n].Number, "hash", chunkHashes[n], "parent", chunkHeaders[n].ParentHash, "err", err)
return fmt.Errorf("%w: %v", errInvalidChain, err)
}
// All verifications passed, track all headers within the alloted limits
if mode == SnapSync {
head := chunkHeaders[len(chunkHeaders)-1].Number.Uint64()
if head-rollback > uint64(fsHeaderSafetyNet) {
rollback = head - uint64(fsHeaderSafetyNet)
} else {
rollback = 1
// All verifications passed, track all headers within the alloted limits
if mode == SnapSync {
head := chunkHeaders[len(chunkHeaders)-1].Number.Uint64()
if head-rollback > uint64(fsHeaderSafetyNet) {
rollback = head - uint64(fsHeaderSafetyNet)
} else {
rollback = 1
}
}
}
}
5 changes: 4 additions & 1 deletion eth/protocols/snap/sync.go
@@ -2830,7 +2830,10 @@ func (s *Syncer) reportSyncProgress(force bool) {
new(big.Int).Mul(new(big.Int).SetUint64(uint64(synced)), hashSpace),
accountFills,
).Uint64())

// Don't report anything until we have a meaningful progress
if estBytes < 1.0 {
return
}
elapsed := time.Since(s.startTime)
estTime := elapsed / time.Duration(synced) * time.Duration(estBytes)
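The early return keeps reportSyncProgress quiet until the byte estimate is meaningful, and as a side effect it never reaches the ETA arithmetic right below with synced still at zero, where the division by time.Duration(synced) would be degenerate. A tiny sketch of the guard with simplified parameters and made-up numbers:

// Sketch of the progress-report guard added above.
package main

import (
	"fmt"
	"time"
)

func reportProgress(synced uint64, estBytes float64, elapsed time.Duration) {
	// Don't report anything until we have a meaningful progress
	if estBytes < 1.0 {
		return
	}
	estTime := elapsed / time.Duration(synced) * time.Duration(estBytes)
	fmt.Printf("synced %d bytes of ~%.0f, ETA %v\n", synced, estBytes, estTime-elapsed)
}

func main() {
	reportProgress(0, 0, time.Second)            // silent: nothing synced yet
	reportProgress(1<<20, 1<<30, 90*time.Second) // prints an estimate
}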

2 changes: 1 addition & 1 deletion eth/tracers/api.go
@@ -454,7 +454,7 @@ func (api *API) TraceBlockByHash(ctx context.Context, hash common.Hash, config *

// TraceBlock returns the structured logs created during the execution of EVM
// and returns them as a JSON object.
func (api *API) TraceBlock(ctx context.Context, blob []byte, config *TraceConfig) ([]*txTraceResult, error) {
func (api *API) TraceBlock(ctx context.Context, blob hexutil.Bytes, config *TraceConfig) ([]*txTraceResult, error) {
block := new(types.Block)
if err := rlp.Decode(bytes.NewReader(blob), block); err != nil {
return nil, fmt.Errorf("could not decode block: %v", err)
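Changing the parameter from []byte to hexutil.Bytes matters at the JSON-RPC boundary: encoding/json treats a plain []byte as base64, whereas Ethereum clients submit RLP-encoded blocks as 0x-prefixed hex, which hexutil.Bytes unmarshals natively. A small sketch of the difference; hexutil is go-ethereum's common/hexutil package, and the payload is just an example value.

// Sketch of why the TraceBlock parameter type changes for RPC decoding.
package main

import (
	"encoding/json"
	"fmt"

	"github.com/ethereum/go-ethereum/common/hexutil"
)

func main() {
	payload := []byte(`"0xdeadbeef"`) // how an RPC client sends a raw RLP blob

	var asBytes []byte
	fmt.Println(json.Unmarshal(payload, &asBytes)) // fails: []byte expects base64

	var asHex hexutil.Bytes
	fmt.Println(json.Unmarshal(payload, &asHex), asHex) // decodes the hex payload
}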