resync (#1164)
* resync

* reapply fix
ceyonur authored May 7, 2024
1 parent 02f673e commit 3fbfb7d
Showing 7 changed files with 414 additions and 86 deletions.
114 changes: 114 additions & 0 deletions core/state/snapshot/context.go
@@ -0,0 +1,114 @@
// (c) 2019-2024, Ava Labs, Inc.
//
// This file is a derived work, based on the go-ethereum library whose original
// notices appear below.
//
// It is distributed under a license compatible with the licensing terms of the
// original code from which it is derived.
//
// Much love to the original authors for their work.
// **********
// Copyright 2022 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package snapshot

import (
"encoding/binary"
"fmt"
"math"
"time"

"golang.org/x/exp/slog"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
)

type generatorStats struct {
wiping chan struct{} // Notification channel if wiping is in progress
origin uint64 // Origin prefix where generation started
start time.Time // Timestamp when generation started
accounts uint64 // Number of accounts indexed(generated or recovered)
slots uint64 // Number of storage slots indexed(generated or recovered)
storage common.StorageSize // Total account and storage slot size(generation or recovery)
}

// Info creates a contextual info-level log with the given message and the context pulled
// from the internally maintained statistics.
func (gs *generatorStats) Info(msg string, root common.Hash, marker []byte) {
gs.log(log.LvlInfo, msg, root, marker)
}

// Debug creates a contextual debug-level log with the given message and the context pulled
// from the internally maintained statistics.
func (gs *generatorStats) Debug(msg string, root common.Hash, marker []byte) {
gs.log(log.LvlDebug, msg, root, marker)
}

// log creates a contextual log with the given message and the context pulled
// from the internally maintained statistics.
func (gs *generatorStats) log(level slog.Level, msg string, root common.Hash, marker []byte) {
var ctx []interface{}
if root != (common.Hash{}) {
ctx = append(ctx, []interface{}{"root", root}...)
}
// Figure out whether we're after or within an account
switch len(marker) {
case common.HashLength:
ctx = append(ctx, []interface{}{"at", common.BytesToHash(marker)}...)
case 2 * common.HashLength:
ctx = append(ctx, []interface{}{
"in", common.BytesToHash(marker[:common.HashLength]),
"at", common.BytesToHash(marker[common.HashLength:]),
}...)
}
// Add the usual measurements
ctx = append(ctx, []interface{}{
"accounts", gs.accounts,
"slots", gs.slots,
"storage", gs.storage,
"elapsed", common.PrettyDuration(time.Since(gs.start)),
}...)
// Calculate the estimated indexing time based on current stats
if len(marker) > 0 {
if done := binary.BigEndian.Uint64(marker[:8]) - gs.origin; done > 0 {
left := math.MaxUint64 - binary.BigEndian.Uint64(marker[:8])

speed := done/uint64(time.Since(gs.start)/time.Millisecond+1) + 1 // +1s to avoid division by zero
ctx = append(ctx, []interface{}{
"eta", common.PrettyDuration(time.Duration(left/speed) * time.Millisecond),
}...)
}
}

switch level {
case log.LvlTrace:
log.Trace(msg, ctx...)
case log.LvlDebug:
log.Debug(msg, ctx...)
case log.LvlInfo:
log.Info(msg, ctx...)
case log.LevelWarn:
log.Warn(msg, ctx...)
case log.LevelError:
log.Error(msg, ctx...)
case log.LevelCrit:
log.Crit(msg, ctx...)
default:
log.Error(fmt.Sprintf("log with invalid log level %s: %s", level, msg), ctx...)
}
}
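The eta value logged above treats the first eight bytes of the generation marker as a position in the 64-bit account keyspace: done is marker minus origin, left is MaxUint64 minus marker, and speed is keys processed per millisecond since generation started. A minimal, standalone Go sketch of that arithmetic, using hypothetical progress values:

package main

import (
	"fmt"
	"math"
	"time"
)

func main() {
	// Hypothetical values for illustration: generation started at the top of the
	// keyspace and the marker currently sits about a quarter of the way through.
	var (
		origin  uint64 = 0
		marker  uint64 = math.MaxUint64 / 4
		elapsed        = 10 * time.Minute
	)

	done := marker - origin                 // keys covered so far
	left := uint64(math.MaxUint64) - marker // keys still to cover

	// Keys per millisecond; the +1 terms guard against division by zero,
	// mirroring the snapshot code above.
	speed := done/uint64(elapsed/time.Millisecond+1) + 1
	eta := time.Duration(left/speed) * time.Millisecond

	fmt.Println("eta:", eta) // roughly 30m: three times the elapsed 10m at 25% progress
}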
81 changes: 0 additions & 81 deletions core/state/snapshot/generate.go
@@ -28,18 +28,14 @@ package snapshot

import (
"bytes"
"encoding/binary"
"fmt"
"time"

"golang.org/x/exp/slog"

"github.com/ava-labs/subnet-evm/core/rawdb"
"github.com/ava-labs/subnet-evm/core/types"
"github.com/ava-labs/subnet-evm/trie"
"github.com/ava-labs/subnet-evm/utils"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
@@ -50,83 +46,6 @@ const (
snapshotCacheStatsUpdateFrequency = 1000 // update stats from the snapshot fastcache once per 1000 ops
)

// generatorStats is a collection of statistics gathered by the snapshot generator
// for logging purposes.
type generatorStats struct {
wiping chan struct{} // Notification channel if wiping is in progress
origin uint64 // Origin prefix where generation started
start time.Time // Timestamp when generation started
accounts uint64 // Number of accounts indexed(generated or recovered)
slots uint64 // Number of storage slots indexed(generated or recovered)
storage common.StorageSize // Total account and storage slot size(generation or recovery)
}

// Info creates a contextual info-level log with the given message and the context pulled
// from the internally maintained statistics.
func (gs *generatorStats) Info(msg string, root common.Hash, marker []byte) {
gs.log(log.LvlInfo, msg, root, marker)
}

// Debug creates a contextual debug-level log with the given message and the context pulled
// from the internally maintained statistics.
func (gs *generatorStats) Debug(msg string, root common.Hash, marker []byte) {
gs.log(log.LvlDebug, msg, root, marker)
}

// log creates a contextual log with the given message and the context pulled
// from the internally maintained statistics.
func (gs *generatorStats) log(level slog.Level, msg string, root common.Hash, marker []byte) {
var ctx []interface{}
if root != (common.Hash{}) {
ctx = append(ctx, []interface{}{"root", root}...)
}
// Figure out whether we're after or within an account
switch len(marker) {
case common.HashLength:
ctx = append(ctx, []interface{}{"at", common.BytesToHash(marker)}...)
case 2 * common.HashLength:
ctx = append(ctx, []interface{}{
"in", common.BytesToHash(marker[:common.HashLength]),
"at", common.BytesToHash(marker[common.HashLength:]),
}...)
}
// Add the usual measurements
ctx = append(ctx, []interface{}{
"accounts", gs.accounts,
"slots", gs.slots,
"storage", gs.storage,
"elapsed", common.PrettyDuration(time.Since(gs.start)),
}...)
// Calculate the estimated indexing time based on current stats
if len(marker) > 0 {
if done := binary.BigEndian.Uint64(marker[:8]) - gs.origin; done > 0 {
left := math.MaxUint64 - binary.BigEndian.Uint64(marker[:8])

speed := done/uint64(time.Since(gs.start)/time.Millisecond+1) + 1 // +1s to avoid division by zero
ctx = append(ctx, []interface{}{
"eta", common.PrettyDuration(time.Duration(left/speed) * time.Millisecond),
}...)
}
}

switch level {
case log.LvlTrace:
log.Trace(msg, ctx...)
case log.LvlDebug:
log.Debug(msg, ctx...)
case log.LvlInfo:
log.Info(msg, ctx...)
case log.LevelWarn:
log.Warn(msg, ctx...)
case log.LevelError:
log.Error(msg, ctx...)
case log.LevelCrit:
log.Crit(msg, ctx...)
default:
log.Error(fmt.Sprintf("log with invalid log level %s: %s", level, msg), ctx...)
}
}

// generateSnapshot regenerates a brand new snapshot based on an existing state
// database and head block asynchronously. The snapshot is returned immediately
// and generation is continued in the background until done.
54 changes: 54 additions & 0 deletions core/vm/contracts_fuzz_test.go
@@ -0,0 +1,54 @@
// (c) 2019-2024, Ava Labs, Inc.
//
// This file is a derived work, based on the go-ethereum library whose original
// notices appear below.
//
// It is distributed under a license compatible with the licensing terms of the
// original code from which it is derived.
//
// Much love to the original authors for their work.
// **********
// Copyright 2023 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package vm

import (
"testing"

"github.com/ethereum/go-ethereum/common"
)

func FuzzPrecompiledContracts(f *testing.F) {
// Create list of addresses
var addrs []common.Address
for k := range allPrecompiles {
addrs = append(addrs, k)
}
f.Fuzz(func(t *testing.T, addr uint8, input []byte) {
a := addrs[int(addr)%len(addrs)]
p := allPrecompiles[a]
gas := p.RequiredGas(input)
if gas > 10_000_000 {
return
}
inWant := string(input)
RunPrecompiledContract(p, input, gas)
if inHave := string(input); inWant != inHave {
t.Errorf("Precompiled %v modified input data", a)
}
})
}
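FuzzPrecompiledContracts selects a precompile from allPrecompiles using the fuzzed address byte, skips inputs whose RequiredGas exceeds 10M, and asserts that RunPrecompiledContract does not mutate its input slice. Assuming a Go toolchain with native fuzzing support (Go 1.18 or later), it can be exercised from the repository root with the standard fuzzing flags, for example:

go test -run '^$' -fuzz FuzzPrecompiledContracts -fuzztime 30s ./core/vm

The -fuzztime flag bounds the run; omitting it fuzzes until interrupted, and the seed corpus alone runs as a regular test via go test -run FuzzPrecompiledContracts ./core/vm.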
2 changes: 1 addition & 1 deletion core/vm/jump_table.go
@@ -90,7 +90,7 @@ func validate(jt JumpTable) JumpTable {

func newCancunInstructionSet() JumpTable {
instructionSet := newDurangoInstructionSet()
- enable4844(&instructionSet) // EIP-4844 (DATAHASH opcode)
+ enable4844(&instructionSet) // EIP-4844 (BLOBHASH opcode)
enable7516(&instructionSet) // EIP-7516 (BLOBBASEFEE opcode)
enable1153(&instructionSet) // EIP-1153 "Transient Storage"
enable5656(&instructionSet) // EIP-5656 (MCOPY opcode)