Skip to content

Commit

Permalink
Merge pull request bnb-chain#5 from flywukong/IO-metrics
Browse files Browse the repository at this point in the history
IO metrics
  • Loading branch information
forcodedancing authored Feb 25, 2022
2 parents 2530f64 + f725981 commit be7b436
Show file tree
Hide file tree
Showing 8 changed files with 50 additions and 74 deletions.
19 changes: 9 additions & 10 deletions cachemetrics/get_gid.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@ package cachemetrics

import (
"github.com/petermattis/goid"
"sync/atomic"
)

var (
Expand All @@ -14,9 +15,15 @@ func Goid() int64 {
}

func UpdateMiningRoutineID(id int64) {
if MiningRoutineId != id {
MiningRoutineId = id
atomic.StoreInt64(&MiningRoutineId, id)
}

// IsMinerMainRoutineID reports whether id is the goroutine ID of the mining
// main routine. The read is atomic to pair with the atomic store performed by
// UpdateMiningRoutineID.
func IsMinerMainRoutineID(id int64) bool {
	return id == atomic.LoadInt64(&MiningRoutineId)
}

func UpdateSyncingRoutineID(id int64) {
Expand All @@ -32,11 +39,3 @@ func IsSyncMainRoutineID(id int64) bool {
}
return false
}

// IsMinerMainRoutineID reports whether id is the goroutine ID of the mining
// main routine. The comparison loads MiningRoutineId atomically: the writer
// (UpdateMiningRoutineID) uses atomic.StoreInt64, so a plain read here would
// be a data race.
func IsMinerMainRoutineID(id int64) bool {
	return id == atomic.LoadInt64(&MiningRoutineId)
}
7 changes: 1 addition & 6 deletions cmd/geth/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -203,12 +203,7 @@ func applyMetricConfig(ctx *cli.Context, cfg *gethConfig) {
if ctx.GlobalIsSet(utils.MetricsEnabledExpensiveFlag.Name) {
cfg.Metrics.EnabledExpensive = ctx.GlobalBool(utils.MetricsEnabledExpensiveFlag.Name)
}
if ctx.GlobalIsSet(utils.MetricsEnabledRecordIOFlag.Name) {
cfg.Metrics.EnabledExpensive = ctx.GlobalBool(utils.MetricsEnabledRecordIOFlag.Name)
}
if ctx.GlobalIsSet(utils.MetricsDisablePrefetchFlag.Name) {
cfg.Metrics.EnabledExpensive = ctx.GlobalBool(utils.MetricsDisablePrefetchFlag.Name)
}

if ctx.GlobalIsSet(utils.MetricsHTTPFlag.Name) {
cfg.Metrics.HTTP = ctx.GlobalString(utils.MetricsHTTPFlag.Name)
}
Expand Down
4 changes: 2 additions & 2 deletions core/state/snapshot/disklayer.go
Original file line number Diff line number Diff line change
Expand Up @@ -211,14 +211,14 @@ func (dl *diskLayer) Storage(accountHash, storageHash common.Hash) ([]byte, erro
// layer 2 miss
minerL2StorageMissMeter.Mark(1)
if hitInL3 {
syncL3StorageHitMeter.Mark(1)
minerL3StorageHitMeter.Mark(1)
cachemetrics.RecordMinerCacheDepth("MINER_L3_STORAGE")
cachemetrics.RecordMinerCacheMetrics("MINER_L3_STORAGE", start)
cachemetrics.RecordMinerTotalCosts("MINER_L3_STORAGE", start)
}
if hitInDisk {
// layer 3 miss
syncL3StorageMissMeter.Mark(1)
minerL3StorageMissMeter.Mark(1)
cachemetrics.RecordMinerCacheDepth("MINER_L4_STORAGE")
cachemetrics.RecordMinerCacheMetrics("MINER_L4_STORAGE", startGetInDisk)
cachemetrics.RecordMinerTotalCosts("MINER_L4_STORAGE", start)
Expand Down
51 changes: 27 additions & 24 deletions core/state/state_object.go
Original file line number Diff line number Diff line change
Expand Up @@ -19,11 +19,12 @@ package state
import (
"bytes"
"fmt"
"github.com/ethereum/go-ethereum/cachemetrics"
"io"
"math/big"
"time"

"github.com/ethereum/go-ethereum/cachemetrics"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/metrics"
Expand Down Expand Up @@ -192,24 +193,8 @@ func (s *StateObject) getTrie(db Database) Trie {

// GetState retrieves a value from the account storage trie.
func (s *StateObject) GetState(db Database, key common.Hash) common.Hash {
// If the fake storage is set, only lookup the state here(in the debugging mode)
if s.fakeStorage != nil {
return s.fakeStorage[key]
}
// If we have a dirty value for this state entry, return it
value, dirty := s.dirtyStorage[key]
if dirty {
return value
}
// Otherwise return the entry's original value
return s.GetCommittedState(db, key)
}

// GetCommittedState retrieves a value from the committed account storage trie.
func (s *StateObject) GetCommittedState(db Database, key common.Hash) common.Hash {
// If the fake storage is set, only lookup the state here(in the debugging mode)
start := time.Now()
hitInCache := false
start := time.Now()

defer func() {
routeid := cachemetrics.Goid()
Expand All @@ -227,18 +212,36 @@ func (s *StateObject) GetCommittedState(db Database, key common.Hash) common.Has
cachemetrics.RecordMinerTotalCosts("MINER_L1_STORAGE", start)
}
}()
// If the fake storage is set, only lookup the state here(in the debugging mode)
if s.fakeStorage != nil {
hitInCache = true
return s.fakeStorage[key]
}
// If we have a dirty value for this state entry, return it
value, dirty := s.dirtyStorage[key]
if dirty {
hitInCache = true
return value
}

// Otherwise return the entry's original value
return s.GetCommittedState(db, key, &hitInCache)
}

// GetCommittedState retrieves a value from the committed account storage trie.
func (s *StateObject) GetCommittedState(db Database, key common.Hash, hit *bool) common.Hash {
// If the fake storage is set, only lookup the state here(in the debugging mode)
if s.fakeStorage != nil {
return s.fakeStorage[key]
}
// If we have a pending write or clean cached, return that
if value, pending := s.pendingStorage[key]; pending {
hitInCache = true
*hit = true
return value
}

if value, cached := s.originStorage[key]; cached {
hitInCache = true
*hit = true
return value
}

Expand All @@ -249,7 +252,7 @@ func (s *StateObject) GetCommittedState(db Database, key common.Hash) common.Has
meter *time.Duration
)
readStart := time.Now()
if metrics.EnableIORecord {
if metrics.EnabledExpensive {
// If the snap is 'under construction', the first lookup may fail. If that
// happens, we don't want to double-count the time elapsed. Thus this
// dance with the metering.
Expand All @@ -260,7 +263,7 @@ func (s *StateObject) GetCommittedState(db Database, key common.Hash) common.Has
}()
}
if s.db.snap != nil {
if metrics.EnableIORecord {
if metrics.EnabledExpensive {
meter = &s.db.SnapshotStorageReads
}
// If the object was destructed in *this* block (and potentially resurrected),
Expand All @@ -282,7 +285,7 @@ func (s *StateObject) GetCommittedState(db Database, key common.Hash) common.Has
*meter += time.Since(readStart)
readStart = time.Now()
}
if metrics.EnableIORecord {
if metrics.EnabledExpensive {
meter = &s.db.StorageReads
}
if enc, err = s.getTrie(db).TryGet(key.Bytes()); err != nil {
Expand Down Expand Up @@ -372,10 +375,10 @@ func (s *StateObject) finalise(prefetch bool) {
slotsToPrefetch = append(slotsToPrefetch, common.CopyBytes(key[:])) // Copy needed for closure
}
}
overheadCost = time.Since(start)
if s.db.prefetcher != nil && prefetch && len(slotsToPrefetch) > 0 && s.data.Root != emptyRoot {
s.db.prefetcher.prefetch(s.data.Root, slotsToPrefetch, s.addrHash)
}
overheadCost = time.Since(start)
if len(s.dirtyStorage) > 0 {
s.dirtyStorage = make(Storage)
}
Expand Down
13 changes: 7 additions & 6 deletions core/state/statedb.go
Original file line number Diff line number Diff line change
Expand Up @@ -410,14 +410,14 @@ func (s *StateDB) GetState(addr common.Address, hash common.Hash) common.Hash {
if isSyncMainProcess {
syncGetDelay := time.Since(start)
totalSyncIOCost.Update(syncGetDelay)
totalMinerIOCounter.Inc(syncGetDelay.Nanoseconds())
totalSyncIOCounter.Inc(syncGetDelay.Nanoseconds())
l1AccountMeter.Mark(1)
}
// record metrics of mining main process
if isMinerMainProcess {
minerIOCost := time.Since(start)
totalMinerIOCost.Update(minerIOCost)
totalSyncIOCounter.Inc(minerIOCost.Nanoseconds())
totalMinerIOCounter.Inc(minerIOCost.Nanoseconds())
minerL1AccountMeter.Mark(1)
}
}()
Expand Down Expand Up @@ -474,8 +474,9 @@ func (s *StateDB) GetStorageProofByHash(a common.Address, key common.Hash) ([][]
// GetCommittedState retrieves a value from the given account's committed storage trie.
func (s *StateDB) GetCommittedState(addr common.Address, hash common.Hash) common.Hash {
stateObject := s.getStateObject(addr)
hit := false
if stateObject != nil {
return stateObject.GetCommittedState(s.db, hash)
return stateObject.GetCommittedState(s.db, hash, &hit)
}
return common.Hash{}
}
Expand Down Expand Up @@ -666,7 +667,6 @@ func (s *StateDB) TryPreload(block *types.Block, signer types.Signer) {
for account := range accounts {
accountsSlice = append(accountsSlice, account)
}
overheadCost = time.Since(start)

if len(accountsSlice) >= preLoadLimit && len(accountsSlice) > runtime.NumCPU() {
objsChan := make(chan []*StateObject, runtime.NumCPU())
Expand All @@ -688,6 +688,7 @@ func (s *StateDB) TryPreload(block *types.Block, signer types.Signer) {
}
}
}
overheadCost = time.Since(start)
}

func (s *StateDB) preloadStateObject(address []common.Address) []*StateObject {
Expand Down Expand Up @@ -760,7 +761,7 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *StateObject {
err error
)
if s.snap != nil {
if metrics.EnableIORecord {
if metrics.EnabledExpensive {
defer func(start time.Time) { s.SnapshotAccountReads += time.Since(start) }(time.Now())
}
var acc *snapshot.Account
Expand Down Expand Up @@ -792,7 +793,7 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *StateObject {
}
s.trie = tr
}
if metrics.EnableIORecord {
if metrics.EnabledExpensive {
defer func(start time.Time) { s.AccountReads += time.Since(start) }(time.Now())
}
enc, err := s.trie.TryGet(addr.Bytes())
Expand Down
7 changes: 4 additions & 3 deletions core/state/trie_prefetcher.go
Original file line number Diff line number Diff line change
Expand Up @@ -187,27 +187,28 @@ func (p *triePrefetcher) prefetch(root common.Hash, keys [][]byte, accountHash c
if p.fetches != nil {
return
}
var overheadCost time.Duration
start := time.Now()
defer func() {
goid := cachemetrics.Goid()
isSyncMainProcess := cachemetrics.IsSyncMainRoutineID(goid)
isMinerMainProcess := cachemetrics.IsMinerMainRoutineID(goid)

if isSyncMainProcess {
overheadCost := time.Since(start)
syncNewPrefetchCost.Update(overheadCost)
syncNewPrefetchCounter.Inc(overheadCost.Nanoseconds())
}
if isMinerMainProcess {
overheadCost := time.Since(start)
mineNewPrefetchCost.Update(overheadCost)
minerNewPrefetchCounter.Inc(overheadCost.Nanoseconds())
}
}()
// Active fetcher, schedule the retrievals
fetcher := p.fetchers[root]
if fetcher == nil {
start := time.Now()
fetcher = newSubfetcher(p.db, root, accountHash)
p.fetchers[root] = fetcher
overheadCost = time.Since(start)
}
fetcher.schedule(keys)
}
Expand Down
2 changes: 0 additions & 2 deletions metrics/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -20,8 +20,6 @@ package metrics
type Config struct {
Enabled bool `toml:",omitempty"`
EnabledExpensive bool `toml:",omitempty"`
EnableIORecord bool `toml:",omitempty"`
DisablePrefetch bool `toml:",omitempty"`
HTTP string `toml:",omitempty"`
Port int `toml:",omitempty"`
EnableInfluxDB bool `toml:",omitempty"`
Expand Down
21 changes: 0 additions & 21 deletions metrics/metrics.go
Original file line number Diff line number Diff line change
Expand Up @@ -26,19 +26,11 @@ var Enabled = false
// for health monitoring and debug metrics that might impact runtime performance.
var EnabledExpensive = false

// EnableIORecord is flag to determine whether record of accumulated IO time statedb
var EnableIORecord = false

// DisablePrefetch is flag to determine whether disable prefetch in IO process
var DisablePrefetch = false

// enablerFlags is the CLI flag names to use to enable metrics collections.
var enablerFlags = []string{"metrics"}

// expensiveEnablerFlags is the CLI flag names to use to enable metrics collections.
var expensiveEnablerFlags = []string{"metrics.expensive"}
var prefetchDisablerFlags = []string{"metrics.noprefetch"}
var ioRecordEnablerFlags = []string{"metrics.iorecord"}

// Init enables or disables the metrics system. Since we need this to run before
// any other code gets to create meters and timers, we'll actually do an ugly hack
Expand All @@ -60,19 +52,6 @@ func init() {
}
}

for _, enabler := range prefetchDisablerFlags {
if !DisablePrefetch && flag == enabler {
log.Info("disable prefetch when metrics collection")
DisablePrefetch = true
}
}

for _, enabler := range ioRecordEnablerFlags {
if !EnableIORecord && flag == enabler {
log.Info("Enabling io record when metrics collection")
EnableIORecord = true
}
}
}
}

Expand Down

0 comments on commit be7b436

Please sign in to comment.