Unify log fields (#13654)
* unify fields

* fix tests
rkapka authored Feb 22, 2024
1 parent 7a9608e commit 0b261cb
Showing 45 changed files with 190 additions and 190 deletions.
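This commit standardizes every logrus field key on lowerCamelCase. As a rough before/after illustration of the convention (a minimal sketch, not code taken from the commit; the field names and values here are hypothetical):

package main

import (
	log "github.com/sirupsen/logrus"
)

func main() {
	// Before: a mix of snake_case, space-separated, and UpperCamelCase keys,
	// e.g. "validator_index", "deposit root", "BeaconBlockRoot".
	// After this commit: one style everywhere.
	log.WithFields(log.Fields{
		"validatorIndex": 7,        // hypothetical values, for illustration only
		"blockRoot":      "0xabcd", // not taken from the real commit
	}).Info("Processed block")
}

The diffs below apply this rename mechanically at each call site; log values and messages are unchanged.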
8 changes: 4 additions & 4 deletions api/client/beacon/checkpoint.go
@@ -108,10 +108,10 @@ func DownloadFinalizedData(ctx context.Context, client *Client) (*OriginData, er
}

log.
WithField("block_slot", b.Block().Slot()).
WithField("state_slot", s.Slot()).
WithField("state_root", hexutil.Encode(sr[:])).
WithField("block_root", hexutil.Encode(br[:])).
WithField("blockSlot", b.Block().Slot()).
WithField("stateSlot", s.Slot()).
WithField("stateRoot", hexutil.Encode(sr[:])).
WithField("blockRoot", hexutil.Encode(br[:])).
Info("Downloaded checkpoint sync state and block.")
return &OriginData{
st: s,
4 changes: 2 additions & 2 deletions api/client/beacon/client.go
@@ -310,8 +310,8 @@ func (c *Client) SubmitChangeBLStoExecution(ctx context.Context, request []*stru
for _, failure := range errorJson.Failures {
w := request[failure.Index].Message
log.WithFields(log.Fields{
"validator_index": w.ValidatorIndex,
"withdrawal_address": w.ToExecutionAddress,
"validatorIndex": w.ValidatorIndex,
"withdrawalAddress": w.ToExecutionAddress,
}).Error(failure.Message)
}
return errors.Errorf("POST error %d: %s", errorJson.Code, errorJson.Message)
8 changes: 4 additions & 4 deletions api/client/builder/client.go
@@ -57,8 +57,8 @@ func (*requestLogger) observe(r *http.Request) (e error) {
b := bytes.NewBuffer(nil)
if r.Body == nil {
log.WithFields(log.Fields{
"body-base64": "(nil value)",
"url": r.URL.String(),
"bodyBase64": "(nil value)",
"url": r.URL.String(),
}).Info("builder http request")
return nil
}
@@ -74,8 +74,8 @@ func (*requestLogger) observe(r *http.Request) (e error) {
}
r.Body = io.NopCloser(b)
log.WithFields(log.Fields{
"body-base64": string(body),
"url": r.URL.String(),
"bodyBase64": string(body),
"url": r.URL.String(),
}).Info("builder http request")

return nil
2 changes: 1 addition & 1 deletion beacon-chain/blockchain/service.go
@@ -419,7 +419,7 @@ func (s *Service) startFromExecutionChain() error {
log.Error("event data is not type *statefeed.ChainStartedData")
return
}
log.WithField("starttime", data.StartTime).Debug("Received chain start event")
log.WithField("startTime", data.StartTime).Debug("Received chain start event")
s.onExecutionChainStart(s.ctx, data.StartTime)
return
}
8 changes: 4 additions & 4 deletions beacon-chain/cache/depositcache/deposits_cache.go
@@ -74,10 +74,10 @@ func (dc *DepositCache) InsertDeposit(ctx context.Context, d *ethpb.Deposit, blo
defer span.End()
if d == nil {
log.WithFields(logrus.Fields{
"block": blockNum,
"deposit": d,
"index": index,
"deposit root": hex.EncodeToString(depositRoot[:]),
"block": blockNum,
"deposit": d,
"index": index,
"depositRoot": hex.EncodeToString(depositRoot[:]),
}).Warn("Ignoring nil deposit insertion")
return errors.New("nil deposit inserted into the cache")
}
8 changes: 4 additions & 4 deletions beacon-chain/cache/depositsnapshot/deposit_inserter.go
@@ -33,10 +33,10 @@ func (c *Cache) InsertDeposit(ctx context.Context, d *ethpb.Deposit, blockNum ui
}
if d == nil {
log.WithFields(logrus.Fields{
"block": blockNum,
"deposit": d,
"index": index,
"deposit root": hex.EncodeToString(depositRoot[:]),
"block": blockNum,
"deposit": d,
"index": index,
"depositRoot": hex.EncodeToString(depositRoot[:]),
}).Warn("Ignoring nil deposit insertion")
return errors.New("nil deposit inserted into the cache")
}
2 changes: 1 addition & 1 deletion beacon-chain/execution/log_processing.go
@@ -269,7 +269,7 @@ func (s *Service) ProcessChainStart(genesisTime uint64, eth1BlockHash [32]byte,
}

log.WithFields(logrus.Fields{
"ChainStartTime": chainStartTime,
"chainStartTime": chainStartTime,
}).Info("Minimum number of validators reached for beacon-chain to start")
s.cfg.stateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.ChainStarted,
2 changes: 1 addition & 1 deletion beacon-chain/execution/service.go
@@ -639,7 +639,7 @@ func (s *Service) logTillChainStart(ctx context.Context) {
}

fields := logrus.Fields{
"Additional validators needed": valNeeded,
"additionalValidatorsNeeded": valNeeded,
}
if secondsLeft > 0 {
fields["Generating genesis state in"] = time.Duration(secondsLeft) * time.Second
36 changes: 18 additions & 18 deletions beacon-chain/monitor/process_attestation.go
@@ -44,11 +44,11 @@ func attestingIndices(ctx context.Context, state state.BeaconState, att *ethpb.A
// logMessageTimelyFlagsForIndex returns the log message with performance info for the attestation (head, source, target)
func logMessageTimelyFlagsForIndex(idx primitives.ValidatorIndex, data *ethpb.AttestationData) logrus.Fields {
return logrus.Fields{
"ValidatorIndex": idx,
"Slot": data.Slot,
"Source": fmt.Sprintf("%#x", bytesutil.Trunc(data.Source.Root)),
"Target": fmt.Sprintf("%#x", bytesutil.Trunc(data.Target.Root)),
"Head": fmt.Sprintf("%#x", bytesutil.Trunc(data.BeaconBlockRoot)),
"validatorIndex": idx,
"slot": data.Slot,
"source": fmt.Sprintf("%#x", bytesutil.Trunc(data.Source.Root)),
"target": fmt.Sprintf("%#x", bytesutil.Trunc(data.Target.Root)),
"head": fmt.Sprintf("%#x", bytesutil.Trunc(data.BeaconBlockRoot)),
}
}

@@ -146,12 +146,12 @@ func (s *Service) processIncludedAttestation(ctx context.Context, state state.Be
aggregatedPerf.totalCorrectTarget++
}
}
logFields["CorrectHead"] = latestPerf.timelyHead
logFields["CorrectSource"] = latestPerf.timelySource
logFields["CorrectTarget"] = latestPerf.timelyTarget
logFields["InclusionSlot"] = latestPerf.inclusionSlot
logFields["NewBalance"] = balance
logFields["BalanceChange"] = balanceChg
logFields["correctHead"] = latestPerf.timelyHead
logFields["correctSource"] = latestPerf.timelySource
logFields["correctTarget"] = latestPerf.timelyTarget
logFields["inclusionSlot"] = latestPerf.inclusionSlot
logFields["newBalance"] = balance
logFields["balanceChange"] = balanceChg

s.latestPerformance[primitives.ValidatorIndex(idx)] = latestPerf
s.aggregatedPerformance[primitives.ValidatorIndex(idx)] = aggregatedPerf
@@ -167,7 +167,7 @@ func (s *Service) processUnaggregatedAttestation(ctx context.Context, att *ethpb
root := bytesutil.ToBytes32(att.Data.BeaconBlockRoot)
st := s.config.StateGen.StateByRootIfCachedNoCopy(root)
if st == nil {
log.WithField("BeaconBlockRoot", fmt.Sprintf("%#x", bytesutil.Trunc(root[:]))).Debug(
log.WithField("beaconBlockRoot", fmt.Sprintf("%#x", bytesutil.Trunc(root[:]))).Debug(
"Skipping unaggregated attestation due to state not found in cache")
return
}
@@ -190,13 +190,13 @@ func (s *Service) processAggregatedAttestation(ctx context.Context, att *ethpb.A
defer s.Unlock()
if s.trackedIndex(att.AggregatorIndex) {
log.WithFields(logrus.Fields{
"AggregatorIndex": att.AggregatorIndex,
"Slot": att.Aggregate.Data.Slot,
"BeaconBlockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(
"aggregatorIndex": att.AggregatorIndex,
"slot": att.Aggregate.Data.Slot,
"beaconBlockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(
att.Aggregate.Data.BeaconBlockRoot)),
"SourceRoot": fmt.Sprintf("%#x", bytesutil.Trunc(
"sourceRoot": fmt.Sprintf("%#x", bytesutil.Trunc(
att.Aggregate.Data.Source.Root)),
"TargetRoot": fmt.Sprintf("%#x", bytesutil.Trunc(
"targetRoot": fmt.Sprintf("%#x", bytesutil.Trunc(
att.Aggregate.Data.Target.Root)),
}).Info("Processed attestation aggregation")
aggregatedPerf := s.aggregatedPerformance[att.AggregatorIndex]
Expand All @@ -209,7 +209,7 @@ func (s *Service) processAggregatedAttestation(ctx context.Context, att *ethpb.A
copy(root[:], att.Aggregate.Data.BeaconBlockRoot)
st := s.config.StateGen.StateByRootIfCachedNoCopy(root)
if st == nil {
log.WithField("BeaconBlockRoot", fmt.Sprintf("%#x", bytesutil.Trunc(root[:]))).Debug(
log.WithField("beaconBlockRoot", fmt.Sprintf("%#x", bytesutil.Trunc(root[:]))).Debug(
"Skipping aggregated attestation due to state not found in cache")
return
}
20 changes: 10 additions & 10 deletions beacon-chain/monitor/process_attestation_test.go
@@ -55,8 +55,8 @@ func TestProcessIncludedAttestationTwoTracked(t *testing.T) {
AggregationBits: bitfield.Bitlist{0b11, 0b1},
}
s.processIncludedAttestation(context.Background(), state, att)
wanted1 := "\"Attestation included\" BalanceChange=0 CorrectHead=true CorrectSource=true CorrectTarget=true Head=0x68656c6c6f2d InclusionSlot=2 NewBalance=32000000000 Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=2 prefix=monitor"
wanted2 := "\"Attestation included\" BalanceChange=100000000 CorrectHead=true CorrectSource=true CorrectTarget=true Head=0x68656c6c6f2d InclusionSlot=2 NewBalance=32000000000 Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=12 prefix=monitor"
wanted1 := "\"Attestation included\" balanceChange=0 correctHead=true correctSource=true correctTarget=true head=0x68656c6c6f2d inclusionSlot=2 newBalance=32000000000 prefix=monitor slot=1 source=0x68656c6c6f2d target=0x68656c6c6f2d validatorIndex=2"
wanted2 := "\"Attestation included\" balanceChange=100000000 correctHead=true correctSource=true correctTarget=true head=0x68656c6c6f2d inclusionSlot=2 newBalance=32000000000 prefix=monitor slot=1 source=0x68656c6c6f2d target=0x68656c6c6f2d validatorIndex=12"
require.LogsContain(t, hook, wanted1)
require.LogsContain(t, hook, wanted2)
}
@@ -124,8 +124,8 @@ func TestProcessUnaggregatedAttestationStateCached(t *testing.T) {
}
require.NoError(t, s.config.StateGen.SaveState(ctx, root, state))
s.processUnaggregatedAttestation(context.Background(), att)
wanted1 := "\"Processed unaggregated attestation\" Head=0x68656c6c6f2d Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=2 prefix=monitor"
wanted2 := "\"Processed unaggregated attestation\" Head=0x68656c6c6f2d Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=12 prefix=monitor"
wanted1 := "\"Processed unaggregated attestation\" head=0x68656c6c6f2d prefix=monitor slot=1 source=0x68656c6c6f2d target=0x68656c6c6f2d validatorIndex=2"
wanted2 := "\"Processed unaggregated attestation\" head=0x68656c6c6f2d prefix=monitor slot=1 source=0x68656c6c6f2d target=0x68656c6c6f2d validatorIndex=12"
require.LogsContain(t, hook, wanted1)
require.LogsContain(t, hook, wanted2)
}
@@ -162,7 +162,7 @@ func TestProcessAggregatedAttestationStateNotCached(t *testing.T) {
},
}
s.processAggregatedAttestation(ctx, att)
- require.LogsContain(t, hook, "\"Processed attestation aggregation\" AggregatorIndex=2 BeaconBlockRoot=0x000000000000 Slot=1 SourceRoot=0x68656c6c6f2d TargetRoot=0x68656c6c6f2d prefix=monitor")
+ require.LogsContain(t, hook, "\"Processed attestation aggregation\" aggregatorIndex=2 beaconBlockRoot=0x000000000000 prefix=monitor slot=1 sourceRoot=0x68656c6c6f2d targetRoot=0x68656c6c6f2d")
require.LogsContain(t, hook, "Skipping aggregated attestation due to state not found in cache")
logrus.SetLevel(logrus.InfoLevel)
}
@@ -200,9 +200,9 @@ func TestProcessAggregatedAttestationStateCached(t *testing.T) {

require.NoError(t, s.config.StateGen.SaveState(ctx, root, state))
s.processAggregatedAttestation(ctx, att)
- require.LogsContain(t, hook, "\"Processed attestation aggregation\" AggregatorIndex=2 BeaconBlockRoot=0x68656c6c6f2d Slot=1 SourceRoot=0x68656c6c6f2d TargetRoot=0x68656c6c6f2d prefix=monitor")
- require.LogsContain(t, hook, "\"Processed aggregated attestation\" Head=0x68656c6c6f2d Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=2 prefix=monitor")
- require.LogsDoNotContain(t, hook, "\"Processed aggregated attestation\" Head=0x68656c6c6f2d Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=12 prefix=monitor")
+ require.LogsContain(t, hook, "\"Processed attestation aggregation\" aggregatorIndex=2 beaconBlockRoot=0x68656c6c6f2d prefix=monitor slot=1 sourceRoot=0x68656c6c6f2d targetRoot=0x68656c6c6f2d")
+ require.LogsContain(t, hook, "\"Processed aggregated attestation\" head=0x68656c6c6f2d prefix=monitor slot=1 source=0x68656c6c6f2d target=0x68656c6c6f2d validatorIndex=2")
+ require.LogsDoNotContain(t, hook, "\"Processed aggregated attestation\" head=0x68656c6c6f2d prefix=monitor slot=1 source=0x68656c6c6f2d target=0x68656c6c6f2d validatorIndex=12")
}

func TestProcessAttestations(t *testing.T) {
@@ -240,8 +240,8 @@ func TestProcessAttestations(t *testing.T) {
wrappedBlock, err := blocks.NewBeaconBlock(block)
require.NoError(t, err)
s.processAttestations(ctx, state, wrappedBlock)
wanted1 := "\"Attestation included\" BalanceChange=0 CorrectHead=true CorrectSource=true CorrectTarget=true Head=0x68656c6c6f2d InclusionSlot=2 NewBalance=32000000000 Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=2 prefix=monitor"
wanted2 := "\"Attestation included\" BalanceChange=100000000 CorrectHead=true CorrectSource=true CorrectTarget=true Head=0x68656c6c6f2d InclusionSlot=2 NewBalance=32000000000 Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=12 prefix=monitor"
wanted1 := "\"Attestation included\" balanceChange=0 correctHead=true correctSource=true correctTarget=true head=0x68656c6c6f2d inclusionSlot=2 newBalance=32000000000 prefix=monitor slot=1 source=0x68656c6c6f2d target=0x68656c6c6f2d validatorIndex=2"
wanted2 := "\"Attestation included\" balanceChange=100000000 correctHead=true correctSource=true correctTarget=true head=0x68656c6c6f2d inclusionSlot=2 newBalance=32000000000 prefix=monitor slot=1 source=0x68656c6c6f2d target=0x68656c6c6f2d validatorIndex=12"
require.LogsContain(t, hook, wanted1)
require.LogsContain(t, hook, wanted2)

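The expected strings in these tests change in order as well as in casing because logrus's default text formatter sorts field keys alphabetically, so the lowercased keys now interleave with prefix=monitor instead of preceding it. Below is a minimal sketch of checking a renamed field with the upstream logrus test hook; it deliberately uses the stock hook rather than Prysm's require.LogsContain helper, and the package name and values are hypothetical:

package monitor_test

import (
	"testing"

	"github.com/sirupsen/logrus"
	"github.com/sirupsen/logrus/hooks/test"
)

func TestCamelCaseLogFields(t *testing.T) {
	// NewNullLogger returns a logger whose entries are captured by the hook
	// instead of being written anywhere.
	logger, hook := test.NewNullLogger()

	logger.WithFields(logrus.Fields{
		"validatorIndex": 2, // hypothetical values for illustration
		"slot":           1,
	}).Info("Attestation included")

	entry := hook.LastEntry()
	if entry == nil || entry.Data["validatorIndex"] != 2 {
		t.Fatal("expected an entry carrying the validatorIndex field")
	}
}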
72 changes: 36 additions & 36 deletions beacon-chain/monitor/process_block.go
@@ -39,7 +39,7 @@ func (s *Service) processBlock(ctx context.Context, b interfaces.ReadOnlySignedB
}
st := s.config.StateGen.StateByRootIfCachedNoCopy(root)
if st == nil {
log.WithField("BeaconBlockRoot", fmt.Sprintf("%#x", bytesutil.Trunc(root[:]))).Debug(
log.WithField("beaconBlockRoot", fmt.Sprintf("%#x", bytesutil.Trunc(root[:]))).Debug(
"Skipping block collection due to state not found in cache")
return
}
@@ -90,13 +90,13 @@ func (s *Service) processProposedBlock(state state.BeaconState, root [32]byte, b

parentRoot := blk.ParentRoot()
log.WithFields(logrus.Fields{
"ProposerIndex": blk.ProposerIndex(),
"Slot": blk.Slot(),
"Version": blk.Version(),
"ParentRoot": fmt.Sprintf("%#x", bytesutil.Trunc(parentRoot[:])),
"BlockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(root[:])),
"NewBalance": balance,
"BalanceChange": balanceChg,
"proposerIndex": blk.ProposerIndex(),
"slot": blk.Slot(),
"version": blk.Version(),
"parentRoot": fmt.Sprintf("%#x", bytesutil.Trunc(parentRoot[:])),
"blockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(root[:])),
"newBalance": balance,
"balanceChange": balanceChg,
}).Info("Proposed beacon block was included")
}
}
@@ -109,11 +109,11 @@ func (s *Service) processSlashings(blk interfaces.ReadOnlyBeaconBlock) {
idx := slashing.Header_1.Header.ProposerIndex
if s.trackedIndex(idx) {
log.WithFields(logrus.Fields{
"ProposerIndex": idx,
"Slot": blk.Slot(),
"SlashingSlot": slashing.Header_1.Header.Slot,
"BodyRoot1": fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Header_1.Header.BodyRoot)),
"BodyRoot2": fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Header_2.Header.BodyRoot)),
"proposerIndex": idx,
"slot": blk.Slot(),
"slashingSlot": slashing.Header_1.Header.Slot,
"bodyRoot1": fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Header_1.Header.BodyRoot)),
"bodyRoot2": fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Header_2.Header.BodyRoot)),
}).Info("Proposer slashing was included")
}
}
@@ -122,16 +122,16 @@ func (s *Service) processSlashings(blk interfaces.ReadOnlyBeaconBlock) {
for _, idx := range blocks.SlashableAttesterIndices(slashing) {
if s.trackedIndex(primitives.ValidatorIndex(idx)) {
log.WithFields(logrus.Fields{
"AttesterIndex": idx,
"BlockInclusionSlot": blk.Slot(),
"AttestationSlot1": slashing.Attestation_1.Data.Slot,
"BeaconBlockRoot1": fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Attestation_1.Data.BeaconBlockRoot)),
"SourceEpoch1": slashing.Attestation_1.Data.Source.Epoch,
"TargetEpoch1": slashing.Attestation_1.Data.Target.Epoch,
"AttestationSlot2": slashing.Attestation_2.Data.Slot,
"BeaconBlockRoot2": fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Attestation_2.Data.BeaconBlockRoot)),
"SourceEpoch2": slashing.Attestation_2.Data.Source.Epoch,
"TargetEpoch2": slashing.Attestation_2.Data.Target.Epoch,
"attesterIndex": idx,
"blockInclusionSlot": blk.Slot(),
"attestationSlot1": slashing.Attestation_1.Data.Slot,
"beaconBlockRoot1": fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Attestation_1.Data.BeaconBlockRoot)),
"sourceEpoch1": slashing.Attestation_1.Data.Source.Epoch,
"targetEpoch1": slashing.Attestation_1.Data.Target.Epoch,
"attestationSlot2": slashing.Attestation_2.Data.Slot,
"beaconBlockRoot2": fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Attestation_2.Data.BeaconBlockRoot)),
"sourceEpoch2": slashing.Attestation_2.Data.Source.Epoch,
"targetEpoch2": slashing.Attestation_2.Data.Target.Epoch,
}).Info("Attester slashing was included")
}
}
@@ -159,19 +159,19 @@ func (s *Service) logAggregatedPerformance() {
percentCorrectTarget := float64(p.totalCorrectTarget) / float64(p.totalAttestedCount)

log.WithFields(logrus.Fields{
"ValidatorIndex": idx,
"StartEpoch": p.startEpoch,
"StartBalance": p.startBalance,
"TotalRequested": p.totalRequestedCount,
"AttestationInclusion": fmt.Sprintf("%.2f%%", percentAtt*100),
"BalanceChangePct": fmt.Sprintf("%.2f%%", percentBal*100),
"CorrectlyVotedSourcePct": fmt.Sprintf("%.2f%%", percentCorrectSource*100),
"CorrectlyVotedTargetPct": fmt.Sprintf("%.2f%%", percentCorrectTarget*100),
"CorrectlyVotedHeadPct": fmt.Sprintf("%.2f%%", percentCorrectHead*100),
"AverageInclusionDistance": fmt.Sprintf("%.1f", percentDistance),
"TotalProposedBlocks": p.totalProposedCount,
"TotalAggregations": p.totalAggregations,
"TotalSyncContributions": p.totalSyncCommitteeContributions,
"validatorIndex": idx,
"startEpoch": p.startEpoch,
"startBalance": p.startBalance,
"totalRequested": p.totalRequestedCount,
"attestationInclusion": fmt.Sprintf("%.2f%%", percentAtt*100),
"balanceChangePct": fmt.Sprintf("%.2f%%", percentBal*100),
"correctlyVotedSourcePct": fmt.Sprintf("%.2f%%", percentCorrectSource*100),
"correctlyVotedTargetPct": fmt.Sprintf("%.2f%%", percentCorrectTarget*100),
"correctlyVotedHeadPct": fmt.Sprintf("%.2f%%", percentCorrectHead*100),
"averageInclusionDistance": fmt.Sprintf("%.1f", percentDistance),
"totalProposedBlocks": p.totalProposedCount,
"totalAggregations": p.totalAggregations,
"totalSyncContributions": p.totalSyncCommitteeContributions,
}).Info("Aggregated performance since launch")
}
}
(The remaining changed files are not shown here.)
