Peer scorer: fix weighted sorting #6981

Merged · 3 commits · Aug 13, 2020

beacon-chain/p2p/peers/score_block_providers.go (13 changes: 7 additions & 6 deletions)

@@ -15,20 +15,20 @@ import (

const (
// DefaultBlockProviderProcessedBatchWeight is a default reward weight of a processed batch of blocks.
-DefaultBlockProviderProcessedBatchWeight = float64(0.05)
+DefaultBlockProviderProcessedBatchWeight = float64(0.1)
// DefaultBlockProviderProcessedBlocksCap defines default value for processed blocks cap.
// e.g. 20 * 64 := 20 batches of size 64 (with 0.05 per batch reward, 20 batches result in score of 1.0).
-DefaultBlockProviderProcessedBlocksCap = uint64(20 * 64)
+DefaultBlockProviderProcessedBlocksCap = uint64(10 * 64)
// DefaultBlockProviderDecayInterval defines how often the decaying routine is called.
DefaultBlockProviderDecayInterval = 30 * time.Second
// DefaultBlockProviderDecay defines default blocks that are to be subtracted from stats on each
// decay interval. Effectively, this param provides minimum expected performance for a peer to remain
// high scorer.
-DefaultBlockProviderDecay = uint64(5 * 64)
+DefaultBlockProviderDecay = uint64(1 * 64)
// DefaultBlockProviderStalePeerRefreshInterval defines default interval at which peers should be given
// opportunity to provide blocks (their score gets boosted, up until they are selected for
// fetching).
-DefaultBlockProviderStalePeerRefreshInterval = 1 * time.Minute
+DefaultBlockProviderStalePeerRefreshInterval = 5 * time.Minute
)

// BlockProviderScorer represents block provider scoring service.
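
For context, the retuned defaults keep the maximum attainable block-provider score at 1.0: the cap now corresponds to 10 batches of size 64 worth 0.1 each, instead of 20 batches worth 0.05 each. A minimal arithmetic sketch of that relationship, assuming batches of size 64 as described in the comment above (constant names in the sketch are illustrative, not part of the diff):

package main

import "fmt"

func main() {
	const (
		batchSize   = uint64(64)      // assumed batch size, per the "batches of size 64" comment
		batchWeight = 0.1             // DefaultBlockProviderProcessedBatchWeight after this change
		blocksCap   = uint64(10 * 64) // DefaultBlockProviderProcessedBlocksCap after this change
	)
	maxScore := float64(blocksCap/batchSize) * batchWeight
	fmt.Println(maxScore) // 1, the same ceiling as the previous 20 batches * 0.05
}
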
@@ -209,14 +209,15 @@ func (s *BlockProviderScorer) WeightSorted(
nextPID := func(weights map[peer.ID]float64) peer.ID {
totalWeight := 0
for _, w := range weights {
-totalWeight += int(w)
+// Factor by 100, to allow weights in (0; 1) range.
+totalWeight += int(w * 100)
}
if totalWeight <= 0 {
return ""
}
rnd := r.Intn(totalWeight)
for pid, w := range weights {
-rnd -= int(w)
+rnd -= int(w * 100)
if rnd < 0 {
return pid
}
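The substantive fix is in the selection closure above: block-provider scores are fractional (0.1 per processed batch by default), so the old int(w) truncated every sub-1.0 weight to zero, totalWeight stayed at 0, and weighted selection never picked a peer. Below is a self-contained sketch mirroring that selection logic, with peer.ID replaced by plain strings and a hypothetical pickWeighted helper (names and the demo weights are illustrative only):

package main

import (
	"fmt"
	"math/rand"
)

// pickWeighted mirrors the nextPID logic above: each weight is scaled by 100
// before truncating to int, so fractional scores in the (0; 1) range still
// contribute to totalWeight. Without the scaling, int(w) turns every sub-1.0
// weight into 0, totalWeight collapses to 0, and nothing is ever selected.
func pickWeighted(r *rand.Rand, weights map[string]float64) string {
	totalWeight := 0
	for _, w := range weights {
		totalWeight += int(w * 100)
	}
	if totalWeight <= 0 {
		return ""
	}
	rnd := r.Intn(totalWeight)
	for pid, w := range weights {
		rnd -= int(w * 100)
		if rnd < 0 {
			return pid
		}
	}
	return ""
}

func main() {
	r := rand.New(rand.NewSource(42))
	weights := map[string]float64{"peer1": 0.1, "peer2": 0.3, "peer3": 0.6}
	counts := map[string]int{}
	for i := 0; i < 10000; i++ {
		counts[pickWeighted(r, weights)]++
	}
	fmt.Println(counts) // roughly 10% / 30% / 60% of the picks
}
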

beacon-chain/p2p/peers/score_block_providers_test.go (20 changes: 11 additions & 9 deletions)

@@ -51,8 +51,9 @@ func TestPeerScorer_BlockProvider_Score(t *testing.T) {
{
name: "boost score of stale peer",
update: func(scorer *peers.BlockProviderScorer) {
+batchWeight := scorer.Params().ProcessedBatchWeight
scorer.IncrementProcessedBlocks("peer1", batchSize*3)
-assert.Equal(t, 0.05*3, scorer.Score("peer1"), "Unexpected score")
+assert.Equal(t, roundScore(batchWeight*3), scorer.Score("peer1"), "Unexpected score")
scorer.Touch("peer1", roughtime.Now().Add(-1*scorer.Params().StalePeerRefreshInterval))
},
check: func(scorer *peers.BlockProviderScorer) {
@@ -85,23 +86,26 @@ func TestPeerScorer_BlockProvider_Score(t *testing.T) {
scorer.IncrementProcessedBlocks("peer1", batchSize)
},
check: func(scorer *peers.BlockProviderScorer) {
-assert.Equal(t, roundScore(0.05), scorer.Score("peer1"), "Unexpected score")
+batchWeight := scorer.Params().ProcessedBatchWeight
+assert.Equal(t, roundScore(batchWeight), scorer.Score("peer1"), "Unexpected score")
},
},
{
name: "multiple batches",
update: func(scorer *peers.BlockProviderScorer) {
-scorer.IncrementProcessedBlocks("peer1", batchSize*13)
+scorer.IncrementProcessedBlocks("peer1", batchSize*7)
},
check: func(scorer *peers.BlockProviderScorer) {
-assert.Equal(t, roundScore(0.05*13), scorer.Score("peer1"), "Unexpected score")
+batchWeight := scorer.Params().ProcessedBatchWeight
+assert.Equal(t, roundScore(batchWeight*7), scorer.Score("peer1"), "Unexpected score")
},
},
{
name: "maximum score cap",
update: func(scorer *peers.BlockProviderScorer) {
+batchWeight := scorer.Params().ProcessedBatchWeight
scorer.IncrementProcessedBlocks("peer1", batchSize*2)
-assert.Equal(t, roundScore(0.05*2), scorer.Score("peer1"), "Unexpected score")
+assert.Equal(t, roundScore(batchWeight*2), scorer.Score("peer1"), "Unexpected score")
scorer.IncrementProcessedBlocks("peer1", scorer.Params().ProcessedBlocksCap)
},
check: func(scorer *peers.BlockProviderScorer) {
@@ -116,9 +120,7 @@ func TestPeerScorer_BlockProvider_Score(t *testing.T) {
peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
PeerLimit: 30,
ScorerParams: &peers.PeerScorerConfig{
-BlockProviderScorerConfig: &peers.BlockProviderScorerConfig{
-ProcessedBatchWeight: 0.05,
-},
+BlockProviderScorerConfig: &peers.BlockProviderScorerConfig{},
},
})
scorer := peerStatuses.Scorers().BlockProviderScorer()
@@ -150,7 +152,7 @@ func TestPeerScorer_BlockProvider_WeightSorted(t *testing.T) {
peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
ScorerParams: &peers.PeerScorerConfig{
BlockProviderScorerConfig: &peers.BlockProviderScorerConfig{
-ProcessedBatchWeight: 1,
+ProcessedBatchWeight: 0.01,
},
},
})
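
The test weight drops from 1 to 0.01, presumably to exercise a weight inside the (0; 1) range that the fix above targets: integer truncation was harmless for whole-number weights but zeroes out fractional ones unless they are scaled first. A tiny standalone illustration (values chosen for the example, not taken from the PR):

package main

import "fmt"

func main() {
	// With the old whole-number test weight, truncation happened to work ...
	fmt.Println(int(1.0)) // 1
	// ... but a fractional weight such as the new 0.01 collapses to zero,
	// unless it is scaled by 100 first, as WeightSorted now does.
	fmt.Println(int(0.01))       // 0
	fmt.Println(int(0.01 * 100)) // 1
}
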

beacon-chain/p2p/peers/scorer_manager_test.go (27 changes: 14 additions & 13 deletions)

@@ -109,8 +109,7 @@ func TestPeerScorer_PeerScorerManager_Score(t *testing.T) {
Threshold: 5,
},
BlockProviderScorerConfig: &peers.BlockProviderScorerConfig{
-ProcessedBatchWeight: 0.05,
-Decay:                64,
+Decay: 64,
},
},
})
@@ -156,54 +155,56 @@ func TestPeerScorer_PeerScorerManager_Score(t *testing.T) {
s, pids := setupScorer()
s1 := s.BlockProviderScorer()
zeroScore := s.BlockProviderScorer().MaxScore()
+batchWeight := s1.Params().ProcessedBatchWeight

// Partial batch.
s1.IncrementProcessedBlocks("peer1", batchSize/4)
assert.Equal(t, 0.0, s.Score("peer1"), "Unexpected %q score", "peer1")

// Single batch.
s1.IncrementProcessedBlocks("peer1", batchSize)
-assert.DeepEqual(t, pack(s, 0.05, zeroScore, zeroScore), peerScores(s, pids), "Unexpected scores")
+assert.DeepEqual(t, pack(s, batchWeight, zeroScore, zeroScore), peerScores(s, pids), "Unexpected scores")

// Multiple batches.
s1.IncrementProcessedBlocks("peer2", batchSize*4)
-assert.DeepEqual(t, pack(s, 0.05, 0.05*4, zeroScore), peerScores(s, pids), "Unexpected scores")
+assert.DeepEqual(t, pack(s, batchWeight, batchWeight*4, zeroScore), peerScores(s, pids), "Unexpected scores")

// Partial batch.
s1.IncrementProcessedBlocks("peer3", batchSize/2)
-assert.DeepEqual(t, pack(s, 0.05, 0.05*4, 0), peerScores(s, pids), "Unexpected scores")
+assert.DeepEqual(t, pack(s, batchWeight, batchWeight*4, 0), peerScores(s, pids), "Unexpected scores")

// See effect of decaying.
assert.Equal(t, batchSize+batchSize/4, s1.ProcessedBlocks("peer1"))
assert.Equal(t, batchSize*4, s1.ProcessedBlocks("peer2"))
assert.Equal(t, batchSize/2, s1.ProcessedBlocks("peer3"))
-assert.DeepEqual(t, pack(s, 0.05, 0.05*4, 0), peerScores(s, pids), "Unexpected scores")
+assert.DeepEqual(t, pack(s, batchWeight, batchWeight*4, 0), peerScores(s, pids), "Unexpected scores")
s1.Decay()
assert.Equal(t, batchSize/4, s1.ProcessedBlocks("peer1"))
assert.Equal(t, batchSize*3, s1.ProcessedBlocks("peer2"))
assert.Equal(t, uint64(0), s1.ProcessedBlocks("peer3"))
-assert.DeepEqual(t, pack(s, 0, 0.05*3, 0), peerScores(s, pids), "Unexpected scores")
+assert.DeepEqual(t, pack(s, 0, batchWeight*3, 0), peerScores(s, pids), "Unexpected scores")
})

t.Run("overall score", func(t *testing.T) {
// Full score, no penalty.
s, _ := setupScorer()
s1 := s.BlockProviderScorer()
s2 := s.BadResponsesScorer()
+batchWeight := s1.Params().ProcessedBatchWeight

-s1.IncrementProcessedBlocks("peer1", batchSize*10)
-assert.Equal(t, roundScore(0.05*10), s1.Score("peer1"))
+s1.IncrementProcessedBlocks("peer1", batchSize*5)
+assert.Equal(t, roundScore(batchWeight*5), s1.Score("peer1"))
// Now, adjust score by introducing penalty for bad responses.
s2.Increment("peer1")
s2.Increment("peer1")
assert.Equal(t, -0.4, s2.Score("peer1"), "Unexpected bad responses score")
-assert.Equal(t, roundScore(0.05*10), s1.Score("peer1"), "Unexpected block provider score")
-assert.Equal(t, roundScore(0.05*10-0.4), s.Score("peer1"), "Unexpected overall score")
+assert.Equal(t, roundScore(batchWeight*5), s1.Score("peer1"), "Unexpected block provider score")
+assert.Equal(t, roundScore(batchWeight*5-0.4), s.Score("peer1"), "Unexpected overall score")
// If peer continues to misbehave, score becomes negative.
s2.Increment("peer1")
assert.Equal(t, -0.6, s2.Score("peer1"), "Unexpected bad responses score")
-assert.Equal(t, roundScore(0.05*10), s1.Score("peer1"), "Unexpected block provider score")
-assert.Equal(t, -0.1, s.Score("peer1"), "Unexpected overall score")
+assert.Equal(t, roundScore(batchWeight*5), s1.Score("peer1"), "Unexpected block provider score")
+assert.Equal(t, roundScore(batchWeight*5-0.6), s.Score("peer1"), "Unexpected overall score")
})
}
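
A concrete check of the "overall score" expectations, assuming the default ProcessedBatchWeight of 0.1 introduced in this PR: the block-provider score is 5 * 0.1 = 0.5; after two bad responses the overall score is 0.5 - 0.4 = 0.1, and a third pushes it negative, 0.5 - 0.6 = -0.1, which matches what the previously hard-coded -0.1 encoded for the old 0.05 * 10 score.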
