Merge pull request #538 from deso-protocol/mf/generate-pos-leader-schedules

Mf/generate pos leader schedules
mattfoley8 authored May 30, 2023
2 parents 905624c + a0567a7 commit bd3b99c
Showing 3 changed files with 375 additions and 0 deletions.
11 changes: 11 additions & 0 deletions lib/constants.go
@@ -607,6 +607,11 @@ type DeSoParams struct {
// TODO: Move this to GlobalParamsEntry.
ValidatorJailEpochDuration uint64

// LeaderScheduleMaxNumValidators is the maximum number of validators that
// are included when generating a new Proof-of-Stake leader schedule.
// TODO: Move this to GlobalParamsEntry.
LeaderScheduleMaxNumValidators uint64

ForkHeights ForkHeights

EncoderMigrationHeights *EncoderMigrationHeights
@@ -985,6 +990,9 @@ var DeSoMainnetParams = DeSoParams{
// Jailed validators can be unjailed after a minimum of N elapsed epochs.
ValidatorJailEpochDuration: uint64(3),

// The max number of validators included in a leader schedule.
LeaderScheduleMaxNumValidators: uint64(100),

ForkHeights: MainnetForkHeights,
EncoderMigrationHeights: GetEncoderMigrationHeights(&MainnetForkHeights),
EncoderMigrationHeightsList: GetEncoderMigrationHeightsList(&MainnetForkHeights),
@@ -1219,6 +1227,9 @@ var DeSoTestnetParams = DeSoParams{
// Jailed validators can be unjailed after a minimum of N elapsed epochs.
ValidatorJailEpochDuration: uint64(3),

// The max number of validators included in a leader schedule.
LeaderScheduleMaxNumValidators: uint64(100),

ForkHeights: TestnetForkHeights,
EncoderMigrationHeights: GetEncoderMigrationHeights(&TestnetForkHeights),
EncoderMigrationHeightsList: GetEncoderMigrationHeightsList(&TestnetForkHeights),
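
For context before the new file below: GenerateLeaderSchedule passes this param straight to GetTopActiveValidatorsByStake, so validators outside the top N by stake can never appear in a leader schedule. A minimal sketch of that cap semantics, using a plain uint64 stake list and a hypothetical topNByStake helper as stand-ins for the repo's ValidatorEntry ordering:

package main

import (
	"fmt"
	"sort"
)

// topNByStake is an illustrative stand-in for GetTopActiveValidatorsByStake:
// sort descending by stake and keep at most n entries.
func topNByStake(stakes []uint64, n uint64) []uint64 {
	sorted := append([]uint64{}, stakes...)
	sort.Slice(sorted, func(i, j int) bool { return sorted[i] > sorted[j] })
	if uint64(len(sorted)) > n {
		sorted = sorted[:n]
	}
	return sorted
}

func main() {
	// With n = 2, the validator staking only 10 nanos is never eligible.
	fmt.Println(topNByStake([]uint64{700, 10, 600}, 2)) // prints [700 600]
}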
106 changes: 106 additions & 0 deletions lib/pos_leader_schedule.go
@@ -0,0 +1,106 @@
package lib

import (
"crypto/sha256"
"github.com/holiman/uint256"
"github.com/pkg/errors"
)

func (bav *UtxoView) GenerateLeaderSchedule() ([]*PKID, error) {
// Retrieve CurrentRandomSeedHash.
currentRandomSeedHash, err := bav.GetCurrentRandomSeedHash()
if err != nil {
return nil, errors.Wrapf(err, "UtxoView.GenerateLeaderSchedule: error retrieving CurrentRandomSeedHash: ")
}

// Retrieve top, active validators ordered by stake.
validatorEntries, err := bav.GetTopActiveValidatorsByStake(bav.Params.LeaderScheduleMaxNumValidators)
if err != nil {
return nil, errors.Wrapf(err, "UtxoView.GenerateLeaderSchedule: error retrieving top ValidatorEntries: ")
}
if len(validatorEntries) == 0 {
return []*PKID{}, nil
}

// Sum TotalStakeAmountNanos.
totalStakeAmountNanos := uint256.NewInt()
for _, validatorEntry := range validatorEntries {
totalStakeAmountNanos, err = SafeUint256().Add(totalStakeAmountNanos, validatorEntry.TotalStakeAmountNanos)
if err != nil {
return nil, errors.Wrapf(err, "UtxoView.GenerateLeaderSchedule: error summing TotalStakeAmountNanos: ")
}
}

// Pseudocode for leader-selection algorithm:
// Note this is an O(N^2) algorithm where N is the number of validators we include.
// While len(LeaderSchedule) < len(ValidatorEntries):
// Hash the CurrentRandomSeedHash and generate a new RandomUint256.
// Take RandomUint256 modulo TotalStakeAmountNanos.
// For each ValidatorEntry:
// Skip if ValidatorPKID has already been added to the leader schedule.
// If the sum of the ValidatorEntry.TotalStakeAmountNanos seen so far >= RandomUint256:
// Add ValidatorPKID to LeaderSchedule.
// TotalStakeAmountNanos -= ValidatorEntry.TotalStakeAmountNanos.
// Break out of the inner loop.
var leaderSchedule []*PKID

// We also track a set of ValidatorPKIDs that have already been
// added to the LeaderSchedule so that, when iterating over
// ValidatorEntries, we can skip them with an O(1) membership check.
leaderSchedulePKIDs := NewSet([]PKID{})

for len(leaderSchedule) < len(validatorEntries) {
// Hash the CurrentRandomSeedHash each iteration. This derives a
// deterministic sequence of pseudorandom values from the same seed.
currentRandomSHA256 := sha256.Sum256(currentRandomSeedHash.ToBytes())
currentRandomSeedHash, err = (&RandomSeedHash{}).FromBytes(currentRandomSHA256[:])
if err != nil {
return nil, errors.Wrapf(err, "UtxoView.GenerateLeaderSchedule: error hashing CurrentRandomSeedHash: ")
}

// Take RandomUint256 % TotalStakeAmountNanos.
randomUint256 := uint256.NewInt().Mod(currentRandomSeedHash.ToUint256(), totalStakeAmountNanos)

// Keep track of the stake seen so far in this loop.
sumStakeAmountNanos := uint256.NewInt()

for _, validatorEntry := range validatorEntries {
// Skip if ValidatorEntry has already been added to the leader schedule.
if leaderSchedulePKIDs.Includes(*validatorEntry.ValidatorPKID) {
continue
}

// Add the ValidatorEntry.TotalStakeAmountNanos to the stake seen so far.
sumStakeAmountNanos, err = SafeUint256().Add(sumStakeAmountNanos, validatorEntry.TotalStakeAmountNanos)
if err != nil {
return nil, errors.Wrapf(err, "UtxoView.GenerateLeaderSchedule: error summing TotalStakeAmountNanos: ")
}

// If the sum of the stake seen so far is less than the RandomUint256, skip this validator.
if sumStakeAmountNanos.Lt(randomUint256) {
continue
}

// If we get to this point, the current validator is the
// one we should add to the leader schedule next.

// Add the current ValidatorPKID to the leaderSchedule.
leaderSchedule = append(leaderSchedule, validatorEntry.ValidatorPKID)
leaderSchedulePKIDs.Add(*validatorEntry.ValidatorPKID)

// Subtract the ValidatorEntry.TotalStakeAmountNanos from the TotalStakeAmountNanos.
totalStakeAmountNanos, err = SafeUint256().Sub(totalStakeAmountNanos, validatorEntry.TotalStakeAmountNanos)
if err != nil {
return nil, errors.Wrapf(err, "UtxoView.GenerateLeaderSchedule: error subtracting TotalStakeAmountNanos: ")
}

// The current validator has been added to the leader schedule.
// Break out of this inner loop, generate a new RandomUint256,
// and find the next stake-weighted validator to add to the
// leader schedule.
break
}
}

return leaderSchedule, nil
}
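
To make the sampling-without-replacement loop above concrete, here is a minimal, self-contained sketch of the same technique. It swaps the repo's types for stand-ins (math/big instead of uint256, a plain [32]byte seed instead of RandomSeedHash, and a hypothetical validator struct), so it illustrates the algorithm rather than reproducing the UtxoView implementation:

package main

import (
	"crypto/sha256"
	"fmt"
	"math/big"
)

// validator is a hypothetical stand-in for the repo's ValidatorEntry.
type validator struct {
	Name  string
	Stake *big.Int
}

// generateLeaderSchedule mirrors the loop above: each iteration re-hashes
// the seed, reduces it modulo the remaining total stake, and walks the
// validators until the running stake sum reaches the random value.
func generateLeaderSchedule(seed [32]byte, validators []validator) []string {
	remaining := append([]validator{}, validators...)
	totalStake := big.NewInt(0)
	for _, v := range remaining {
		totalStake.Add(totalStake, v.Stake)
	}

	var schedule []string
	for len(remaining) > 0 && totalStake.Sign() > 0 {
		// Re-hashing the seed yields a deterministic sequence of
		// pseudorandom values, so the same seed and stake distribution
		// always produce the same schedule.
		seed = sha256.Sum256(seed[:])
		random := new(big.Int).SetBytes(seed[:])
		random.Mod(random, totalStake)

		// A validator's chance of being picked in a given round is
		// proportional to its share of the remaining stake.
		sum := big.NewInt(0)
		for i, v := range remaining {
			sum.Add(sum, v.Stake)
			if sum.Cmp(random) >= 0 {
				schedule = append(schedule, v.Name)
				totalStake.Sub(totalStake, v.Stake)
				remaining = append(remaining[:i], remaining[i+1:]...)
				break
			}
		}
	}
	return schedule
}

func main() {
	seed := sha256.Sum256([]byte("example-seed"))
	fmt.Println(generateLeaderSchedule(seed, []validator{
		{Name: "m0", Stake: big.NewInt(10)},
		{Name: "m1", Stake: big.NewInt(20)},
		{Name: "m2", Stake: big.NewInt(700)},
	}))
}

Because each draw re-hashes the seed, the schedule is fully determined by the initial seed and the stake distribution, which is exactly the determinism the test file below checks for.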
258 changes: 258 additions & 0 deletions lib/pos_leader_schedule_test.go
@@ -0,0 +1,258 @@
//go:build relic

package lib

import (
"crypto/sha256"
"fmt"
"github.com/holiman/uint256"
"github.com/stretchr/testify/require"
"testing"
)

func TestGenerateLeaderSchedule(t *testing.T) {
// Initialize fork heights.
setBalanceModelBlockHeights()
defer resetBalanceModelBlockHeights()

// Initialize test chain and miner.
chain, params, db := NewLowDifficultyBlockchain(t)
mempool, miner := NewTestMiner(t, chain, params, true)

// Initialize PoS txn types block height.
params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight = uint32(1)
GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(&params.ForkHeights)
GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(&params.ForkHeights)

// Mine a few blocks to give the senderPkString some money.
for ii := 0; ii < 10; ii++ {
_, err := miner.MineAndProcessSingleBlock(0, mempool)
require.NoError(t, err)
}

// We build the testMeta object after mining blocks so that we save the correct block height.
blockHeight := uint64(chain.blockTip().Height + 1)
testMeta := &TestMeta{
t: t,
chain: chain,
params: params,
db: db,
mempool: mempool,
miner: miner,
savedHeight: uint32(blockHeight),
feeRateNanosPerKb: uint64(101),
}

_registerOrTransferWithTestMeta(testMeta, "m0", senderPkString, m0Pub, senderPrivString, 1e3)
_registerOrTransferWithTestMeta(testMeta, "m1", senderPkString, m1Pub, senderPrivString, 1e3)
_registerOrTransferWithTestMeta(testMeta, "m2", senderPkString, m2Pub, senderPrivString, 1e3)
_registerOrTransferWithTestMeta(testMeta, "m3", senderPkString, m3Pub, senderPrivString, 1e3)
_registerOrTransferWithTestMeta(testMeta, "m4", senderPkString, m4Pub, senderPrivString, 1e3)
_registerOrTransferWithTestMeta(testMeta, "m5", senderPkString, m5Pub, senderPrivString, 1e3)
_registerOrTransferWithTestMeta(testMeta, "m6", senderPkString, m6Pub, senderPrivString, 1e3)
_registerOrTransferWithTestMeta(testMeta, "", senderPkString, paramUpdaterPub, senderPrivString, 1e3)

m0PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m0PkBytes).PKID
m1PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m1PkBytes).PKID
m2PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m2PkBytes).PKID
m3PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m3PkBytes).PKID
m4PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m4PkBytes).PKID
m5PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m5PkBytes).PKID
m6PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m6PkBytes).PKID

// Helper utils
newUtxoView := func() *UtxoView {
utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot)
require.NoError(t, err)
return utxoView
}

registerValidator := func(publicKey string, privateKey string, stakeAmountNanos uint64) {
// Convert PublicKeyBase58Check to PublicKeyBytes.
pkBytes, _, err := Base58CheckDecode(publicKey)
require.NoError(t, err)

// Validator registers.
votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, pkBytes, blockHeight)
registerMetadata := &RegisterAsValidatorMetadata{
Domains: [][]byte{[]byte(fmt.Sprintf("https://%s.com", publicKey))},
VotingPublicKey: votingPublicKey,
VotingPublicKeySignature: votingSignature,
}
_, err = _submitRegisterAsValidatorTxn(testMeta, publicKey, privateKey, registerMetadata, nil, true)
require.NoError(t, err)

// Validator stakes to himself.
if stakeAmountNanos == 0 {
return
}
stakeMetadata := &StakeMetadata{
ValidatorPublicKey: NewPublicKey(pkBytes),
StakeAmountNanos: uint256.NewInt().SetUint64(stakeAmountNanos),
}
_, err = _submitStakeTxn(testMeta, publicKey, privateKey, stakeMetadata, nil, true)
require.NoError(t, err)
}

setCurrentRandomSeedHash := func(seed string) {
randomSHA256 := sha256.Sum256([]byte(seed))
randomSeedHash, err := (&RandomSeedHash{}).FromBytes(randomSHA256[:])
require.NoError(t, err)
tmpUtxoView := newUtxoView()
tmpUtxoView._setCurrentRandomSeedHash(randomSeedHash)
require.NoError(t, tmpUtxoView.FlushToDb(blockHeight))
}

testGenerateLeaderSchedule := func(expectedOrder []*PKID) {
// We test that GenerateLeaderSchedule() is deterministic by running it 10 times.
// Given the same CurrentRandomSeedHash and the same stake-weighted validators,
// we verify that we generate the same leader schedule each time.
for ii := 0; ii < 10; ii++ {
leaderSchedule, err := newUtxoView().GenerateLeaderSchedule()
require.NoError(t, err)
require.Len(t, leaderSchedule, len(expectedOrder))

for index, pkid := range leaderSchedule {
require.Equal(t, pkid, expectedOrder[index])
}
}
}

// Seed a CurrentEpochEntry.
tmpUtxoView := newUtxoView()
tmpUtxoView._setCurrentEpochEntry(&EpochEntry{EpochNumber: 1, FinalBlockHeight: blockHeight + 10})
require.NoError(t, tmpUtxoView.FlushToDb(blockHeight))

{
// ParamUpdater set min fee rate
params.ExtraRegtestParamUpdaterKeys[MakePkMapKey(paramUpdaterPkBytes)] = true
_updateGlobalParamsEntryWithTestMeta(
testMeta,
testMeta.feeRateNanosPerKb,
paramUpdaterPub,
paramUpdaterPriv,
-1,
int64(testMeta.feeRateNanosPerKb),
-1,
-1,
-1,
)
}
{
// Test GenerateLeaderSchedule() edge case: no registered validators.
leaderSchedule, err := newUtxoView().GenerateLeaderSchedule()
require.NoError(t, err)
require.Empty(t, leaderSchedule)
}
{
// m0 registers as validator.
registerValidator(m0Pub, m0Priv, 0)
}
{
// Test GenerateLeaderSchedule() edge case: one registered validator with zero stake.
leaderSchedule, err := newUtxoView().GenerateLeaderSchedule()
require.NoError(t, err)
require.Empty(t, leaderSchedule)
}
{
// m0 stakes to himself.
registerValidator(m0Pub, m0Priv, 10)
}
{
// Test GenerateLeaderSchedule() edge case: one registered validator with non-zero stake.
leaderSchedule, err := newUtxoView().GenerateLeaderSchedule()
require.NoError(t, err)
require.Len(t, leaderSchedule, 1)
require.Equal(t, leaderSchedule[0], m0PKID)
}
{
// m1 registers and stakes to himself.
registerValidator(m1Pub, m1Priv, 20)
}
{
// Test GenerateLeaderSchedule() edge case: two registered validators with non-zero stake.
leaderSchedule, err := newUtxoView().GenerateLeaderSchedule()
require.NoError(t, err)
require.Len(t, leaderSchedule, 2)
require.Equal(t, leaderSchedule[0], m1PKID)
require.Equal(t, leaderSchedule[1], m0PKID)
}
{
// All remaining validators register and stake to themselves.
registerValidator(m2Pub, m2Priv, 30)
registerValidator(m3Pub, m3Priv, 40)
registerValidator(m4Pub, m4Priv, 500)
registerValidator(m5Pub, m5Priv, 600)
registerValidator(m6Pub, m6Priv, 700)
}
{
// Verify GetTopActiveValidatorsByStake.
validatorEntries, err := newUtxoView().GetTopActiveValidatorsByStake(10)
require.NoError(t, err)
require.Len(t, validatorEntries, 7)
require.True(t, validatorEntries[0].ValidatorPKID.Eq(m6PKID))
require.True(t, validatorEntries[1].ValidatorPKID.Eq(m5PKID))
require.True(t, validatorEntries[2].ValidatorPKID.Eq(m4PKID))
require.True(t, validatorEntries[3].ValidatorPKID.Eq(m3PKID))
require.True(t, validatorEntries[4].ValidatorPKID.Eq(m2PKID))
require.True(t, validatorEntries[5].ValidatorPKID.Eq(m1PKID))
require.True(t, validatorEntries[6].ValidatorPKID.Eq(m0PKID))
require.Equal(t, validatorEntries[0].TotalStakeAmountNanos.Uint64(), uint64(700))
require.Equal(t, validatorEntries[1].TotalStakeAmountNanos.Uint64(), uint64(600))
require.Equal(t, validatorEntries[2].TotalStakeAmountNanos.Uint64(), uint64(500))
require.Equal(t, validatorEntries[3].TotalStakeAmountNanos.Uint64(), uint64(40))
require.Equal(t, validatorEntries[4].TotalStakeAmountNanos.Uint64(), uint64(30))
require.Equal(t, validatorEntries[5].TotalStakeAmountNanos.Uint64(), uint64(20))
require.Equal(t, validatorEntries[6].TotalStakeAmountNanos.Uint64(), uint64(10))
}
{
// Test GenerateLeaderSchedule().
testGenerateLeaderSchedule([]*PKID{m6PKID, m5PKID, m4PKID, m2PKID, m3PKID, m1PKID, m0PKID})
}
{
// Seed a new CurrentRandomSeedHash.
setCurrentRandomSeedHash("3b4b028b-6a7c-4b38-bea3-a5f59b34e02d")
// Test GenerateLeaderSchedule().
testGenerateLeaderSchedule([]*PKID{m6PKID, m5PKID, m3PKID, m4PKID, m2PKID, m0PKID, m1PKID})
}
{
// Seed a new CurrentRandomSeedHash.
setCurrentRandomSeedHash("b4b38eaf-216d-4132-8725-a481baaf87cc")
// Test GenerateLeaderSchedule().
testGenerateLeaderSchedule([]*PKID{m4PKID, m5PKID, m6PKID, m3PKID, m1PKID, m2PKID, m0PKID})
}
{
// Seed a new CurrentRandomSeedHash.
setCurrentRandomSeedHash("7c87f290-d9ec-4cb4-ad47-c64c8ca46f0e")
// Test GenerateLeaderSchedule().
testGenerateLeaderSchedule([]*PKID{m6PKID, m2PKID, m4PKID, m5PKID, m3PKID, m1PKID, m0PKID})
}
{
// Seed a new CurrentRandomSeedHash.
setCurrentRandomSeedHash("0999a3ce-15e4-455a-b061-6081b88b237d")
// Test GenerateLeaderSchedule().
testGenerateLeaderSchedule([]*PKID{m6PKID, m5PKID, m4PKID, m2PKID, m1PKID, m0PKID, m3PKID})
}
{
// Seed a new CurrentRandomSeedHash.
setCurrentRandomSeedHash("dbfffc42-3c40-49c4-a3df-cfbd2606cce2")
// Test GenerateLeaderSchedule().
testGenerateLeaderSchedule([]*PKID{m6PKID, m5PKID, m4PKID, m3PKID, m0PKID, m2PKID, m1PKID})
}
{
// Seed a new CurrentRandomSeedHash.
setCurrentRandomSeedHash("ceea0ad8-7277-4468-a0a1-8bacb78b01ca")
// Test GenerateLeaderSchedule().
testGenerateLeaderSchedule([]*PKID{m3PKID, m5PKID, m6PKID, m4PKID, m2PKID, m1PKID, m0PKID})
}
{
// Test changing params.LeaderScheduleMaxNumValidators.
params.LeaderScheduleMaxNumValidators = 5
leaderSchedule, err := newUtxoView().GenerateLeaderSchedule()
require.NoError(t, err)
require.Len(t, leaderSchedule, 5)
}

// Test rollbacks.
_executeAllTestRollbackAndFlush(testMeta)
}
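
A note for anyone running this test locally: the //go:build relic tag at the top of the file means it only compiles when that build tag is supplied. Assuming a standard checkout of the repo, something like go test -tags relic -run TestGenerateLeaderSchedule ./lib should exercise it.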
