diff --git a/cmd/bootstrap/cmd/clusters.go b/cmd/bootstrap/cmd/clusters.go index eab252cde9b..8f6faa10505 100644 --- a/cmd/bootstrap/cmd/clusters.go +++ b/cmd/bootstrap/cmd/clusters.go @@ -5,6 +5,8 @@ import ( model "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/cluster" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/flow/assignment" + "github.com/onflow/flow-go/model/flow/factory" "github.com/onflow/flow-go/model/flow/filter" ) @@ -23,20 +25,22 @@ func constructClusterAssignment(partnerNodes, internalNodes []model.NodeInfo, se internals = internals.DeterministicShuffle(seed) nClusters := flagCollectionClusters - assignments := make(flow.AssignmentList, nClusters) + identifierLists := make([]flow.IdentifierList, nClusters) // first, round-robin internal nodes into each cluster for i, node := range internals { - assignments[i%len(assignments)] = append(assignments[i%len(assignments)], node.NodeID) + identifierLists[i%len(identifierLists)] = append(identifierLists[i%len(identifierLists)], node.NodeID) } // next, round-robin partner nodes into each cluster for i, node := range partners { - assignments[i%len(assignments)] = append(assignments[i%len(assignments)], node.NodeID) + identifierLists[i%len(identifierLists)] = append(identifierLists[i%len(identifierLists)], node.NodeID) } + assignments := assignment.FromIdentifierLists(identifierLists) + collectors := append(partners, internals...) 
- clusters, err := flow.NewClusterList(assignments, collectors) + clusters, err := factory.NewClusterList(assignments, collectors) if err != nil { log.Fatal().Err(err).Msg("could not create cluster list") } diff --git a/cmd/bootstrap/cmd/seal.go b/cmd/bootstrap/cmd/seal.go index ffe8a7e285a..91533377a0e 100644 --- a/cmd/bootstrap/cmd/seal.go +++ b/cmd/bootstrap/cmd/seal.go @@ -7,6 +7,7 @@ import ( "github.com/onflow/flow-go/model/dkg" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/order" + "github.com/onflow/flow-go/module/signature" ) func constructRootResultAndSeal( @@ -43,9 +44,24 @@ func constructRootResultAndSeal( RandomSource: flagBootstrapRandomSeed, } + qcsWithSignerIDs := make([]*flow.QuorumCertificateWithSignerIDs, 0, len(clusterQCs)) + for i, clusterQC := range clusterQCs { + members := assignments[i] + signerIDs, err := signature.DecodeSignerIndicesToIdentifiers(members, clusterQC.SignerIndices) + if err != nil { + log.Fatal().Err(err).Msgf("could not decode signer IDs from clusterQC at index %v", i) + } + qcsWithSignerIDs = append(qcsWithSignerIDs, &flow.QuorumCertificateWithSignerIDs{ + View: clusterQC.View, + BlockID: clusterQC.BlockID, + SignerIDs: signerIDs, + SigData: clusterQC.SigData, + }) + } + epochCommit := &flow.EpochCommit{ Counter: flagEpochCounter, - ClusterQCs: flow.ClusterQCVoteDatasFromQCs(clusterQCs), + ClusterQCs: flow.ClusterQCVoteDatasFromQCs(qcsWithSignerIDs), DKGGroupKey: dkgData.PubGroupKey, DKGParticipantKeys: dkgData.PubKeyShares, } diff --git a/cmd/bootstrap/run/block.go b/cmd/bootstrap/run/block.go index 3f4dbc63e96..be9908dfa1b 100644 --- a/cmd/bootstrap/run/block.go +++ b/cmd/bootstrap/run/block.go @@ -19,7 +19,7 @@ func GenerateRootBlock(chainID flow.ChainID, parentID flow.Identifier, height ui PayloadHash: payload.Hash(), Timestamp: timestamp, View: 0, - ParentVoterIDs: nil, + ParentVoterIndices: nil, ParentVoterSigData: nil, ProposerID: flow.ZeroID, ProposerSigData: nil, diff --git 
a/cmd/bootstrap/run/cluster_qc.go b/cmd/bootstrap/run/cluster_qc.go index 9030f2a571b..c8db89ab92e 100644 --- a/cmd/bootstrap/run/cluster_qc.go +++ b/cmd/bootstrap/run/cluster_qc.go @@ -15,6 +15,7 @@ import ( "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/cluster" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/flow/order" "github.com/onflow/flow-go/module/local" ) @@ -29,7 +30,8 @@ func GenerateClusterRootQC(signers []bootstrap.NodeInfo, allCommitteeMembers flo } // STEP 2: create VoteProcessor - committee, err := committees.NewStaticCommittee(allCommitteeMembers, flow.Identifier{}, nil, nil) + ordered := allCommitteeMembers.Sort(order.Canonical) + committee, err := committees.NewStaticCommittee(ordered, flow.Identifier{}, nil, nil) if err != nil { return nil, err } diff --git a/cmd/bootstrap/run/qc_test.go b/cmd/bootstrap/run/qc_test.go index 43e01d534cb..afc8849d329 100644 --- a/cmd/bootstrap/run/qc_test.go +++ b/cmd/bootstrap/run/qc_test.go @@ -9,6 +9,7 @@ import ( "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/flow/order" "github.com/onflow/flow-go/module/signature" "github.com/onflow/flow-go/utils/unittest" ) @@ -32,7 +33,7 @@ func TestGenerateRootQC(t *testing.T) { } func createSignerData(t *testing.T, n int) *ParticipantData { - identities := unittest.IdentityListFixture(n) + identities := unittest.IdentityListFixture(n).Sort(order.Canonical) networkingKeys := unittest.NetworkingKeys(n) stakingKeys := unittest.StakingKeys(n) diff --git a/cmd/util/cmd/epochs/cmd/reset_test.go b/cmd/util/cmd/epochs/cmd/reset_test.go index 5526e3c41fc..26dd26b408c 100644 --- a/cmd/util/cmd/epochs/cmd/reset_test.go +++ b/cmd/util/cmd/epochs/cmd/reset_test.go @@ -30,7 +30,7 @@ func TestReset_LocalSnapshot(t *testing.T) { unittest.RunWithTempDir(t, func(bootDir string) { // create a root snapshot - rootSnapshot := 
unittest.RootSnapshotFixture(unittest.IdentityListFixture(10)) + rootSnapshot := unittest.RootSnapshotFixture(unittest.IdentityListFixture(10, unittest.WithAllRoles())) // write snapshot to correct path in bootDir err := writeRootSnapshot(bootDir, rootSnapshot) @@ -62,7 +62,7 @@ func TestReset_LocalSnapshot(t *testing.T) { unittest.RunWithTempDir(t, func(bootDir string) { // create a root snapshot - rootSnapshot := unittest.RootSnapshotFixture(unittest.IdentityListFixture(10)) + rootSnapshot := unittest.RootSnapshotFixture(unittest.IdentityListFixture(10, unittest.WithAllRoles())) // write snapshot to correct path in bootDir err := writeRootSnapshot(bootDir, rootSnapshot) diff --git a/cmd/util/cmd/exec-data-json-export/block_exporter.go b/cmd/util/cmd/exec-data-json-export/block_exporter.go index 834609c783b..2e178d08af6 100644 --- a/cmd/util/cmd/exec-data-json-export/block_exporter.go +++ b/cmd/util/cmd/exec-data-json-export/block_exporter.go @@ -16,12 +16,12 @@ import ( ) type blockSummary struct { - BlockHeight uint64 `json:"block_height"` - BlockID string `json:"block_id"` - ParentBlockID string `json:"parent_block_id"` - ParentVoterIDs []string `json:"parent_voter_ids"` - // ParentVoterSigData []string `json:"parent_voter_sig"` - ProposerID string `json:"proposer_id"` + BlockHeight uint64 `json:"block_height"` + BlockID string `json:"block_id"` + ParentBlockID string `json:"parent_block_id"` + ParentVoterIndices string `json:"parent_voter_indices"` + ParentVoterSigData string `json:"parent_voter_sig"` + ProposerID string `json:"proposer_id"` // ProposerSigData string `json:"proposer_sig"` Timestamp time.Time `json:"timestamp"` CollectionIDs []string `json:"collection_ids"` @@ -87,22 +87,18 @@ func ExportBlocks(blockID flow.Identifier, dbPath string, outputPath string) (fl sealsStates = append(sealsStates, hex.EncodeToString(s.FinalState[:])) } - pvIDs := make([]string, 0) - for _, i := range header.ParentVoterIDs { - pvIDs = append(pvIDs, 
hex.EncodeToString(i[:])) - } - b := blockSummary{ - BlockID: hex.EncodeToString(activeBlockID[:]), - BlockHeight: header.Height, - ParentBlockID: hex.EncodeToString(header.ParentID[:]), - ParentVoterIDs: pvIDs, - ProposerID: hex.EncodeToString(header.ProposerID[:]), - Timestamp: header.Timestamp, - CollectionIDs: cols, - SealedBlocks: seals, - SealedResults: sealsResults, - SealedFinalStates: sealsStates, + BlockID: hex.EncodeToString(activeBlockID[:]), + BlockHeight: header.Height, + ParentBlockID: hex.EncodeToString(header.ParentID[:]), + ParentVoterIndices: hex.EncodeToString(header.ParentVoterIndices), + ParentVoterSigData: hex.EncodeToString(header.ParentVoterSigData), + ProposerID: hex.EncodeToString(header.ProposerID[:]), + Timestamp: header.Timestamp, + CollectionIDs: cols, + SealedBlocks: seals, + SealedResults: sealsResults, + SealedFinalStates: sealsStates, } jsonData, err := json.Marshal(b) diff --git a/consensus/follower_test.go b/consensus/follower_test.go index 67efc7f58bf..70cae98c718 100644 --- a/consensus/follower_test.go +++ b/consensus/follower_test.go @@ -5,6 +5,8 @@ import ( "testing" "time" + "github.com/onflow/flow-go/module/signature" + "github.com/rs/zerolog" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -68,9 +70,9 @@ func (s *HotStuffFollowerSuite) SetupTest() { // mock consensus committee s.committee = &mockhotstuff.Committee{} - s.committee.On("Identities", mock.Anything, mock.Anything).Return( - func(blockID flow.Identifier, selector flow.IdentityFilter) flow.IdentityList { - return identities.Filter(selector) + s.committee.On("Identities", mock.Anything).Return( + func(blockID flow.Identifier) flow.IdentityList { + return identities }, nil, ) @@ -105,10 +107,13 @@ func (s *HotStuffFollowerSuite) SetupTest() { Height: 21053, View: 52078, } + + signerIndices, err := signature.EncodeSignersToIndices(identities.NodeIDs(), identities.NodeIDs()[:3]) + require.NoError(s.T(), err) s.rootQC = 
&flow.QuorumCertificate{ - View: s.rootHeader.View, - BlockID: s.rootHeader.ID(), - SignerIDs: identities.NodeIDs()[:3], + View: s.rootHeader.View, + BlockID: s.rootHeader.ID(), + SignerIndices: signerIndices, } // we start with the latest finalized block being the root block @@ -335,6 +340,7 @@ func (mc *MockConsensus) extendBlock(blockView uint64, parent *flow.Header) *flo nextBlock := unittest.BlockHeaderWithParentFixture(parent) nextBlock.View = blockView nextBlock.ProposerID = mc.identities[int(blockView)%len(mc.identities)].NodeID - nextBlock.ParentVoterIDs = mc.identities.NodeIDs() + signerIndices, _ := signature.EncodeSignersToIndices(mc.identities.NodeIDs(), mc.identities.NodeIDs()) + nextBlock.ParentVoterIndices = signerIndices return &nextBlock } diff --git a/consensus/hotstuff/blockproducer/block_producer.go b/consensus/hotstuff/blockproducer/block_producer.go index 0f44ca1da9d..8baafdab598 100644 --- a/consensus/hotstuff/blockproducer/block_producer.go +++ b/consensus/hotstuff/blockproducer/block_producer.go @@ -33,7 +33,7 @@ func (bp *BlockProducer) MakeBlockProposal(qc *flow.QuorumCertificate, view uint // in hotstuff, we use this for view number and signature-related fields setHotstuffFields := func(header *flow.Header) error { header.View = view - header.ParentVoterIDs = qc.SignerIDs + header.ParentVoterIndices = qc.SignerIndices header.ParentVoterSigData = qc.SigData header.ProposerID = bp.committee.Self() diff --git a/consensus/hotstuff/committee.go b/consensus/hotstuff/committee.go index da603f9389d..09f62f43e5d 100644 --- a/consensus/hotstuff/committee.go +++ b/consensus/hotstuff/committee.go @@ -6,7 +6,7 @@ import ( ) // Committee accounts for the fact that we might have multiple HotStuff instances -// (collector committees and main consensus committee). Each hostuff instance is supposed to +// (collector committees and main consensus committee). Each HotStuff instance is supposed to // have a dedicated Committee state. 
// A Committee provides subset of the protocol.State, which is restricted to exactly those // nodes that participate in the current HotStuff instance: the state of all legitimate HotStuff @@ -17,14 +17,12 @@ import ( // Given a collector block, some logic is required to find the main consensus block // for determining the valid collector-HotStuff participants. type Committee interface { - // Identities returns a IdentityList with legitimate HotStuff participants for the specified block. - // The list of participants is filtered by the provided selector. The returned list of HotStuff participants + // The returned list of HotStuff participants // * contains nodes that are allowed to sign the specified block (legitimate participants with NON-ZERO WEIGHT) // * is ordered in the canonical order // * contains no duplicates. - // The list of all legitimate HotStuff participants for the specified block can be obtained by using `filter.Any` - Identities(blockID flow.Identifier, selector flow.IdentityFilter) (flow.IdentityList, error) + Identities(blockID flow.Identifier) (flow.IdentityList, error) // Identity returns the full Identity for specified HotStuff participant. // The node must be a legitimate HotStuff participant with NON-ZERO WEIGHT at the specified block. diff --git a/consensus/hotstuff/committees/cluster_committee.go b/consensus/hotstuff/committees/cluster_committee.go index 34dced32efa..99e52c8f73e 100644 --- a/consensus/hotstuff/committees/cluster_committee.go +++ b/consensus/hotstuff/committees/cluster_committee.go @@ -61,8 +61,12 @@ func NewClusterCommittee( return com, nil } -func (c *Cluster) Identities(blockID flow.Identifier, selector flow.IdentityFilter) (flow.IdentityList, error) { - +// Identities returns the identities of all cluster members that are authorized to +// participate at the given block. The order of the identities is the canonical order. 
+func (c *Cluster) Identities(blockID flow.Identifier) (flow.IdentityList, error) { + // blockID is a collection block not a block produced by consensus, + // to query the identities from protocol state, we need to use the reference block id from the payload + // // first retrieve the cluster block payload payload, err := c.payloads.ByBlockID(blockID) if err != nil { @@ -74,14 +78,12 @@ func (c *Cluster) Identities(blockID flow.Identifier, selector flow.IdentityFilt // use the initial cluster members for root block if isRootBlock { - return c.initialClusterMembers.Filter(selector), nil + return c.initialClusterMembers, nil } // otherwise use the snapshot given by the reference block - identities, err := c.state.AtBlockID(payload.ReferenceBlockID).Identities(filter.And( - selector, - c.clusterMemberFilter, - )) + identities, err := c.state.AtBlockID(payload.ReferenceBlockID).Identities(c.clusterMemberFilter) // remove ejected nodes + return identities, err } diff --git a/consensus/hotstuff/committees/consensus_committee.go b/consensus/hotstuff/committees/consensus_committee.go index 13e408d2bd2..a6a3fc2c64a 100644 --- a/consensus/hotstuff/committees/consensus_committee.go +++ b/consensus/hotstuff/committees/consensus_committee.go @@ -68,11 +68,10 @@ func NewConsensusCommittee(state protocol.State, me flow.Identifier) (*Consensus return com, nil } -func (c *Consensus) Identities(blockID flow.Identifier, selector flow.IdentityFilter) (flow.IdentityList, error) { - il, err := c.state.AtBlockID(blockID).Identities(filter.And( - filter.IsVotingConsensusCommitteeMember, - selector, - )) +// Identities returns the identities of all authorized consensus participants at the given block. +// The order of the identities is the canonical order. 
+func (c *Consensus) Identities(blockID flow.Identifier) (flow.IdentityList, error) { + il, err := c.state.AtBlockID(blockID).Identities(filter.IsVotingConsensusCommitteeMember) return il, err } diff --git a/consensus/hotstuff/committees/leader/leader_selection_test.go b/consensus/hotstuff/committees/leader/leader_selection_test.go index ef06b3157ea..7d580c76a6a 100644 --- a/consensus/hotstuff/committees/leader/leader_selection_test.go +++ b/consensus/hotstuff/committees/leader/leader_selection_test.go @@ -367,61 +367,54 @@ func TestZeroWeightNodeWillNotBeSelected(t *testing.T) { t.Run("fuzzy set", func(t *testing.T) { toolRng := prg(t, someSeed) - // TODO: randomize the test at each iteration - for i := 0; i < 1; i++ { - // create 1002 nodes with all 0 weight - identities := unittest.IdentityListFixture(1002, unittest.WithWeight(0)) + // create 1002 nodes with all 0 weight + identities := unittest.IdentityListFixture(1002, unittest.WithWeight(0)) - // create 2 nodes with 1 weight, and place them in between - // index 233-777 - n := toolRng.UintN(777-233) + 233 - m := toolRng.UintN(777-233) + 233 - identities[n].Weight = 1 - identities[m].Weight = 1 + // create 2 nodes with 1 weight, and place them in between + // index 233-777 + n := toolRng.UintN(777-233) + 233 + m := toolRng.UintN(777-233) + 233 + identities[n].Weight = 1 + identities[m].Weight = 1 - // the following code check the zero weight node should not be selected - weightful := identities.Filter(filter.HasWeight(true)) + // the following code check the zero weight node should not be selected + weightful := identities.Filter(filter.HasWeight(true)) - count := 1000 + count := 1000 + selectionFromAll, err := ComputeLeaderSelection(0, rng, count, identities) + require.NoError(t, err) - selectionFromAll, err := ComputeLeaderSelection(0, rng, count, identities) - require.NoError(t, err) + selectionFromWeightful, err := ComputeLeaderSelection(0, rng_copy, count, weightful) + require.NoError(t, err) - 
selectionFromWeightful, err := ComputeLeaderSelection(0, rng_copy, count, weightful) + for i := 0; i < count; i++ { + nodeIDFromAll, err := selectionFromAll.LeaderForView(uint64(i)) require.NoError(t, err) - for j := 0; j < count; j++ { - nodeIDFromAll, err := selectionFromAll.LeaderForView(uint64(j)) - require.NoError(t, err) - - nodeIDFromWeightful, err := selectionFromWeightful.LeaderForView(uint64(j)) - require.NoError(t, err) + nodeIDFromWeightful, err := selectionFromWeightful.LeaderForView(uint64(i)) + require.NoError(t, err) - // the selection should be the same - require.Equal(t, nodeIDFromWeightful, nodeIDFromAll) - } + // the selection should be the same + require.Equal(t, nodeIDFromWeightful, nodeIDFromAll) } t.Run("if there is only 1 node has weight, then it will be always be the leader and the only leader", func(t *testing.T) { toolRng := prg(t, someSeed) - // TODO: randomize the test at each iteration - for i := 0; i < 1; i++ { - identities := unittest.IdentityListFixture(1000, unittest.WithWeight(0)) + identities := unittest.IdentityListFixture(1000, unittest.WithWeight(0)) - n := toolRng.UintN(1000) - weight := n + 1 - identities[n].Weight = weight - onlyNodeWithWeight := identities[n] + n := rng.UintN(1000) + weight := n + 1 + identities[n].Weight = weight + onlyNodeWithWeight := identities[n] - selections, err := ComputeLeaderSelection(0, rng, 1000, identities) - require.NoError(t, err) + selections, err := ComputeLeaderSelection(0, toolRng, 1000, identities) + require.NoError(t, err) - for j := 0; j < 1000; j++ { - nodeID, err := selections.LeaderForView(uint64(j)) - require.NoError(t, err) - require.Equal(t, onlyNodeWithWeight.NodeID, nodeID) - } + for i := 0; i < 1000; i++ { + nodeID, err := selections.LeaderForView(uint64(i)) + require.NoError(t, err) + require.Equal(t, onlyNodeWithWeight.NodeID, nodeID) } }) }) diff --git a/consensus/hotstuff/committees/metrics_wrapper.go b/consensus/hotstuff/committees/metrics_wrapper.go index 
0dc9d2d3721..3ca30fbe075 100644 --- a/consensus/hotstuff/committees/metrics_wrapper.go +++ b/consensus/hotstuff/committees/metrics_wrapper.go @@ -28,9 +28,9 @@ func NewMetricsWrapper(committee hotstuff.Committee, metrics module.HotstuffMetr } } -func (w CommitteeMetricsWrapper) Identities(blockID flow.Identifier, selector flow.IdentityFilter) (flow.IdentityList, error) { +func (w CommitteeMetricsWrapper) Identities(blockID flow.Identifier) (flow.IdentityList, error) { processStart := time.Now() - identities, err := w.committee.Identities(blockID, selector) + identities, err := w.committee.Identities(blockID) w.metrics.CommitteeProcessingDuration(time.Since(processStart)) return identities, err } diff --git a/consensus/hotstuff/committees/static.go b/consensus/hotstuff/committees/static.go index 3d28a1dd778..c58fad2bc72 100644 --- a/consensus/hotstuff/committees/static.go +++ b/consensus/hotstuff/committees/static.go @@ -6,24 +6,26 @@ import ( "github.com/onflow/flow-go/consensus/hotstuff" "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/flow/order" "github.com/onflow/flow-go/state/protocol" ) // NewStaticCommittee returns a new committee with a static participant set. func NewStaticCommittee(participants flow.IdentityList, myID flow.Identifier, dkgParticipants map[flow.Identifier]flow.DKGParticipant, dkgGroupKey crypto.PublicKey) (*Static, error) { - static := &Static{ - participants: participants, - myID: myID, - dkg: staticDKG{ - dkgParticipants: dkgParticipants, - dkgGroupKey: dkgGroupKey, - }, - } - return static, nil + + return NewStaticCommitteeWithDKG(participants, myID, staticDKG{ + dkgParticipants: dkgParticipants, + dkgGroupKey: dkgGroupKey, + }) } // NewStaticCommitteeWithDKG returns a new committee with a static participant set. 
func NewStaticCommitteeWithDKG(participants flow.IdentityList, myID flow.Identifier, dkg protocol.DKG) (*Static, error) { + valid := order.IdentityListCanonical(participants) + if !valid { + return nil, fmt.Errorf("participants %v is not in Canonical order", participants) + } + static := &Static{ participants: participants, myID: myID, @@ -40,8 +42,8 @@ type Static struct { dkg protocol.DKG } -func (s Static) Identities(_ flow.Identifier, selector flow.IdentityFilter) (flow.IdentityList, error) { - return s.participants.Filter(selector), nil +func (s Static) Identities(_ flow.Identifier) (flow.IdentityList, error) { + return s.participants, nil } func (s Static) Identity(_ flow.Identifier, participantID flow.Identifier) (*flow.Identity, error) { diff --git a/consensus/hotstuff/eventhandler/event_handler.go b/consensus/hotstuff/eventhandler/event_handler.go index a3daee0238c..b47fb3202ca 100644 --- a/consensus/hotstuff/eventhandler/event_handler.go +++ b/consensus/hotstuff/eventhandler/event_handler.go @@ -407,7 +407,6 @@ func (e *EventHandler) processQC(qc *flow.QuorumCertificate) error { log := e.log.With(). Uint64("block_view", qc.View). Hex("block_id", qc.BlockID[:]). - Int("signers", len(qc.SignerIDs)). 
Logger() err := e.forks.AddQC(qc) diff --git a/consensus/hotstuff/eventhandler/event_handler_test.go b/consensus/hotstuff/eventhandler/event_handler_test.go index 80fdebb44da..85eed9d1fc2 100644 --- a/consensus/hotstuff/eventhandler/event_handler_test.go +++ b/consensus/hotstuff/eventhandler/event_handler_test.go @@ -375,10 +375,10 @@ func (es *EventHandlerSuite) SetupTest() { // voting block is a block for the current view, which will trigger view change es.votingBlock = createBlockWithQC(es.paceMaker.CurView(), es.paceMaker.CurView()-1) es.qc = &flow.QuorumCertificate{ - BlockID: es.votingBlock.BlockID, - View: es.votingBlock.View, - SignerIDs: nil, - SigData: nil, + BlockID: es.votingBlock.BlockID, + View: es.votingBlock.View, + SignerIndices: nil, + SigData: nil, } es.newview = &model.NewViewEvent{ View: es.votingBlock.View + 1, // the vote for the voting blocks will trigger a view change to the next view @@ -852,10 +852,10 @@ func createBlockWithQC(view uint64, qcview uint64) *model.Block { func createQC(parent *model.Block) *flow.QuorumCertificate { qc := &flow.QuorumCertificate{ - BlockID: parent.BlockID, - View: parent.View, - SignerIDs: nil, - SigData: nil, + BlockID: parent.BlockID, + View: parent.View, + SignerIndices: nil, + SigData: nil, } return qc } diff --git a/consensus/hotstuff/forks/test/block_builder.go b/consensus/hotstuff/forks/test/block_builder.go index a3f08237197..7f9d7488d22 100644 --- a/consensus/hotstuff/forks/test/block_builder.go +++ b/consensus/hotstuff/forks/test/block_builder.go @@ -88,10 +88,10 @@ func (f *BlockBuilder) Blocks() ([]*model.Block, error) { // generate QC for the new block qcs[bv.BlockIndex()] = &flow.QuorumCertificate{ - View: block.View, - BlockID: block.BlockID, - SignerIDs: nil, - SigData: nil, + View: block.View, + BlockID: block.BlockID, + SignerIndices: nil, + SigData: nil, } } diff --git a/consensus/hotstuff/helper/block.go b/consensus/hotstuff/helper/block.go index 57ab58ea4ce..7e029f8b58b 100644 --- 
a/consensus/hotstuff/helper/block.go +++ b/consensus/hotstuff/helper/block.go @@ -44,9 +44,9 @@ func WithParentBlock(parent *model.Block) func(*model.Block) { } } -func WithParentSigners(signerIDs []flow.Identifier) func(*model.Block) { +func WithParentSigners(signerIndices []byte) func(*model.Block) { return func(block *model.Block) { - block.QC.SignerIDs = signerIDs + block.QC.SignerIndices = signerIndices } } diff --git a/consensus/hotstuff/helper/quorum_certificate.go b/consensus/hotstuff/helper/quorum_certificate.go index 5269baac3e3..2f45d8fcbac 100644 --- a/consensus/hotstuff/helper/quorum_certificate.go +++ b/consensus/hotstuff/helper/quorum_certificate.go @@ -10,10 +10,10 @@ import ( func MakeQC(options ...func(*flow.QuorumCertificate)) *flow.QuorumCertificate { qc := flow.QuorumCertificate{ - View: rand.Uint64(), - BlockID: unittest.IdentifierFixture(), - SignerIDs: unittest.IdentityListFixture(7).NodeIDs(), - SigData: unittest.SignatureFixture(), + View: rand.Uint64(), + BlockID: unittest.IdentifierFixture(), + SignerIndices: unittest.SignerIndicesFixture(3), + SigData: unittest.SignatureFixture(), } for _, option := range options { option(&qc) @@ -28,9 +28,9 @@ func WithQCBlock(block *model.Block) func(*flow.QuorumCertificate) { } } -func WithQCSigners(signerIDs []flow.Identifier) func(*flow.QuorumCertificate) { +func WithQCSigners(signerIndices []byte) func(*flow.QuorumCertificate) { return func(qc *flow.QuorumCertificate) { - qc.SignerIDs = signerIDs + qc.SignerIndices = signerIndices } } diff --git a/consensus/hotstuff/integration/instance_test.go b/consensus/hotstuff/integration/instance_test.go index f8222745250..900d446aed4 100644 --- a/consensus/hotstuff/integration/instance_test.go +++ b/consensus/hotstuff/integration/instance_test.go @@ -23,7 +23,6 @@ import ( "github.com/onflow/flow-go/consensus/hotstuff/notifications" "github.com/onflow/flow-go/consensus/hotstuff/pacemaker" "github.com/onflow/flow-go/consensus/hotstuff/pacemaker/timeout" - 
hsig "github.com/onflow/flow-go/consensus/hotstuff/signature" "github.com/onflow/flow-go/consensus/hotstuff/validator" "github.com/onflow/flow-go/consensus/hotstuff/voteaggregator" "github.com/onflow/flow-go/consensus/hotstuff/votecollector" @@ -139,9 +138,9 @@ func NewInstance(t require.TestingT, options ...Option) *Instance { in.headers[cfg.Root.ID()] = cfg.Root // program the hotstuff committee state - in.committee.On("Identities", mock.Anything, mock.Anything).Return( - func(blockID flow.Identifier, selector flow.IdentityFilter) flow.IdentityList { - return in.participants.Filter(selector) + in.committee.On("Identities", mock.Anything).Return( + func(blockID flow.Identifier) flow.IdentityList { + return in.participants }, nil, ) @@ -208,7 +207,7 @@ func NewInstance(t require.TestingT, options ...Option) *Instance { View: block.View, BlockID: block.BlockID, SignerID: in.localID, - SigData: unittest.RandomBytes(hsig.SigLen * 2), // double sig, one staking, one beacon + SigData: unittest.RandomBytes(msig.SigLen * 2), // double sig, one staking, one beacon } return vote }, @@ -216,15 +215,19 @@ func NewInstance(t require.TestingT, options ...Option) *Instance { ) in.signer.On("CreateQC", mock.Anything).Return( func(votes []*model.Vote) *flow.QuorumCertificate { - voterIDs := make([]flow.Identifier, 0, len(votes)) + voterIDs := make(flow.IdentifierList, 0, len(votes)) for _, vote := range votes { voterIDs = append(voterIDs, vote.SignerID) } + + signerIndices, err := msig.EncodeSignersToIndices(in.participants.NodeIDs(), voterIDs) + require.NoError(t, err, "could not encode signer indices") + qc := &flow.QuorumCertificate{ - View: votes[0].View, - BlockID: votes[0].BlockID, - SignerIDs: voterIDs, - SigData: nil, + View: votes[0].View, + BlockID: votes[0].BlockID, + SignerIndices: signerIndices, + SigData: nil, } return qc }, @@ -318,10 +321,14 @@ func NewInstance(t require.TestingT, options ...Option) *Instance { // initialize the finalizer rootBlock := 
model.BlockFromFlow(cfg.Root, 0) + + signerIndices, err := msig.EncodeSignersToIndices(in.participants.NodeIDs(), in.participants.NodeIDs()) + require.NoError(t, err, "could not encode signer indices") + rootQC := &flow.QuorumCertificate{ - View: rootBlock.View, - BlockID: rootBlock.BlockID, - SignerIDs: in.participants.NodeIDs(), + View: rootBlock.View, + BlockID: rootBlock.BlockID, + SignerIndices: signerIndices, } rootBlockQC := &forks.BlockQC{Block: rootBlock, QC: rootQC} forkalizer, err := finalizer.New(rootBlockQC, in.finalizer, notifier) @@ -344,8 +351,11 @@ func NewInstance(t require.TestingT, options ...Option) *Instance { rbRector := helper.MakeRandomBeaconReconstructor(msig.RandomBeaconThreshold(int(in.participants.Count()))) rbRector.On("Verify", mock.Anything, mock.Anything).Return(nil).Maybe() + indices, err := msig.EncodeSignersToIndices(in.participants.NodeIDs(), []flow.Identifier(in.participants.NodeIDs())) + require.NoError(t, err) + packer := &mocks.Packer{} - packer.On("Pack", mock.Anything, mock.Anything).Return(in.participants.NodeIDs(), unittest.RandomBytes(128), nil).Maybe() + packer.On("Pack", mock.Anything, mock.Anything).Return(indices, unittest.RandomBytes(128), nil).Maybe() onQCCreated := func(qc *flow.QuorumCertificate) { in.queue <- qc diff --git a/consensus/hotstuff/mocks/committee.go b/consensus/hotstuff/mocks/committee.go index 2716804b4c7..07cda1fc07a 100644 --- a/consensus/hotstuff/mocks/committee.go +++ b/consensus/hotstuff/mocks/committee.go @@ -39,13 +39,13 @@ func (_m *Committee) DKG(blockID flow.Identifier) (hotstuff.DKG, error) { return r0, r1 } -// Identities provides a mock function with given fields: blockID, selector -func (_m *Committee) Identities(blockID flow.Identifier, selector flow.IdentityFilter) (flow.IdentityList, error) { - ret := _m.Called(blockID, selector) +// Identities provides a mock function with given fields: blockID +func (_m *Committee) Identities(blockID flow.Identifier) (flow.IdentityList, error) { 
+ ret := _m.Called(blockID) var r0 flow.IdentityList - if rf, ok := ret.Get(0).(func(flow.Identifier, flow.IdentityFilter) flow.IdentityList); ok { - r0 = rf(blockID, selector) + if rf, ok := ret.Get(0).(func(flow.Identifier) flow.IdentityList); ok { + r0 = rf(blockID) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(flow.IdentityList) @@ -53,8 +53,8 @@ func (_m *Committee) Identities(blockID flow.Identifier, selector flow.IdentityF } var r1 error - if rf, ok := ret.Get(1).(func(flow.Identifier, flow.IdentityFilter) error); ok { - r1 = rf(blockID, selector) + if rf, ok := ret.Get(1).(func(flow.Identifier) error); ok { + r1 = rf(blockID) } else { r1 = ret.Error(1) } diff --git a/consensus/hotstuff/mocks/packer.go b/consensus/hotstuff/mocks/packer.go index dce85bc4b8a..1e5b60a8e4d 100644 --- a/consensus/hotstuff/mocks/packer.go +++ b/consensus/hotstuff/mocks/packer.go @@ -17,15 +17,15 @@ type Packer struct { } // Pack provides a mock function with given fields: blockID, sig -func (_m *Packer) Pack(blockID flow.Identifier, sig *hotstuff.BlockSignatureData) ([]flow.Identifier, []byte, error) { +func (_m *Packer) Pack(blockID flow.Identifier, sig *hotstuff.BlockSignatureData) ([]byte, []byte, error) { ret := _m.Called(blockID, sig) - var r0 []flow.Identifier - if rf, ok := ret.Get(0).(func(flow.Identifier, *hotstuff.BlockSignatureData) []flow.Identifier); ok { + var r0 []byte + if rf, ok := ret.Get(0).(func(flow.Identifier, *hotstuff.BlockSignatureData) []byte); ok { r0 = rf(blockID, sig) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).([]flow.Identifier) + r0 = ret.Get(0).([]byte) } } @@ -48,13 +48,13 @@ func (_m *Packer) Pack(blockID flow.Identifier, sig *hotstuff.BlockSignatureData return r0, r1, r2 } -// Unpack provides a mock function with given fields: blockID, signerIDs, sigData -func (_m *Packer) Unpack(blockID flow.Identifier, signerIDs []flow.Identifier, sigData []byte) (*hotstuff.BlockSignatureData, error) { - ret := _m.Called(blockID, signerIDs, sigData) 
+// Unpack provides a mock function with given fields: signerIdentities, sigData +func (_m *Packer) Unpack(signerIdentities flow.IdentityList, sigData []byte) (*hotstuff.BlockSignatureData, error) { + ret := _m.Called(signerIdentities, sigData) var r0 *hotstuff.BlockSignatureData - if rf, ok := ret.Get(0).(func(flow.Identifier, []flow.Identifier, []byte) *hotstuff.BlockSignatureData); ok { - r0 = rf(blockID, signerIDs, sigData) + if rf, ok := ret.Get(0).(func(flow.IdentityList, []byte) *hotstuff.BlockSignatureData); ok { + r0 = rf(signerIdentities, sigData) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*hotstuff.BlockSignatureData) @@ -62,8 +62,8 @@ func (_m *Packer) Unpack(blockID flow.Identifier, signerIDs []flow.Identifier, s } var r1 error - if rf, ok := ret.Get(1).(func(flow.Identifier, []flow.Identifier, []byte) error); ok { - r1 = rf(blockID, signerIDs, sigData) + if rf, ok := ret.Get(1).(func(flow.IdentityList, []byte) error); ok { + r1 = rf(signerIdentities, sigData) } else { r1 = ret.Error(1) } diff --git a/consensus/hotstuff/mocks/verifier.go b/consensus/hotstuff/mocks/verifier.go index bdf49340a5c..f6847c239f8 100644 --- a/consensus/hotstuff/mocks/verifier.go +++ b/consensus/hotstuff/mocks/verifier.go @@ -17,13 +17,13 @@ type Verifier struct { mock.Mock } -// VerifyQC provides a mock function with given fields: voters, sigData, block -func (_m *Verifier) VerifyQC(voters flow.IdentityList, sigData []byte, block *model.Block) error { - ret := _m.Called(voters, sigData, block) +// VerifyQC provides a mock function with given fields: signers, sigData, block +func (_m *Verifier) VerifyQC(signers flow.IdentityList, sigData []byte, block *model.Block) error { + ret := _m.Called(signers, sigData, block) var r0 error if rf, ok := ret.Get(0).(func(flow.IdentityList, []byte, *model.Block) error); ok { - r0 = rf(voters, sigData, block) + r0 = rf(signers, sigData, block) } else { r0 = ret.Error(0) } diff --git a/consensus/hotstuff/model/block.go 
b/consensus/hotstuff/model/block.go index 5496c2bc223..a8f952ba11a 100644 --- a/consensus/hotstuff/model/block.go +++ b/consensus/hotstuff/model/block.go @@ -21,10 +21,10 @@ type Block struct { func BlockFromFlow(header *flow.Header, parentView uint64) *Block { qc := flow.QuorumCertificate{ - BlockID: header.ParentID, - View: parentView, - SignerIDs: header.ParentVoterIDs, - SigData: header.ParentVoterSigData, + BlockID: header.ParentID, + View: parentView, + SignerIndices: header.ParentVoterIndices, + SigData: header.ParentVoterSigData, } block := Block{ diff --git a/consensus/hotstuff/model/errors.go b/consensus/hotstuff/model/errors.go index a41a57b24b2..8dc0a92998b 100644 --- a/consensus/hotstuff/model/errors.go +++ b/consensus/hotstuff/model/errors.go @@ -9,7 +9,6 @@ import ( var ( ErrUnverifiableBlock = errors.New("block proposal can't be verified, because its view is above the finalized view, but its QC is below the finalized view") - ErrInvalidFormat = errors.New("invalid signature format") ErrInvalidSignature = errors.New("invalid signature") ) @@ -26,6 +25,28 @@ func IsNoVoteError(err error) bool { return errors.As(err, &e) } +// InvalidFormatError indicates that some data has an incompatible format. +type InvalidFormatError struct { + err error +} + +func NewInvalidFormatError(err error) error { + return InvalidFormatError{err} +} + +func NewInvalidFormatErrorf(msg string, args ...interface{}) error { + return InvalidFormatError{fmt.Errorf(msg, args...)} +} + +func (e InvalidFormatError) Error() string { return e.err.Error() } +func (e InvalidFormatError) Unwrap() error { return e.err } + +// IsInvalidFormatError returns whether err is a InvalidFormatError +func IsInvalidFormatError(err error) bool { + var e InvalidFormatError + return errors.As(err, &e) +} + // ConfigurationError indicates that a constructor or component was initialized with // invalid or inconsistent parameters. 
type ConfigurationError struct { diff --git a/consensus/hotstuff/model/proposal.go b/consensus/hotstuff/model/proposal.go index f39c9118549..d0f31290d15 100644 --- a/consensus/hotstuff/model/proposal.go +++ b/consensus/hotstuff/model/proposal.go @@ -44,7 +44,7 @@ func ProposalToFlow(proposal *Proposal) *flow.Header { PayloadHash: block.PayloadHash, Timestamp: block.Timestamp, View: block.View, - ParentVoterIDs: block.QC.SignerIDs, + ParentVoterIndices: block.QC.SignerIndices, ParentVoterSigData: block.QC.SigData, ProposerID: block.ProposerID, ProposerSigData: proposal.SigData, diff --git a/consensus/hotstuff/packer/packer.go b/consensus/hotstuff/model/signature_data.go similarity index 90% rename from consensus/hotstuff/packer/packer.go rename to consensus/hotstuff/model/signature_data.go index c50a0114c94..e85f5f600a7 100644 --- a/consensus/hotstuff/packer/packer.go +++ b/consensus/hotstuff/model/signature_data.go @@ -1,10 +1,8 @@ -package packer +package model import ( "bytes" - "fmt" - "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/model/encoding/rlp" ) @@ -49,7 +47,7 @@ func UnpackRandomBeaconSig(sigData []byte) (crypto.Signature, error) { packer := SigDataPacker{} sig, err := packer.Decode(sigData) if err != nil { - return nil, fmt.Errorf("could not decode sig data %s: %w", err, model.ErrInvalidFormat) + return nil, NewInvalidFormatErrorf("could not decode sig data: %w", err) } return sig.ReconstructedRandomBeaconSig, nil } diff --git a/consensus/hotstuff/signature.go b/consensus/hotstuff/signature.go index 3ba8cb3218c..0fa5deb7c78 100644 --- a/consensus/hotstuff/signature.go +++ b/consensus/hotstuff/signature.go @@ -53,24 +53,6 @@ type RandomBeaconReconstructor interface { Reconstruct() (crypto.Signature, error) } -// SigType is the aggregable signature type. -type SigType uint8 - -// SigType specifies the role of the signature in the protocol. 
-// Both types are aggregatable cryptographic signatures. -// * SigTypeRandomBeacon type is for random beacon signatures. -// * SigTypeStaking is for Hotstuff signatures. -const ( - SigTypeStaking SigType = iota - SigTypeRandomBeacon -) - -// Valid returns true if the signature is either SigTypeStaking or SigTypeRandomBeacon -// else return false -func (t SigType) Valid() bool { - return t == SigTypeStaking || t == SigTypeRandomBeacon -} - // WeightedSignatureAggregator aggregates signatures of the same signature scheme and the // same message from different signers. The public keys and message are agreed upon upfront. // It is also recommended to only aggregate signatures generated with keys representing @@ -124,13 +106,12 @@ type Packer interface { // sig is the aggregated signature data. // Expected error returns during normal operations: // * none; all errors are symptoms of inconsistent input data or corrupted internal state. - Pack(blockID flow.Identifier, sig *BlockSignatureData) ([]flow.Identifier, []byte, error) + Pack(blockID flow.Identifier, sig *BlockSignatureData) (signerIndices []byte, sigData []byte, err error) // Unpack de-serializes the provided signature data. 
- // blockID is the block that the aggregated sig is signed for // sig is the aggregated signature data // It returns: // - (sigData, nil) if successfully unpacked the signature data - // - (nil, model.ErrInvalidFormat) if failed to unpack the signature data - Unpack(blockID flow.Identifier, signerIDs []flow.Identifier, sigData []byte) (*BlockSignatureData, error) + // - (nil, model.InvalidFormatError) if failed to unpack the signature data + Unpack(signerIdentities flow.IdentityList, sigData []byte) (*BlockSignatureData, error) } diff --git a/consensus/hotstuff/signature/packer.go b/consensus/hotstuff/signature/packer.go index 22edfd7d5e6..66fe719d134 100644 --- a/consensus/hotstuff/signature/packer.go +++ b/consensus/hotstuff/signature/packer.go @@ -1,79 +1,54 @@ package signature import ( + "errors" "fmt" "github.com/onflow/flow-go/consensus/hotstuff" "github.com/onflow/flow-go/consensus/hotstuff/model" - "github.com/onflow/flow-go/consensus/hotstuff/packer" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/model/flow/filter" + "github.com/onflow/flow-go/module/signature" ) // ConsensusSigDataPacker implements the hotstuff.Packer interface. // The encoding method is RLP. type ConsensusSigDataPacker struct { - packer.SigDataPacker + model.SigDataPacker committees hotstuff.Committee } var _ hotstuff.Packer = &ConsensusSigDataPacker{} +// NewConsensusSigDataPacker creates a new ConsensusSigDataPacker instance func NewConsensusSigDataPacker(committees hotstuff.Committee) *ConsensusSigDataPacker { return &ConsensusSigDataPacker{ committees: committees, } } -// Pack serializes the block signature data into raw bytes, suitable to creat a QC. +// Pack serializes the block signature data into raw bytes, suitable to create a QC. // To pack the block signature data, we first build a compact data type, and then encode it into bytes. 
// Expected error returns during normal operations: // * none; all errors are symptoms of inconsistent input data or corrupted internal state. -func (p *ConsensusSigDataPacker) Pack(blockID flow.Identifier, sig *hotstuff.BlockSignatureData) ([]flow.Identifier, []byte, error) { - // breaking staking and random beacon signers into signerIDs and sig type for compaction - // each signer must have its signerID and sig type stored at the same index in the two slices - count := len(sig.StakingSigners) + len(sig.RandomBeaconSigners) - signerIDs := make([]flow.Identifier, 0, count) - sigTypes := make([]hotstuff.SigType, 0, count) - +func (p *ConsensusSigDataPacker) Pack(blockID flow.Identifier, sig *hotstuff.BlockSignatureData) ([]byte, []byte, error) { // retrieve all authorized consensus participants at the given block - consensus, err := p.committees.Identities(blockID, filter.Any) + fullMembers, err := p.committees.Identities(blockID) if err != nil { return nil, nil, fmt.Errorf("could not find consensus committees by block id(%v): %w", blockID, err) } - // lookup is a map from node identifier to node identity - // it is used to check the given signers are all valid signers at the given block - lookup := consensus.Lookup() - - for _, stakingSigner := range sig.StakingSigners { - _, ok := lookup[stakingSigner] - if ok { - signerIDs = append(signerIDs, stakingSigner) - sigTypes = append(sigTypes, hotstuff.SigTypeStaking) - } else { - return nil, nil, fmt.Errorf("staking signer %v not found in the committee at block: %v", stakingSigner, blockID) - } - } - - for _, beaconSigner := range sig.RandomBeaconSigners { - _, ok := lookup[beaconSigner] - if ok { - signerIDs = append(signerIDs, beaconSigner) - sigTypes = append(sigTypes, hotstuff.SigTypeRandomBeacon) - } else { - return nil, nil, fmt.Errorf("random beacon signer %v not found in the committee at block: %v", beaconSigner, blockID) - } - } - - // serialize the sig type for compaction - serialized, err := 
serializeToBitVector(sigTypes) + // breaking staking and random beacon signers into signerIDs and sig type for compaction + // each signer must have its signerID and sig type stored at the same index in the two slices + // For v2, RandomBeaconSigners is nil, as we don't track individually which nodes contributed to the random beacon + // For v3, RandomBeaconSigners is not nil, each RandomBeaconSigner also signed staking sig, so the returned signerIDs, should + // include both StakingSigners and RandomBeaconSigners + signerIndices, sigType, err := signature.EncodeSignerToIndicesAndSigType(fullMembers.NodeIDs(), sig.StakingSigners, sig.RandomBeaconSigners) if err != nil { - return nil, nil, fmt.Errorf("could not serialize sig types to bytes at block: %v, %w", blockID, err) + return nil, nil, fmt.Errorf("unexpected internal error while encoding signer indices and sig types: %w", err) } - data := packer.SignatureData{ - SigType: serialized, + data := model.SignatureData{ + SigType: sigType, AggregatedStakingSig: sig.AggregatedStakingSig, AggregatedRandomBeaconSig: sig.AggregatedRandomBeaconSig, ReconstructedRandomBeaconSig: sig.ReconstructedRandomBeaconSig, @@ -85,146 +60,34 @@ func (p *ConsensusSigDataPacker) Pack(blockID flow.Identifier, sig *hotstuff.Blo return nil, nil, fmt.Errorf("could not encode data %v, %w", data, err) } - return signerIDs, encoded, nil + return signerIndices, encoded, nil } // Unpack de-serializes the provided signature data. 
-// blockID is the block that the aggregated sig is signed for // sig is the aggregated signature data // It returns: // - (sigData, nil) if successfully unpacked the signature data -// - (nil, model.ErrInvalidFormat) if failed to unpack the signature data -func (p *ConsensusSigDataPacker) Unpack(blockID flow.Identifier, signerIDs []flow.Identifier, sigData []byte) (*hotstuff.BlockSignatureData, error) { +// - (nil, model.InvalidFormatError) if failed to unpack the signature data +func (p *ConsensusSigDataPacker) Unpack(signerIdentities flow.IdentityList, sigData []byte) (*hotstuff.BlockSignatureData, error) { // decode into typed data data, err := p.Decode(sigData) if err != nil { - return nil, fmt.Errorf("could not decode sig data %s: %w", err, model.ErrInvalidFormat) + return nil, model.NewInvalidFormatErrorf("could not decode sig data %s", err) } - // deserialize the compact sig types - sigTypes, err := deserializeFromBitVector(data.SigType, len(signerIDs)) + stakingSigners, randomBeaconSigners, err := signature.DecodeSigTypeToStakingAndBeaconSigners(signerIdentities, data.SigType) if err != nil { - return nil, fmt.Errorf("failed to deserialize sig types from bytes: %w", err) - } - - // read all the possible signer IDs at the given block - consensus, err := p.committees.Identities(blockID, filter.Any) - if err != nil { - return nil, fmt.Errorf("could not find consensus committees by block id(%v): %w", blockID, err) - } - - // lookup is a map from node identifier to node identity - // it is used to check the given signerIDs are all valid signers at the given block - lookup := consensus.Lookup() - - // read each signer's signerID and sig type from two different slices - // group signers by its sig type - stakingSigners := make([]flow.Identifier, 0, len(signerIDs)) - randomBeaconSigners := make([]flow.Identifier, 0, len(signerIDs)) - - for i, sigType := range sigTypes { - signerID := signerIDs[i] - _, ok := lookup[signerID] - if !ok { - return nil, 
fmt.Errorf("unknown signer ID (%v) at the given block (%v): %w", - signerID, blockID, model.ErrInvalidFormat) - } - - if sigType == hotstuff.SigTypeStaking { - stakingSigners = append(stakingSigners, signerID) - } else if sigType == hotstuff.SigTypeRandomBeacon { - randomBeaconSigners = append(randomBeaconSigners, signerID) - } else { - return nil, fmt.Errorf("unknown sigType %v, %w", sigType, model.ErrInvalidFormat) + if errors.Is(err, signature.ErrIllegallyPaddedBitVector) || errors.Is(err, signature.ErrIncompatibleBitVectorLength) { + return nil, model.NewInvalidFormatErrorf("invalid SigType vector: %w", err) } + return nil, fmt.Errorf("could not decode signer indices and sig type: %w", err) } return &hotstuff.BlockSignatureData{ - StakingSigners: stakingSigners, - RandomBeaconSigners: randomBeaconSigners, + StakingSigners: stakingSigners.NodeIDs(), + RandomBeaconSigners: randomBeaconSigners.NodeIDs(), AggregatedStakingSig: data.AggregatedStakingSig, AggregatedRandomBeaconSig: data.AggregatedRandomBeaconSig, ReconstructedRandomBeaconSig: data.ReconstructedRandomBeaconSig, }, nil } - -// the total number of bytes required to fit the `count` number of bits -func bytesCount(count int) int { - return (count + 7) >> 3 -} - -// serializeToBitVector encodes the given sigTypes into a bit vector. -// We append tailing `0`s to the vector to represent it as bytes. -func serializeToBitVector(sigTypes []hotstuff.SigType) ([]byte, error) { - totalBytes := bytesCount(len(sigTypes)) - bytes := make([]byte, 0, totalBytes) - // a sig type can be converted into one bit, the type at index 0 being converted into the least significant bit: - // the random beacon type is mapped to 1, while the staking type is mapped to 0. 
- // the remaining unfilled bits in the last byte will be 0 - const initialMask = byte(1 << 7) - - b := byte(0) - mask := initialMask - // iterate through every 8 sig types, and convert it into a byte - for pos, sigType := range sigTypes { - if sigType == hotstuff.SigTypeRandomBeacon { - b ^= mask // set a bit to one - } else if sigType != hotstuff.SigTypeStaking { - return nil, fmt.Errorf("invalid sig type: %v at pos %v", sigType, pos) - } - - mask >>= 1 // move to the next bit - if mask == 0 { // this happens every 8 loop iterations - bytes = append(bytes, b) - b = byte(0) - mask = initialMask - } - } - // add the last byte when the length is not multiple of 8 - if mask != initialMask { - bytes = append(bytes, b) - } - return bytes, nil -} - -// deserializeFromBitVector decodes the sig types from the given bit vector -// - serialized: bit-vector, one bit for each signer (tailing `0`s appended to make full bytes) -// - count: the total number of sig types to be deserialized from the given bytes -// It returns: -// - (sigTypes, nil) if successfully deserialized sig types -// - (nil, model.ErrInvalidFormat) if the number of serialized bytes doesn't match the given number of sig types -// - (nil, model.ErrInvalidFormat) if the remaining bits in the last byte are not all 0s -func deserializeFromBitVector(serialized []byte, count int) ([]hotstuff.SigType, error) { - types := make([]hotstuff.SigType, 0, count) - - // validate the length of serialized vector - // it must be equal to the bytes required to fit exactly `count` number of bits - totalBytes := bytesCount(count) - if len(serialized) != totalBytes { - return nil, fmt.Errorf("encoding sig types of %d signers requires %d bytes but got %d bytes: %w", - count, totalBytes, len(serialized), model.ErrInvalidFormat) - } - - // parse each bit in the bit-vector, bit 0 is SigTypeStaking, bit 1 is SigTypeRandomBeacon - var byt byte - var offset int - for i := 0; i < count; i++ { - byt = serialized[i>>3] - offset = 7 - (i & 
7) - mask := byte(1 << offset) - if byt&mask == 0 { - types = append(types, hotstuff.SigTypeStaking) - } else { - types = append(types, hotstuff.SigTypeRandomBeacon) - } - } - - // remaining bits (if any), they must be all `0`s - remainings := byt << (8 - offset) - if remainings != byte(0) { - return nil, fmt.Errorf("the remaining bits are expected to be all 0s, but are %v: %w", - remainings, model.ErrInvalidFormat) - } - - return types, nil -} diff --git a/consensus/hotstuff/signature/packer_test.go b/consensus/hotstuff/signature/packer_test.go index 94743503912..67baa4326f5 100644 --- a/consensus/hotstuff/signature/packer_test.go +++ b/consensus/hotstuff/signature/packer_test.go @@ -1,7 +1,6 @@ package signature import ( - "errors" "fmt" "testing" @@ -12,15 +11,16 @@ import ( "github.com/onflow/flow-go/consensus/hotstuff/mocks" "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/signature" "github.com/onflow/flow-go/utils/unittest" ) func newPacker(identities flow.IdentityList) *ConsensusSigDataPacker { // mock consensus committee committee := &mocks.Committee{} - committee.On("Identities", mock.Anything, mock.Anything).Return( - func(blockID flow.Identifier, selector flow.IdentityFilter) flow.IdentityList { - return identities.Filter(selector) + committee.On("Identities", mock.Anything).Return( + func(blockID flow.Identifier) flow.IdentityList { + return identities }, nil, ) @@ -28,15 +28,15 @@ func newPacker(identities flow.IdentityList) *ConsensusSigDataPacker { return NewConsensusSigDataPacker(committee) } -func makeBlockSigData(committee []flow.Identifier) *hotstuff.BlockSignatureData { +func makeBlockSigData(committee flow.IdentityList) *hotstuff.BlockSignatureData { blockSigData := &hotstuff.BlockSignatureData{ StakingSigners: []flow.Identifier{ - committee[0], // A - committee[2], // C + committee[0].NodeID, // A + committee[2].NodeID, // C }, RandomBeaconSigners: 
[]flow.Identifier{ - committee[3], // D - committee[5], // F + committee[3].NodeID, // D + committee[5].NodeID, // F }, AggregatedStakingSig: unittest.SignatureFixture(), AggregatedRandomBeaconSig: unittest.SignatureFixture(), @@ -52,24 +52,25 @@ func makeBlockSigData(committee []flow.Identifier) *hotstuff.BlockSignatureData // aggregated staking sigs are from [A,C] // aggregated random beacon sigs are from [D,F] func TestPackUnpack(t *testing.T) { - identities := unittest.IdentityListFixture(6, unittest.WithRole(flow.RoleConsensus)) - committee := identities.NodeIDs() - // prepare data for testing + committee := unittest.IdentityListFixture(6, unittest.WithRole(flow.RoleConsensus)) blockID := unittest.IdentifierFixture() blockSigData := makeBlockSigData(committee) // create packer with the committee - packer := newPacker(identities) + packer := newPacker(committee) // pack & unpack - signerIDs, sig, err := packer.Pack(blockID, blockSigData) + signerIndices, sig, err := packer.Pack(blockID, blockSigData) require.NoError(t, err) - unpacked, err := packer.Unpack(blockID, signerIDs, sig) + signers, err := signature.DecodeSignerIndicesToIdentities(committee, signerIndices) require.NoError(t, err) - // check that the unpack data match with the original data + unpacked, err := packer.Unpack(signers, sig) + require.NoError(t, err) + + // check that the unpacked data match with the original data require.Equal(t, blockSigData.StakingSigners, unpacked.StakingSigners) require.Equal(t, blockSigData.RandomBeaconSigners, unpacked.RandomBeaconSigners) require.Equal(t, blockSigData.AggregatedStakingSig, unpacked.AggregatedStakingSig) @@ -77,40 +78,41 @@ func TestPackUnpack(t *testing.T) { require.Equal(t, blockSigData.ReconstructedRandomBeaconSig, unpacked.ReconstructedRandomBeaconSig) // check the packed signer IDs - expectedSignerIDs := []flow.Identifier{} + var expectedSignerIDs flow.IdentifierList expectedSignerIDs = append(expectedSignerIDs, blockSigData.StakingSigners...) 
expectedSignerIDs = append(expectedSignerIDs, blockSigData.RandomBeaconSigners...) - require.Equal(t, expectedSignerIDs, signerIDs) + require.Equal(t, expectedSignerIDs, signers.NodeIDs()) } // if signed by 60 staking nodes, and 50 random beacon nodes among a 200 nodes committee, // it's able to pack and unpack func TestPackUnpackManyNodes(t *testing.T) { - identities := unittest.IdentityListFixture(200, unittest.WithRole(flow.RoleConsensus)) - committee := identities.NodeIDs() - // prepare data for testing + committee := unittest.IdentityListFixture(200, unittest.WithRole(flow.RoleConsensus)) blockID := unittest.IdentifierFixture() blockSigData := makeBlockSigData(committee) stakingSigners := make([]flow.Identifier, 0) for i := 0; i < 60; i++ { - stakingSigners = append(stakingSigners, committee[i]) + stakingSigners = append(stakingSigners, committee[i].NodeID) } randomBeaconSigners := make([]flow.Identifier, 0) for i := 100; i < 100+50; i++ { - randomBeaconSigners = append(randomBeaconSigners, committee[i]) + randomBeaconSigners = append(randomBeaconSigners, committee[i].NodeID) } blockSigData.StakingSigners = stakingSigners blockSigData.RandomBeaconSigners = randomBeaconSigners // create packer with the committee - packer := newPacker(identities) + packer := newPacker(committee) // pack & unpack - signerIDs, sig, err := packer.Pack(blockID, blockSigData) + signerIndices, sig, err := packer.Pack(blockID, blockSigData) + require.NoError(t, err) + + signers, err := signature.DecodeSignerIndicesToIdentities(committee, signerIndices) require.NoError(t, err) - unpacked, err := packer.Unpack(blockID, signerIDs, sig) + unpacked, err := packer.Unpack(signers, sig) require.NoError(t, err) // check that the unpack data match with the original data @@ -121,82 +123,81 @@ func TestPackUnpackManyNodes(t *testing.T) { require.Equal(t, blockSigData.ReconstructedRandomBeaconSig, unpacked.ReconstructedRandomBeaconSig) // check the packed signer IDs - expectedSignerIDs := 
[]flow.Identifier{} + var expectedSignerIDs flow.IdentifierList expectedSignerIDs = append(expectedSignerIDs, blockSigData.StakingSigners...) expectedSignerIDs = append(expectedSignerIDs, blockSigData.RandomBeaconSigners...) - require.Equal(t, expectedSignerIDs, signerIDs) + require.Equal(t, expectedSignerIDs, signers.NodeIDs()) } -// if the sig data can not be decoded, return ErrInvalidFormat +// if the sig data can not be decoded, return model.InvalidFormatError func TestFailToDecode(t *testing.T) { - identities := unittest.IdentityListFixture(6, unittest.WithRole(flow.RoleConsensus)) - committee := identities.NodeIDs() - // prepare data for testing + committee := unittest.IdentityListFixture(6, unittest.WithRole(flow.RoleConsensus)) blockID := unittest.IdentifierFixture() blockSigData := makeBlockSigData(committee) // create packer with the committee - packer := newPacker(identities) + packer := newPacker(committee) - signerIDs, sig, err := packer.Pack(blockID, blockSigData) + signerIndices, sig, err := packer.Pack(blockID, blockSigData) require.NoError(t, err) - // prepare invalid data by modifying the valid data - invalidSigData := sig[1:] - - _, err = packer.Unpack(blockID, signerIDs, invalidSigData) + signers, err := signature.DecodeSignerIndicesToIdentities(committee, signerIndices) + require.NoError(t, err) - require.Error(t, err) - require.True(t, errors.Is(err, model.ErrInvalidFormat)) + // prepare invalid data by modifying the valid data and unpack: + invalidSigData := sig[1:] + _, err = packer.Unpack(signers, invalidSigData) + require.True(t, model.IsInvalidFormatError(err)) } +// TestMismatchSignerIDs // if the signer IDs doesn't match, return InvalidFormatError func TestMismatchSignerIDs(t *testing.T) { - identities := unittest.IdentityListFixture(6, unittest.WithRole(flow.RoleConsensus)) - committee := identities.NodeIDs() - // prepare data for testing + committee := unittest.IdentityListFixture(9, unittest.WithRole(flow.RoleConsensus)) blockID := 
unittest.IdentifierFixture() - blockSigData := makeBlockSigData(committee) + blockSigData := makeBlockSigData(committee[:6]) // create packer with the committee - packer := newPacker(identities) + packer := newPacker(committee) - signerIDs, sig, err := packer.Pack(blockID, blockSigData) + signerIndices, sig, err := packer.Pack(blockID, blockSigData) require.NoError(t, err) - // prepare invalid signerIDs by modifying the valid signerIDs - // remove the first signer - invalidSignerIDs := signerIDs[1:] - - _, err = packer.Unpack(blockID, invalidSignerIDs, sig) + signers, err := signature.DecodeSignerIndicesToIdentities(committee, signerIndices) + require.NoError(t, err) - require.Error(t, err) - require.True(t, errors.Is(err, model.ErrInvalidFormat)) + // prepare invalid signers by modifying the valid signers + // remove the first signer + invalidSignerIDs := signers[1:] - // prepare invalid signerIDs by modifying the valid signerIDs - // adding one more signer - invalidSignerIDs = append(signerIDs, unittest.IdentifierFixture()) - misPacked, err := packer.Unpack(blockID, invalidSignerIDs, sig) + _, err = packer.Unpack(invalidSignerIDs, sig) + require.True(t, model.IsInvalidFormatError(err)) + // with additional signer + // 9 nodes committee would require two bytes for sig type, the additional byte + // would cause the sig type and signer IDs to be mismatch + invalidSignerIDs = committee + misPacked, err := packer.Unpack(invalidSignerIDs, sig) require.Error(t, err, fmt.Sprintf("packed signers: %v", misPacked)) - require.True(t, errors.Is(err, model.ErrInvalidFormat)) + require.True(t, model.IsInvalidFormatError(err)) } // if sig type doesn't match, return InvalidFormatError func TestInvalidSigType(t *testing.T) { - identities := unittest.IdentityListFixture(6, unittest.WithRole(flow.RoleConsensus)) - committee := identities.NodeIDs() - // prepare data for testing + committee := unittest.IdentityListFixture(6, unittest.WithRole(flow.RoleConsensus)) blockID := 
unittest.IdentifierFixture() blockSigData := makeBlockSigData(committee) // create packer with the committee - packer := newPacker(identities) + packer := newPacker(committee) + + signerIndices, sig, err := packer.Pack(blockID, blockSigData) + require.NoError(t, err) - signerIDs, sig, err := packer.Pack(blockID, blockSigData) + signers, err := signature.DecodeSignerIndicesToIdentities(committee, signerIndices) require.NoError(t, err) data, err := packer.Decode(sig) @@ -207,216 +208,8 @@ func TestInvalidSigType(t *testing.T) { encoded, err := packer.Encode(data) require.NoError(t, err) - _, err = packer.Unpack(blockID, signerIDs, encoded) - - require.True(t, errors.Is(err, model.ErrInvalidFormat)) -} - -func TestSerializeAndDeserializeSigTypes(t *testing.T) { - t.Run("nothing", func(t *testing.T) { - expected := []hotstuff.SigType{} - bytes, err := serializeToBitVector(expected) - require.NoError(t, err) - require.Equal(t, []byte{}, bytes) - - types, err := deserializeFromBitVector(bytes, len(expected)) - require.NoError(t, err) - require.Equal(t, expected, types) - }) - - t.Run("1 SigTypeStaking", func(t *testing.T) { - expected := []hotstuff.SigType{hotstuff.SigTypeStaking} - bytes, err := serializeToBitVector(expected) - require.NoError(t, err) - require.Equal(t, []byte{0}, bytes) - - types, err := deserializeFromBitVector(bytes, len(expected)) - require.NoError(t, err) - require.Equal(t, expected, types) - }) - - t.Run("1 SigTypeRandomBeacon", func(t *testing.T) { - expected := []hotstuff.SigType{hotstuff.SigTypeRandomBeacon} - bytes, err := serializeToBitVector(expected) - require.NoError(t, err) - require.Equal(t, []byte{1 << 7}, bytes) - - types, err := deserializeFromBitVector(bytes, len(expected)) - require.NoError(t, err) - require.Equal(t, expected, types) - }) - - t.Run("2 SigTypeRandomBeacon", func(t *testing.T) { - expected := []hotstuff.SigType{ - hotstuff.SigTypeRandomBeacon, - hotstuff.SigTypeRandomBeacon, - } - bytes, err := 
serializeToBitVector(expected) - require.NoError(t, err) - require.Equal(t, []byte{1<<7 + 1<<6}, bytes) - - types, err := deserializeFromBitVector(bytes, len(expected)) - require.NoError(t, err) - require.Equal(t, expected, types) - }) - - t.Run("8 SigTypeRandomBeacon", func(t *testing.T) { - count := 8 - expected := make([]hotstuff.SigType, 0) - for i := 0; i < count; i++ { - expected = append(expected, hotstuff.SigTypeRandomBeacon) - } - bytes, err := serializeToBitVector(expected) - require.NoError(t, err) - require.Equal(t, []byte{255}, bytes) - - types, err := deserializeFromBitVector(bytes, len(expected)) - require.NoError(t, err) - require.Equal(t, expected, types) - }) - - t.Run("8 SigTypeStaking", func(t *testing.T) { - count := 8 - expected := make([]hotstuff.SigType, 0) - for i := 0; i < count; i++ { - expected = append(expected, hotstuff.SigTypeStaking) - } - bytes, err := serializeToBitVector(expected) - require.NoError(t, err) - require.Equal(t, []byte{0}, bytes) - - types, err := deserializeFromBitVector(bytes, len(expected)) - require.NoError(t, err) - require.Equal(t, expected, types) - }) - - t.Run("9 SigTypeRandomBeacon", func(t *testing.T) { - count := 9 - expected := make([]hotstuff.SigType, 0) - for i := 0; i < count; i++ { - expected = append(expected, hotstuff.SigTypeRandomBeacon) - } - bytes, err := serializeToBitVector(expected) - require.NoError(t, err) - require.Equal(t, []byte{255, 1 << 7}, bytes) - - types, err := deserializeFromBitVector(bytes, len(expected)) - require.NoError(t, err) - require.Equal(t, expected, types) - }) - - t.Run("9 SigTypeStaking", func(t *testing.T) { - count := 9 - expected := make([]hotstuff.SigType, 0) - for i := 0; i < count; i++ { - expected = append(expected, hotstuff.SigTypeStaking) - } - bytes, err := serializeToBitVector(expected) - require.NoError(t, err) - require.Equal(t, []byte{0, 0}, bytes) - - types, err := deserializeFromBitVector(bytes, len(expected)) - require.NoError(t, err) - 
require.Equal(t, expected, types) - }) - - t.Run("16 SigTypeRandomBeacon, 2 groups", func(t *testing.T) { - count := 16 - expected := make([]hotstuff.SigType, 0) - for i := 0; i < count; i++ { - expected = append(expected, hotstuff.SigTypeRandomBeacon) - } - bytes, err := serializeToBitVector(expected) - require.NoError(t, err) - require.Equal(t, []byte{255, 255}, bytes) - - types, err := deserializeFromBitVector(bytes, len(expected)) - require.NoError(t, err) - require.Equal(t, expected, types) - }) - - t.Run("3 SigTypeRandomBeacon, 4 SigTypeStaking", func(t *testing.T) { - random, staking := 3, 4 - expected := make([]hotstuff.SigType, 0) - for i := 0; i < random; i++ { - expected = append(expected, hotstuff.SigTypeRandomBeacon) - } - for i := 0; i < staking; i++ { - expected = append(expected, hotstuff.SigTypeStaking) - } - bytes, err := serializeToBitVector(expected) - require.NoError(t, err) - require.Equal(t, []byte{1<<7 + 1<<6 + 1<<5}, bytes) - - types, err := deserializeFromBitVector(bytes, len(expected)) - require.NoError(t, err) - require.Equal(t, expected, types) - }) - - t.Run("3 SigTypeStaking, 4 SigTypeRandomBeacon", func(t *testing.T) { - staking, random := 3, 4 - expected := make([]hotstuff.SigType, 0) - for i := 0; i < staking; i++ { - expected = append(expected, hotstuff.SigTypeStaking) - } - for i := 0; i < random; i++ { - expected = append(expected, hotstuff.SigTypeRandomBeacon) - } - bytes, err := serializeToBitVector(expected) - require.NoError(t, err) - // 00011110 - require.Equal(t, []byte{1<<4 + 1<<3 + 1<<2 + 1<<1}, bytes) - - types, err := deserializeFromBitVector(bytes, len(expected)) - require.NoError(t, err) - require.Equal(t, expected, types) - }) - - t.Run("3 SigTypeStaking, 6 SigTypeRandomBeacon", func(t *testing.T) { - staking, random := 3, 6 - expected := make([]hotstuff.SigType, 0) - for i := 0; i < staking; i++ { - expected = append(expected, hotstuff.SigTypeStaking) - } - for i := 0; i < random; i++ { - expected = 
append(expected, hotstuff.SigTypeRandomBeacon) - } - bytes, err := serializeToBitVector(expected) - require.NoError(t, err) - // 00011110, 10000000 - require.Equal(t, []byte{1<<4 + 1<<3 + 1<<2 + 1<<1 + 1, 1 << 7}, bytes) - - types, err := deserializeFromBitVector(bytes, len(expected)) - require.NoError(t, err) - require.Equal(t, expected, types) - }) -} - -func TestDeserializeMismatchingBytes(t *testing.T) { - count := 9 - expected := make([]hotstuff.SigType, 0) - for i := 0; i < count; i++ { - expected = append(expected, hotstuff.SigTypeStaking) - } - bytes, err := serializeToBitVector(expected) - require.NoError(t, err) - - for invalidCount := 0; invalidCount < 100; invalidCount++ { - if invalidCount >= count && invalidCount <= 16 { - // skip correct count - continue - } - _, err := deserializeFromBitVector(bytes, invalidCount) - require.Error(t, err, fmt.Sprintf("invalid count: %v", invalidCount)) - require.True(t, errors.Is(err, model.ErrInvalidFormat), fmt.Sprintf("invalid count: %v", invalidCount)) - } -} - -func TestDeserializeInvalidTailingBits(t *testing.T) { - _, err := deserializeFromBitVector([]byte{255, 1<<7 + 1<<1}, 9) - require.Error(t, err) - require.True(t, errors.Is(err, model.ErrInvalidFormat)) - require.Contains(t, fmt.Sprintf("%v", err), "remaining bits") + _, err = packer.Unpack(signers, encoded) + require.True(t, model.IsInvalidFormatError(err)) } // TestPackUnpackWithoutRBAggregatedSig test that a packed data without random beacon signers and @@ -427,14 +220,12 @@ func TestDeserializeInvalidTailingBits(t *testing.T) { // no aggregated random beacon sigs // no random beacon signers func TestPackUnpackWithoutRBAggregatedSig(t *testing.T) { - identities := unittest.IdentityListFixture(3, unittest.WithRole(flow.RoleConsensus)) - committee := identities.NodeIDs() - // prepare data for testing + committee := unittest.IdentityListFixture(3, unittest.WithRole(flow.RoleConsensus)) blockID := unittest.IdentifierFixture() blockSigData := 
&hotstuff.BlockSignatureData{ - StakingSigners: committee, + StakingSigners: committee.NodeIDs(), RandomBeaconSigners: nil, AggregatedStakingSig: unittest.SignatureFixture(), AggregatedRandomBeaconSig: nil, @@ -442,13 +233,16 @@ func TestPackUnpackWithoutRBAggregatedSig(t *testing.T) { } // create packer with the committee - packer := newPacker(identities) + packer := newPacker(committee) // pack & unpack - signerIDs, sig, err := packer.Pack(blockID, blockSigData) + signerIndices, sig, err := packer.Pack(blockID, blockSigData) + require.NoError(t, err) + + signers, err := signature.DecodeSignerIndicesToIdentities(committee, signerIndices) require.NoError(t, err) - unpacked, err := packer.Unpack(blockID, signerIDs, sig) + unpacked, err := packer.Unpack(signers, sig) require.NoError(t, err) // check that the unpack data match with the original data @@ -461,8 +255,8 @@ func TestPackUnpackWithoutRBAggregatedSig(t *testing.T) { require.Empty(t, unpacked.AggregatedRandomBeaconSig) // check the packed signer IDs - expectedSignerIDs := append([]flow.Identifier{}, blockSigData.StakingSigners...) - require.Equal(t, expectedSignerIDs, signerIDs) + expectedSignerIDs := append(flow.IdentifierList{}, blockSigData.StakingSigners...) 
+ require.Equal(t, expectedSignerIDs, signers.NodeIDs()) } // TestPackWithoutRBAggregatedSig tests that packer correctly handles BlockSignatureData diff --git a/consensus/hotstuff/validator/validator.go b/consensus/hotstuff/validator/validator.go index 2dc88ed96bb..5b00dc48f05 100644 --- a/consensus/hotstuff/validator/validator.go +++ b/consensus/hotstuff/validator/validator.go @@ -7,7 +7,7 @@ import ( "github.com/onflow/flow-go/consensus/hotstuff" "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/model/flow/filter" + "github.com/onflow/flow-go/module/signature" ) // Validator is responsible for validating QC, Block and Vote @@ -32,9 +32,11 @@ func New( } } -// ValidateQC validates the QC -// qc - the qc to be validated -// block - the block that the qc is pointing to +// ValidateQC checks the validity of a QC for a given block. Inputs: +// * qc - the qc to be validated +// * block - the block that the qc is pointing to +// During normal operations, the following error returns are expected: +// * model.InvalidBlockError if the QC is invalid func (v *Validator) ValidateQC(qc *flow.QuorumCertificate, block *model.Block) error { if qc.BlockID != block.BlockID { // Sanity check! 
Failing indicates a bug in the higher-level logic @@ -46,13 +48,18 @@ func (v *Validator) ValidateQC(qc *flow.QuorumCertificate, block *model.Block) e // Retrieve full Identities of all legitimate consensus participants and the Identities of the qc's signers // IdentityList returned by hotstuff.Committee contains only legitimate consensus participants for the specified block (must have positive weight) - allParticipants, err := v.committee.Identities(block.BlockID, filter.Any) + allParticipants, err := v.committee.Identities(block.BlockID) if err != nil { return fmt.Errorf("could not get consensus participants for block %s: %w", block.BlockID, err) } - signers := allParticipants.Filter(filter.HasNodeID(qc.SignerIDs...)) // resulting IdentityList contains no duplicates - if len(signers) != len(qc.SignerIDs) { - return newInvalidBlockError(block, model.NewInvalidSignerErrorf("some qc signers are duplicated or invalid consensus participants at block %x", block.BlockID)) + + signers, err := signature.DecodeSignerIndicesToIdentities(allParticipants, qc.SignerIndices) + if err != nil { + if signature.IsDecodeSignerIndicesError(err) { + return newInvalidBlockError(block, fmt.Errorf("invalid signer indices: %w", err)) + } + // unexpected error + return fmt.Errorf("unexpected internal error decoding signer indices: %w", err) } // determine whether signers reach minimally required weight threshold for consensus @@ -64,14 +71,18 @@ func (v *Validator) ValidateQC(qc *flow.QuorumCertificate, block *model.Block) e // verify whether the signature bytes are valid for the QC in the context of the protocol state err = v.verifier.VerifyQC(signers, qc.SigData, block) if err != nil { - // Theoretically, `VerifyQC` could also return a `model.InvalidSignerError`. However, - // for the time being, we assume that _every_ HotStuff participant is also a member of - // the random beacon committee. Consequently, `InvalidSignerError` should not occur atm. 
- // TODO: if the random beacon committee is a strict subset of the HotStuff committee, - // we expect `model.InvalidSignerError` here during normal operations. + // Considerations about other errors that `VerifyQC` could return: + // * model.InvalidSignerError: for the time being, we assume that _every_ HotStuff participant + // is also a member of the random beacon committee. Consequently, `InvalidSignerError` should + // not occur atm. + // TODO: if the random beacon committee is a strict subset of the HotStuff committee, + // we expect `model.InvalidSignerError` here during normal operations. + // * model.InsufficientSignaturesError: we previously checked the total weight of all signers + // meets the supermajority threshold, which is a _positive_ number. Hence, there must be at + // least one signer. Hence, receiving this error would be a symptom of a fatal internal bug. switch { - case errors.Is(err, model.ErrInvalidFormat): - return newInvalidBlockError(block, fmt.Errorf("QC's signature data has an invalid structure: %w", err)) + case model.IsInvalidFormatError(err): + return newInvalidBlockError(block, fmt.Errorf("QC's signature data has an invalid structure: %w", err)) case errors.Is(err, model.ErrInvalidSignature): return newInvalidBlockError(block, fmt.Errorf("QC contains invalid signature(s): %w", err)) default: @@ -158,7 +169,7 @@ func (v *Validator) ValidateVote(vote *model.Vote, block *model.Block) (*flow.Id // the random beacon committee. Consequently, `InvalidSignerError` should not occur atm. // TODO: if the random beacon committee is a strict subset of the HotStuff committee, // we expect `model.InvalidSignerError` here during normal operations. 
- if errors.Is(err, model.ErrInvalidFormat) || errors.Is(err, model.ErrInvalidSignature) { + if model.IsInvalidFormatError(err) || errors.Is(err, model.ErrInvalidSignature) { return nil, newInvalidVoteError(vote, err) } return nil, fmt.Errorf("cannot verify signature for vote (%x): %w", vote.ID(), err) diff --git a/consensus/hotstuff/validator/validator_test.go b/consensus/hotstuff/validator/validator_test.go index 5a1161f8a6d..a98ae56cdef 100644 --- a/consensus/hotstuff/validator/validator_test.go +++ b/consensus/hotstuff/validator/validator_test.go @@ -7,8 +7,11 @@ import ( "testing" "time" + "github.com/onflow/flow-go/module/signature" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" "github.com/onflow/flow-go/consensus/hotstuff/helper" @@ -51,13 +54,21 @@ func (ps *ProposalSuite) SetupTest() { ps.parent = helper.MakeBlock( helper.WithBlockView(ps.finalized), ) + + indices, err := signature.EncodeSignersToIndices(ps.participants.NodeIDs(), ps.participants.NodeIDs()) + require.NoError(ps.T(), err) + ps.block = helper.MakeBlock( helper.WithBlockView(ps.finalized+1), helper.WithBlockProposer(ps.leader.NodeID), helper.WithParentBlock(ps.parent), - helper.WithParentSigners(ps.participants.NodeIDs()), + helper.WithParentSigners(indices), ) - ps.voters = ps.participants.Filter(filter.HasNodeID(ps.block.QC.SignerIDs...)) + + voterIDs, err := signature.DecodeSignerIndicesToIdentifiers(ps.participants.NodeIDs(), ps.block.QC.SignerIndices) + require.NoError(ps.T(), err) + + ps.voters = ps.participants.Filter(filter.HasNodeID(voterIDs...)) ps.proposal = &model.Proposal{Block: ps.block} ps.vote = ps.proposal.ProposerVote() ps.voter = ps.leader @@ -65,9 +76,9 @@ func (ps *ProposalSuite) SetupTest() { // set up the mocked hotstuff Committee state ps.committee = &mocks.Committee{} ps.committee.On("LeaderForView", ps.block.View).Return(ps.leader.NodeID, nil) - 
ps.committee.On("Identities", mock.Anything, mock.Anything).Return( - func(blockID flow.Identifier, selector flow.IdentityFilter) flow.IdentityList { - return ps.participants.Filter(selector) + ps.committee.On("Identities", mock.Anything).Return( + func(blockID flow.Identifier) flow.IdentityList { + return ps.participants }, nil, ) @@ -113,10 +124,10 @@ func (ps *ProposalSuite) TestProposalSignatureError() { func (ps *ProposalSuite) TestProposalSignatureInvalidFormat() { - // change the verifier to fail signature validation with ErrInvalidFormat error + // change the verifier to fail signature validation with InvalidFormatError error *ps.verifier = mocks.Verifier{} ps.verifier.On("VerifyQC", ps.voters, ps.block.QC.SigData, ps.parent).Return(nil) - ps.verifier.On("VerifyVote", ps.voter, ps.vote.SigData, ps.block).Return(fmt.Errorf("%w", model.ErrInvalidFormat)) + ps.verifier.On("VerifyVote", ps.voter, ps.vote.SigData, ps.block).Return(model.NewInvalidFormatErrorf("")) // check that validation now fails err := ps.validator.ValidateProposal(ps.proposal) @@ -204,7 +215,7 @@ func (ps *ProposalSuite) TestProposalMissingParentLower() { } // TestProposalQCInvalid checks that Validator handles the verifier's error returns correctly. -// In case of `model.ErrInvalidFormat` and model.ErrInvalidSignature`, we expect the Validator +// In case of `model.InvalidFormatError` and model.ErrInvalidSignature`, we expect the Validator // to recognize those as an invalid QC, i.e. returns an `model.InvalidBlockError`. // In contrast, unexpected exceptions and `model.InvalidSignerError` should _not_ be // interpreted as a sign of an invalid QC. 
@@ -222,8 +233,7 @@ func (ps *ProposalSuite) TestProposalQCInvalid() { ps.Run("invalid format", func() { *ps.verifier = mocks.Verifier{} - ps.verifier.On("VerifyQC", ps.voters, ps.block.QC.SigData, ps.parent).Return( - fmt.Errorf("invalid qc: %w", model.ErrInvalidFormat)) + ps.verifier.On("VerifyQC", ps.voters, ps.block.QC.SigData, ps.parent).Return(model.NewInvalidFormatErrorf("invalid qc")) ps.verifier.On("VerifyVote", ps.voter, ps.vote.SigData, ps.block).Return(nil) // check that validation fails and the failure case is recognized as an invalid block @@ -403,7 +413,6 @@ type QCSuite struct { } func (qs *QCSuite) SetupTest() { - // create a list of 10 nodes with 1-weight each qs.participants = unittest.IdentityListFixture(10, unittest.WithRole(flow.RoleConsensus), @@ -415,13 +424,16 @@ func (qs *QCSuite) SetupTest() { // create a block that has the signers in its QC qs.block = helper.MakeBlock() - qs.qc = helper.MakeQC(helper.WithQCBlock(qs.block), helper.WithQCSigners(qs.signers.NodeIDs())) + indices, err := signature.EncodeSignersToIndices(qs.participants.NodeIDs(), qs.signers.NodeIDs()) + require.NoError(qs.T(), err) + + qs.qc = helper.MakeQC(helper.WithQCBlock(qs.block), helper.WithQCSigners(indices)) // return the correct participants and identities from view state qs.committee = &mocks.Committee{} - qs.committee.On("Identities", mock.Anything, mock.Anything).Return( - func(blockID flow.Identifier, selector flow.IdentityFilter) flow.IdentityList { - return qs.participants.Filter(selector) + qs.committee.On("Identities", mock.Anything).Return( + func(blockID flow.Identifier) flow.IdentityList { + return qs.participants }, nil, ) @@ -434,21 +446,12 @@ func (qs *QCSuite) SetupTest() { qs.validator = New(qs.committee, nil, qs.verifier) } +// TestQCOK verifies the default happy case func (qs *QCSuite) TestQCOK() { - - // check the default happy case passes err := qs.validator.ValidateQC(qs.qc, qs.block) assert.NoError(qs.T(), err, "a valid QC should be accepted") 
} -// TestQCInvalidSignersError tests that a qc fails validation if: -// QC signer's Identities cannot all be retrieved (some are not valid consensus participants) -func (qs *QCSuite) TestQCInvalidSignersError() { - qs.participants = qs.participants[1:] // remove participant[0] from the list of valid consensus participant - err := qs.validator.ValidateQC(qs.qc, qs.block) // the QC should not be validated anymore - assert.True(qs.T(), model.IsInvalidBlockError(err), "if some signers are invalid consensus participants, an ErrorInvalidBlock error should be raised") -} - // TestQCRetrievingParticipantsError tests that validation errors if: // there is an error retrieving identities of consensus participants func (qs *QCSuite) TestQCRetrievingParticipantsError() { @@ -467,10 +470,13 @@ func (qs *QCSuite) TestQCRetrievingParticipantsError() { func (qs *QCSuite) TestQCInsufficientWeight() { // signers only have weight 6 out of 10 total (NOT have a supermajority) qs.signers = qs.participants[:6] - qs.qc = helper.MakeQC(helper.WithQCBlock(qs.block), helper.WithQCSigners(qs.signers.NodeIDs())) + indices, err := signature.EncodeSignersToIndices(qs.participants.NodeIDs(), qs.signers.NodeIDs()) + require.NoError(qs.T(), err) + + qs.qc = helper.MakeQC(helper.WithQCBlock(qs.block), helper.WithQCSigners(indices)) // the QC should not be validated anymore - err := qs.validator.ValidateQC(qs.qc, qs.block) + err = qs.validator.ValidateQC(qs.qc, qs.block) assert.Error(qs.T(), err, "a QC should be rejected if it has insufficient voted weight") // we should get a threshold error to bubble up for extra info @@ -491,24 +497,46 @@ func (qs *QCSuite) TestQCSignatureError() { assert.False(qs.T(), model.IsInvalidBlockError(err), "unspecific internal errors should not result in ErrorInvalidBlock error") } +// TestQCSignatureInvalid verifies that the Validator correctly handles the model.ErrInvalidSignature. 
+// This error return from `Verifier.VerifyQC` is an expected failure case in case of a byzantine input, where +// one of the signatures in the QC is broken. Hence, the Validator should wrap it as InvalidBlockError. func (qs *QCSuite) TestQCSignatureInvalid() { - // change the verifier to fail the QC signature *qs.verifier = mocks.Verifier{} - qs.verifier.On("VerifyQC", qs.signers, qs.qc.SigData, qs.block).Return(fmt.Errorf("invalid qc: %w", model.ErrInvalidSignature)) + qs.verifier.On("VerifyQC", qs.signers, qs.qc.SigData, qs.block).Return( + fmt.Errorf("invalid signer sig: %w", model.ErrInvalidSignature)) - // the QC should no longer be validation + // the QC should be considered invalid err := qs.validator.ValidateQC(qs.qc, qs.block) assert.True(qs.T(), model.IsInvalidBlockError(err), "if the signature is invalid an ErrorInvalidBlock error should be raised") } +// TestQCSignatureInvalidFormat verifies that the Validator correctly handles the model.InvalidFormatError. +// This error return from `Verifier.VerifyQC` is an expected failure case in case of a byzantine input, where +// some binary vector (e.g. `sigData`) is broken. Hence, the Validator should wrap it as InvalidBlockError. 
func (qs *QCSuite) TestQCSignatureInvalidFormat() { - // change the verifier to fail the QC signature *qs.verifier = mocks.Verifier{} - qs.verifier.On("VerifyQC", qs.signers, qs.qc.SigData, qs.block).Return(fmt.Errorf("%w", model.ErrInvalidFormat)) + qs.verifier.On("VerifyQC", qs.signers, qs.qc.SigData, qs.block).Return( + fmt.Errorf("%w", model.NewInvalidFormatErrorf("invalid sigType"))) - // the QC should no longer be validation + // the QC should be considered invalid err := qs.validator.ValidateQC(qs.qc, qs.block) assert.True(qs.T(), model.IsInvalidBlockError(err), "if the signature has an invalid format, an ErrorInvalidBlock error should be raised") } + +// TestQCEmptySigners verifies that the Validator correctly handles the model.InsufficientSignaturesError: +// In the validator, we previously checked the total weight of all signers meets the supermajority threshold, +// which is a _positive_ number. Hence, there must be at least one signer. Hence, `Verifier.VerifyQC` +// returning this error would be a symptom of a fatal internal bug. The Validator should _not_ interpret +// this error as an invalid QC / invalid block, i.e. it should _not_ return an `InvalidBlockError`. 
+func (qs *QCSuite) TestQCEmptySigners() { + *qs.verifier = mocks.Verifier{} + qs.verifier.On("VerifyQC", mock.Anything, qs.qc.SigData, qs.block).Return( + fmt.Errorf("%w", model.NewInsufficientSignaturesErrorf(""))) + + // the Validator should _not_ interpret this as an invalid QC, but as an internal error + err := qs.validator.ValidateQC(qs.qc, qs.block) + assert.True(qs.T(), model.IsInsufficientSignaturesError(err)) // unexpected error should be wrapped and propagated upwards + assert.False(qs.T(), model.IsInvalidBlockError(err), err, "should _not_ interpret this as a invalid QC, but as an internal error") +} diff --git a/consensus/hotstuff/verification/combined_signer_v2.go b/consensus/hotstuff/verification/combined_signer_v2.go index d6e226868c4..7c174bbe3de 100644 --- a/consensus/hotstuff/verification/combined_signer_v2.go +++ b/consensus/hotstuff/verification/combined_signer_v2.go @@ -5,11 +5,11 @@ import ( "fmt" "github.com/onflow/flow-go/consensus/hotstuff/model" - "github.com/onflow/flow-go/consensus/hotstuff/signature" "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/crypto/hash" "github.com/onflow/flow-go/model/encoding" "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/signature" ) // CombinedSigner creates votes for the main consensus. 
diff --git a/consensus/hotstuff/verification/combined_signer_v2_test.go b/consensus/hotstuff/verification/combined_signer_v2_test.go index 1479987cba2..b13d7bbbf9d 100644 --- a/consensus/hotstuff/verification/combined_signer_v2_test.go +++ b/consensus/hotstuff/verification/combined_signer_v2_test.go @@ -14,6 +14,7 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/local" modulemock "github.com/onflow/flow-go/module/mock" + msig "github.com/onflow/flow-go/module/signature" "github.com/onflow/flow-go/state/protocol" storagemock "github.com/onflow/flow-go/storage/mock" "github.com/onflow/flow-go/utils/unittest" @@ -79,7 +80,7 @@ func TestCombinedSignWithDKGKey(t *testing.T) { beaconSig, err := dkgKey.Sign(msg, crypto.NewBLSKMAC(encoding.RandomBeaconTag)) require.NoError(t, err) - expectedSig := signature.EncodeDoubleSig(stakingSig, beaconSig) + expectedSig := msig.EncodeDoubleSig(stakingSig, beaconSig) require.Equal(t, expectedSig, proposal.SigData) // vote should be valid @@ -184,8 +185,11 @@ func TestCombinedSignWithNoDKGKey(t *testing.T) { require.Equal(t, expectedStakingSig, crypto.Signature(proposal.SigData)) } -// Test_VerifyQC checks that a QC without any signers is rejected right away without calling into any sub-components -func Test_VerifyQC(t *testing.T) { +// Test_VerifyQC_EmptySigners checks that Verifier returns an `model.InsufficientSignaturesError` +// if `signers` input is empty or nil. This check should happen _before_ the Verifier calls into +// any sub-components, because some (e.g. `crypto.AggregateBLSPublicKeys`) don't provide sufficient +// sentinel errors to distinguish between internal problems and external byzantine inputs. 
+func Test_VerifyQC_EmptySigners(t *testing.T) { committee := &mocks.Committee{} packer := signature.NewConsensusSigDataPacker(committee) verifier := NewCombinedVerifier(committee, packer) @@ -195,8 +199,8 @@ func Test_VerifyQC(t *testing.T) { sigData := unittest.QCSigDataFixture() err := verifier.VerifyQC([]*flow.Identity{}, sigData, block) - require.ErrorIs(t, err, model.ErrInvalidFormat) + require.True(t, model.IsInsufficientSignaturesError(err)) err = verifier.VerifyQC(nil, sigData, block) - require.ErrorIs(t, err, model.ErrInvalidFormat) + require.True(t, model.IsInsufficientSignaturesError(err)) } diff --git a/consensus/hotstuff/verification/combined_signer_v3.go b/consensus/hotstuff/verification/combined_signer_v3.go index 83f59db83e9..708559e9eb7 100644 --- a/consensus/hotstuff/verification/combined_signer_v3.go +++ b/consensus/hotstuff/verification/combined_signer_v3.go @@ -4,13 +4,12 @@ import ( "errors" "fmt" - "github.com/onflow/flow-go/consensus/hotstuff" "github.com/onflow/flow-go/consensus/hotstuff/model" - "github.com/onflow/flow-go/consensus/hotstuff/signature" "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/crypto/hash" "github.com/onflow/flow-go/model/encoding" "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/signature" ) // CombinedSignerV3 creates votes for the main consensus. 
@@ -107,7 +106,7 @@ func (c *CombinedSignerV3) genSigData(block *model.Block) ([]byte, error) { return nil, fmt.Errorf("could not generate staking signature: %w", err) } - return signature.EncodeSingleSig(hotstuff.SigTypeStaking, stakingSig), nil + return signature.EncodeSingleSig(encoding.SigTypeStaking, stakingSig), nil } return nil, fmt.Errorf("could not get random beacon private key for view %d: %w", block.View, err) } @@ -119,5 +118,5 @@ func (c *CombinedSignerV3) genSigData(block *model.Block) ([]byte, error) { return nil, fmt.Errorf("could not generate beacon signature: %w", err) } - return signature.EncodeSingleSig(hotstuff.SigTypeRandomBeacon, beaconShare), nil + return signature.EncodeSingleSig(encoding.SigTypeRandomBeacon, beaconShare), nil } diff --git a/consensus/hotstuff/verification/combined_signer_v3_test.go b/consensus/hotstuff/verification/combined_signer_v3_test.go index 6d2b2b06db8..5515430a4e0 100644 --- a/consensus/hotstuff/verification/combined_signer_v3_test.go +++ b/consensus/hotstuff/verification/combined_signer_v3_test.go @@ -16,6 +16,7 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/local" modulemock "github.com/onflow/flow-go/module/mock" + msig "github.com/onflow/flow-go/module/signature" "github.com/onflow/flow-go/state/protocol" storagemock "github.com/onflow/flow-go/storage/mock" "github.com/onflow/flow-go/utils/unittest" @@ -76,7 +77,7 @@ func TestCombinedSignWithDKGKeyV3(t *testing.T) { beaconSig, err := dkgKey.Sign(msg, crypto.NewBLSKMAC(encoding.RandomBeaconTag)) require.NoError(t, err) - expectedSig := signature.EncodeSingleSig(hotstuff.SigTypeRandomBeacon, beaconSig) + expectedSig := msig.EncodeSingleSig(encoding.SigTypeRandomBeacon, beaconSig) require.Equal(t, expectedSig, proposal.SigData) // Vote from a node that is _not_ part of the Random Beacon committee should be rejected. 
@@ -145,7 +146,7 @@ func TestCombinedSignWithNoDKGKeyV3(t *testing.T) { stakingSig, err := stakingPriv.Sign(msg, crypto.NewBLSKMAC(encoding.ConsensusVoteTag)) require.NoError(t, err) - expectedSig := signature.EncodeSingleSig(hotstuff.SigTypeStaking, stakingSig) + expectedSig := msig.EncodeSingleSig(encoding.SigTypeStaking, stakingSig) // check the signature only has staking sig require.Equal(t, expectedSig, proposal.SigData) @@ -187,7 +188,7 @@ func Test_VerifyQCV3(t *testing.T) { // first, we check that our testing setup works for a correct QC t.Run("valid QC", func(t *testing.T) { packer := &mocks.Packer{} - packer.On("Unpack", block.BlockID, mock.Anything, packedSigData).Return(&unpackedSigData, nil) + packer.On("Unpack", mock.Anything, packedSigData).Return(&unpackedSigData, nil) verifier := NewCombinedVerifierV3(committee, packer) err := verifier.VerifyQC(allSigners, packedSigData, block) @@ -205,7 +206,7 @@ func Test_VerifyQCV3(t *testing.T) { sd.AggregatedStakingSig = []byte{} packer := &mocks.Packer{} - packer.On("Unpack", block.BlockID, mock.Anything, packedSigData).Return(&sd, nil) + packer.On("Unpack", mock.Anything, packedSigData).Return(&sd, nil) verifier := NewCombinedVerifierV3(committee, packer) err := verifier.VerifyQC(allSigners, packedSigData, block) require.NoError(t, err) @@ -220,10 +221,10 @@ func Test_VerifyQCV3(t *testing.T) { sd.StakingSigners = []flow.Identifier{} packer := &mocks.Packer{} - packer.On("Unpack", block.BlockID, mock.Anything, packedSigData).Return(&sd, nil) + packer.On("Unpack", mock.Anything, packedSigData).Return(&sd, nil) verifier := NewCombinedVerifierV3(committee, packer) err := verifier.VerifyQC(allSigners, packedSigData, block) - require.ErrorIs(t, err, model.ErrInvalidFormat) + require.True(t, model.IsInvalidFormatError(err)) }) // Modify the correct QC: empty list of random beacon signers. 
@@ -233,10 +234,10 @@ func Test_VerifyQCV3(t *testing.T) { sd.RandomBeaconSigners = []flow.Identifier{} packer := &mocks.Packer{} - packer.On("Unpack", block.BlockID, mock.Anything, packedSigData).Return(&sd, nil) + packer.On("Unpack", mock.Anything, packedSigData).Return(&sd, nil) verifier := NewCombinedVerifierV3(committee, packer) err := verifier.VerifyQC(allSigners, packedSigData, block) - require.ErrorIs(t, err, model.ErrInvalidFormat) + require.True(t, model.IsInvalidFormatError(err)) }) // Modify the correct QC: too few random beacon signers. @@ -249,14 +250,34 @@ func Test_VerifyQCV3(t *testing.T) { sd.AggregatedRandomBeaconSig = aggregatedSignature(t, privRbKeyShares[:5], msg, encoding.RandomBeaconTag) packer := &mocks.Packer{} - packer.On("Unpack", block.BlockID, mock.Anything, packedSigData).Return(&sd, nil) + packer.On("Unpack", mock.Anything, packedSigData).Return(&sd, nil) verifier := NewCombinedVerifierV3(committee, packer) err := verifier.VerifyQC(allSigners, packedSigData, block) - require.ErrorIs(t, err, model.ErrInvalidFormat) + require.True(t, model.IsInvalidFormatError(err)) }) } +// Test_VerifyQC_EmptySignersV3 checks that Verifier returns an `model.InsufficientSignaturesError` +// if `signers` input is empty or nil. This check should happen _before_ the Verifier calls into +// any sub-components, because some (e.g. `crypto.AggregateBLSPublicKeys`) don't provide sufficient +// sentinel errors to distinguish between internal problems and external byzantine inputs. 
+func Test_VerifyQC_EmptySignersV3(t *testing.T) { + committee := &mocks.Committee{} + packer := signature.NewConsensusSigDataPacker(committee) + verifier := NewCombinedVerifierV3(committee, packer) + + header := unittest.BlockHeaderFixture() + block := model.BlockFromFlow(&header, header.View-1) + sigData := unittest.QCSigDataFixture() + + err := verifier.VerifyQC([]*flow.Identity{}, sigData, block) + require.True(t, model.IsInsufficientSignaturesError(err)) + + err = verifier.VerifyQC(nil, sigData, block) + require.True(t, model.IsInsufficientSignaturesError(err)) +} + func generateIdentitiesForPrivateKeys(t *testing.T, pivKeys []crypto.PrivateKey) flow.IdentityList { ids := make([]*flow.Identity, 0, len(pivKeys)) for _, k := range pivKeys { diff --git a/consensus/hotstuff/verification/combined_verifier_v2.go b/consensus/hotstuff/verification/combined_verifier_v2.go index 250c579b229..57adfd4bcc4 100644 --- a/consensus/hotstuff/verification/combined_verifier_v2.go +++ b/consensus/hotstuff/verification/combined_verifier_v2.go @@ -4,15 +4,16 @@ package verification import ( + "errors" "fmt" "github.com/onflow/flow-go/consensus/hotstuff" "github.com/onflow/flow-go/consensus/hotstuff/model" - "github.com/onflow/flow-go/consensus/hotstuff/signature" "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/crypto/hash" "github.com/onflow/flow-go/model/encoding" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/signature" "github.com/onflow/flow-go/state/protocol" ) @@ -46,7 +47,7 @@ func NewCombinedVerifier(committee hotstuff.Committee, packer hotstuff.Packer) * // VerifyVote verifies the validity of a combined signature from a vote. // Usually this method is only used to verify the proposer's vote, which is // the vote included in a block proposal. -// * model.ErrInvalidFormat if the signature has an incompatible format. +// * model.InvalidFormatError if the signature has an incompatible format. 
// * model.ErrInvalidSignature is the signature is invalid // * model.InvalidSignerError if signer is _not_ part of the random beacon committee // * unexpected errors should be treated as symptoms of bugs or uncovered @@ -59,7 +60,10 @@ func (c *CombinedVerifier) VerifyVote(signer *flow.Identity, sigData []byte, blo // split the two signatures from the vote stakingSig, beaconShare, err := signature.DecodeDoubleSig(sigData) if err != nil { - return fmt.Errorf("could not split signature for block %v: %w", block.BlockID, err) + if errors.Is(err, signature.ErrInvalidSignatureFormat) { + return model.NewInvalidFormatErrorf("could not split signature for block %v: %w", block.BlockID, err) + } + return fmt.Errorf("unexpected internal error while splitting signature for block %v: %w", block.BlockID, err) } dkg, err := c.committee.DKG(block.BlockID) @@ -106,14 +110,17 @@ func (c *CombinedVerifier) VerifyVote(signer *flow.Identity, sigData []byte, blo // VerifyQC checks the cryptographic validity of the QC's `sigData` for the // given block. It is the responsibility of the calling code to ensure -// that all `voters` are authorized, without duplicates. Return values: +// that all `signers` are authorized, without duplicates. Return values: // - nil if `sigData` is cryptographically valid -// - model.ErrInvalidFormat if `sigData` has an incompatible format +// - model.InsufficientSignaturesError if `signers` is empty. +// Depending on the order of checks in the higher-level logic this error might +// be an indicator of an external byzantine input or an internal bug. +// - model.InvalidFormatError if `sigData` has an incompatible format // - model.ErrInvalidSignature if a signature is invalid // - error if running into any unexpected exception (i.e. 
fatal error) func (c *CombinedVerifier) VerifyQC(signers flow.IdentityList, sigData []byte, block *model.Block) error { if len(signers) == 0 { - return fmt.Errorf("empty list of signers: %w", model.ErrInvalidFormat) + return model.NewInsufficientSignaturesErrorf("empty list of signers") } dkg, err := c.committee.DKG(block.BlockID) if err != nil { @@ -121,7 +128,7 @@ func (c *CombinedVerifier) VerifyQC(signers flow.IdentityList, sigData []byte, b } // unpack sig data using packer - blockSigData, err := c.packer.Unpack(block.BlockID, signers.NodeIDs(), sigData) + blockSigData, err := c.packer.Unpack(signers, sigData) if err != nil { return fmt.Errorf("could not split signature: %w", err) } diff --git a/consensus/hotstuff/verification/combined_verifier_v3.go b/consensus/hotstuff/verification/combined_verifier_v3.go index 0c558686d50..21a99d2065e 100644 --- a/consensus/hotstuff/verification/combined_verifier_v3.go +++ b/consensus/hotstuff/verification/combined_verifier_v3.go @@ -4,11 +4,11 @@ package verification import ( + "errors" "fmt" "github.com/onflow/flow-go/consensus/hotstuff" "github.com/onflow/flow-go/consensus/hotstuff/model" - "github.com/onflow/flow-go/consensus/hotstuff/signature" "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/crypto/hash" "github.com/onflow/flow-go/model/encoding" @@ -46,7 +46,7 @@ func NewCombinedVerifierV3(committee hotstuff.Committee, packer hotstuff.Packer) // VerifyVote verifies the validity of a combined signature from a vote. // Usually this method is only used to verify the proposer's vote, which is // the vote included in a block proposal. -// * model.ErrInvalidFormat if the signature has an incompatible format. +// * model.InvalidFormatError if the signature has an incompatible format. 
// * model.ErrInvalidSignature is the signature is invalid // * model.InvalidSignerError if signer is _not_ part of the random beacon committee // * unexpected errors should be treated as symptoms of bugs or uncovered @@ -58,13 +58,16 @@ func (c *CombinedVerifierV3) VerifyVote(signer *flow.Identity, sigData []byte, b // create the to-be-signed message msg := MakeVoteMessage(block.View, block.BlockID) - sigType, sig, err := signature.DecodeSingleSig(sigData) + sigType, sig, err := msig.DecodeSingleSig(sigData) if err != nil { - return fmt.Errorf("could not decode signature for block %v: %w", block.BlockID, err) + if errors.Is(err, msig.ErrInvalidSignatureFormat) { + return model.NewInvalidFormatErrorf("could not decode signature for block %v: %w", block.BlockID, err) + } + return fmt.Errorf("unexpected internal error while decoding signature for block %v: %w", block.BlockID, err) } switch sigType { - case hotstuff.SigTypeStaking: + case encoding.SigTypeStaking: // verify each signature against the message stakingValid, err := signer.StakingPubKey.Verify(sig, msg, c.stakingHasher) if err != nil { @@ -74,7 +77,7 @@ func (c *CombinedVerifierV3) VerifyVote(signer *flow.Identity, sigData []byte, b return fmt.Errorf("invalid staking sig for block %v: %w", block.BlockID, model.ErrInvalidSignature) } - case hotstuff.SigTypeRandomBeacon: + case encoding.SigTypeRandomBeacon: dkg, err := c.committee.DKG(block.BlockID) if err != nil { return fmt.Errorf("could not get dkg: %w", err) @@ -97,7 +100,7 @@ func (c *CombinedVerifierV3) VerifyVote(signer *flow.Identity, sigData []byte, b } default: - return fmt.Errorf("invalid signature type %d: %w", sigType, model.ErrInvalidFormat) + return model.NewInvalidFormatErrorf("invalid signature type %d", sigType) } return nil @@ -105,15 +108,21 @@ func (c *CombinedVerifierV3) VerifyVote(signer *flow.Identity, sigData []byte, b // VerifyQC checks the cryptographic validity of the QC's `sigData` for the // given block. 
It is the responsibility of the calling code to ensure -// that all `voters` are authorized, without duplicates. Return values: +// that all `signers` are authorized, without duplicates. Return values: // - nil if `sigData` is cryptographically valid -// - model.ErrInvalidFormat if `sigData` has an incompatible format +// - model.InsufficientSignaturesError if `signers` is empty. +// Depending on the order of checks in the higher-level logic this error might +// be an indicator of a external byzantine input or an internal bug. +// - model.InvalidFormatError if `sigData` has an incompatible format // - model.ErrInvalidSignature if a signature is invalid // - model.InvalidSignerError if a signer is _not_ part of the random beacon committee // - error if running into any unexpected exception (i.e. fatal error) // This implementation already support the cases, where the DKG committee is a // _strict subset_ of the full consensus committee. func (c *CombinedVerifierV3) VerifyQC(signers flow.IdentityList, sigData []byte, block *model.Block) error { + if len(signers) == 0 { + return model.NewInsufficientSignaturesErrorf("empty list of signers") + } signerIdentities := signers.Lookup() dkg, err := c.committee.DKG(block.BlockID) if err != nil { @@ -121,7 +130,7 @@ func (c *CombinedVerifierV3) VerifyQC(signers flow.IdentityList, sigData []byte, } // unpack sig data using packer - blockSigData, err := c.packer.Unpack(block.BlockID, signers.NodeIDs(), sigData) + blockSigData, err := c.packer.Unpack(signers, sigData) if err != nil { return fmt.Errorf("could not split signature: %w", err) } @@ -168,7 +177,7 @@ func (c *CombinedVerifierV3) VerifyQC(signers flow.IdentityList, sigData []byte, // To construct a valid QC, the node generating it must have collected _more_ than `threshold` signatures. // Reporting fewer random beacon signers, the node is purposefully miss-representing node contributions. 
// We reject QCs with under-reported random beacon signers to reduce the surface of potential grieving attacks. - return fmt.Errorf("require at least %d random beacon sig shares but only got %d: %w", threshold+1, numRbSigners, model.ErrInvalidFormat) + return model.NewInvalidFormatErrorf("require at least %d random beacon sig shares but only got %d", threshold+1, numRbSigners) } beaconPubKeys := make([]crypto.PublicKey, 0, numRbSigners) for _, signerID := range blockSigData.RandomBeaconSigners { @@ -201,7 +210,7 @@ func (c *CombinedVerifierV3) VerifyQC(signers flow.IdentityList, sigData []byte, numStakingSigners := len(blockSigData.StakingSigners) if numStakingSigners == 0 { if len(blockSigData.AggregatedStakingSig) > 0 { - return fmt.Errorf("all replicas signed with random beacon keys, but QC has aggregated staking sig for block %v: %w", block.BlockID, model.ErrInvalidFormat) + return model.NewInvalidFormatErrorf("all replicas signed with random beacon keys, but QC has aggregated staking sig for block %v", block.BlockID) } // no aggregated staking sig to verify return nil diff --git a/consensus/hotstuff/verification/staking_signer_test.go b/consensus/hotstuff/verification/staking_signer_test.go index a7987bf8208..f9b083ca105 100644 --- a/consensus/hotstuff/verification/staking_signer_test.go +++ b/consensus/hotstuff/verification/staking_signer_test.go @@ -115,8 +115,8 @@ func TestStakingSigner_VerifyQC(t *testing.T) { verifier := NewStakingVerifier() err := verifier.VerifyQC([]*flow.Identity{}, sigData, block) - require.ErrorIs(t, err, model.ErrInvalidFormat) + require.True(t, model.IsInvalidFormatError(err)) err = verifier.VerifyQC(nil, sigData, block) - require.ErrorIs(t, err, model.ErrInvalidFormat) + require.True(t, model.IsInvalidFormatError(err)) } diff --git a/consensus/hotstuff/verification/staking_verifier.go b/consensus/hotstuff/verification/staking_verifier.go index a287aca4803..eed9abd6231 100644 --- 
a/consensus/hotstuff/verification/staking_verifier.go +++ b/consensus/hotstuff/verification/staking_verifier.go @@ -33,7 +33,7 @@ func NewStakingVerifier() *StakingVerifier { // Usually this method is only used to verify the proposer's vote, which is // the vote included in a block proposal. // The implementation returns the following sentinel errors: -// * model.ErrInvalidFormat if the signature has an incompatible format. +// * model.InvalidFormatError if the signature has an incompatible format. // * model.ErrInvalidSignature is the signature is invalid // * unexpected errors should be treated as symptoms of bugs or uncovered // edge cases in the logic (i.e. as fatal) @@ -58,14 +58,14 @@ func (v *StakingVerifier) VerifyVote(signer *flow.Identity, sigData []byte, bloc // given block. It is the responsibility of the calling code to ensure // that all `voters` are authorized, without duplicates. Return values: // - nil if `sigData` is cryptographically valid -// - model.ErrInvalidFormat if `sigData` has an incompatible format +// - model.InvalidFormatError if `sigData` has an incompatible format // - model.ErrInvalidSignature if a signature is invalid // - unexpected errors should be treated as symptoms of bugs or uncovered // edge cases in the logic (i.e. as fatal) // In the single verification case, `sigData` represents a single signature (`crypto.Signature`). func (v *StakingVerifier) VerifyQC(signers flow.IdentityList, sigData []byte, block *model.Block) error { if len(signers) == 0 { - return fmt.Errorf("empty list of signers: %w", model.ErrInvalidFormat) + return model.NewInvalidFormatErrorf("empty list of signers") } msg := MakeVoteMessage(block.View, block.BlockID) diff --git a/consensus/hotstuff/verifier.go b/consensus/hotstuff/verifier.go index fa7e2309295..ee71e2f4f09 100644 --- a/consensus/hotstuff/verifier.go +++ b/consensus/hotstuff/verifier.go @@ -25,7 +25,7 @@ type Verifier interface { // that `voter` is authorized to vote. 
// Return values: // * nil if `sigData` is cryptographically valid - // * model.ErrInvalidFormat if the signature has an incompatible format. + // * model.InvalidFormatError if the signature has an incompatible format. // * model.ErrInvalidSignature is the signature is invalid // * model.InvalidSignerError is only relevant for extended signature schemes, // where special signing authority is only given to a _subset_ of consensus @@ -37,10 +37,13 @@ type Verifier interface { // VerifyQC checks the cryptographic validity of a QC's `SigData` w.r.t. the // given block. It is the responsibility of the calling code to ensure that - // all `voters` are authorized, without duplicates. + // all `signers` are authorized, without duplicates. // Return values: // * nil if `sigData` is cryptographically valid - // * model.ErrInvalidFormat if `sigData` has an incompatible format + // * model.InvalidFormatError if `sigData` has an incompatible format + // * model.InsufficientSignaturesError if `signers` is empty. + // Depending on the order of checks in the higher-level logic this error might + // be an indicator of a external byzantine input or an internal bug. // * model.ErrInvalidSignature if a signature is invalid // * model.InvalidSignerError is only relevant for extended signature schemes, // where special signing authority is only given to a _subset_ of consensus @@ -48,5 +51,5 @@ type Verifier interface { // being authorized, an InvalidSignerError is returned. // * unexpected errors should be treated as symptoms of bugs or uncovered // edge cases in the logic (i.e. 
as fatal) - VerifyQC(voters flow.IdentityList, sigData []byte, block *model.Block) error + VerifyQC(signers flow.IdentityList, sigData []byte, block *model.Block) error } diff --git a/consensus/hotstuff/votecollector/combined_vote_processor_v2.go b/consensus/hotstuff/votecollector/combined_vote_processor_v2.go index e2e4c48409c..8f914112516 100644 --- a/consensus/hotstuff/votecollector/combined_vote_processor_v2.go +++ b/consensus/hotstuff/votecollector/combined_vote_processor_v2.go @@ -14,7 +14,6 @@ import ( "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/model/encoding" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/model/flow/filter" msig "github.com/onflow/flow-go/module/signature" ) @@ -38,7 +37,7 @@ type combinedVoteProcessorFactoryBaseV2 struct { // Create creates CombinedVoteProcessorV2 for processing votes for the given block. // Caller must treat all errors as exceptions func (f *combinedVoteProcessorFactoryBaseV2) Create(log zerolog.Logger, block *model.Block) (hotstuff.VerifyingVoteProcessor, error) { - allParticipants, err := f.committee.Identities(block.BlockID, filter.Any) + allParticipants, err := f.committee.Identities(block.BlockID) if err != nil { return nil, fmt.Errorf("error retrieving consensus participants at block %v: %w", block.BlockID, err) } @@ -168,9 +167,9 @@ func (p *CombinedVoteProcessorV2) Process(vote *model.Vote) error { if p.done.Load() { return nil } - stakingSig, randomBeaconSig, err := signature.DecodeDoubleSig(vote.SigData) + stakingSig, randomBeaconSig, err := msig.DecodeDoubleSig(vote.SigData) if err != nil { - if errors.Is(err, model.ErrInvalidFormat) { + if errors.Is(err, msig.ErrInvalidSignatureFormat) { return model.NewInvalidVoteErrorf(vote, "could not decode signature: %w", err) } return fmt.Errorf("unexpected error decoding vote %v: %w", vote.ID(), err) @@ -256,7 +255,7 @@ func (p *CombinedVoteProcessorV2) Process(vote *model.Vote) error { p.log.Info(). Uint64("view", qc.View). 
- Int("num_signers", len(qc.SignerIDs)). + Hex("signers", qc.SignerIndices). Msg("new qc has been created") p.onQCCreated(qc) @@ -307,15 +306,16 @@ func buildQCWithPackerAndSigData( block *model.Block, blockSigData *hotstuff.BlockSignatureData, ) (*flow.QuorumCertificate, error) { - signerIDs, sigData, err := packer.Pack(block.BlockID, blockSigData) + signerIndices, sigData, err := packer.Pack(block.BlockID, blockSigData) + if err != nil { return nil, fmt.Errorf("could not pack the block sig data: %w", err) } return &flow.QuorumCertificate{ - View: block.View, - BlockID: block.BlockID, - SignerIDs: signerIDs, - SigData: sigData, + View: block.View, + BlockID: block.BlockID, + SignerIndices: signerIndices, + SigData: sigData, }, nil } diff --git a/consensus/hotstuff/votecollector/combined_vote_processor_v2_test.go b/consensus/hotstuff/votecollector/combined_vote_processor_v2_test.go index 8d11b275a8d..953a9e8d309 100644 --- a/consensus/hotstuff/votecollector/combined_vote_processor_v2_test.go +++ b/consensus/hotstuff/votecollector/combined_vote_processor_v2_test.go @@ -27,6 +27,7 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/local" modulemock "github.com/onflow/flow-go/module/mock" + msig "github.com/onflow/flow-go/module/signature" "github.com/onflow/flow-go/state/protocol/inmem" "github.com/onflow/flow-go/state/protocol/seed" storagemock "github.com/onflow/flow-go/storage/mock" @@ -113,7 +114,7 @@ func (s *CombinedVoteProcessorV2TestSuite) TestProcess_InvalidSignatureFormat() // valid length is SigLen or 2*SigLen generator := rapid.IntRange(0, 128).Filter(func(value int) bool { - return value != hsig.SigLen && value != 2*hsig.SigLen + return value != msig.SigLen && value != 2*msig.SigLen }) rapid.Check(s.T(), func(t *rapid.T) { // create a signature with invalid length @@ -122,8 +123,8 @@ func (s *CombinedVoteProcessorV2TestSuite) TestProcess_InvalidSignatureFormat() }) err := s.processor.Process(vote) require.Error(s.T(), 
err) - require.True(s.T(), model.IsInvalidVoteError(err)) - require.ErrorAs(s.T(), err, &model.ErrInvalidFormat) + require.True(s.T(), model.IsInvalidVoteError(err), err) + require.True(s.T(), errors.Is(err, msig.ErrInvalidSignatureFormat), err) }) } @@ -285,7 +286,7 @@ func (s *CombinedVoteProcessorV2TestSuite) TestProcess_BuildQCError() { reconstructor.On("EnoughShares").Return(true) reconstructor.On("Reconstruct").Return(unittest.SignatureFixture(), nil) - packer.On("Pack", mock.Anything, mock.Anything).Return(identities, unittest.RandomBytes(128), nil) + packer.On("Pack", mock.Anything, mock.Anything).Return(unittest.RandomBytes(100), unittest.RandomBytes(128), nil) // Helper factory function to create processors. We need new processor for every test case // because QC creation is one time operation and is triggered as soon as we have collected enough weight and shares. @@ -398,7 +399,7 @@ func (s *CombinedVoteProcessorV2TestSuite) TestProcess_ConcurrentCreatingQC() { s.reconstructor.On("EnoughShares").Return(true) // at this point sending any vote should result in creating QC. 
- s.packer.On("Pack", s.proposal.Block.BlockID, mock.Anything).Return(stakingSigners, unittest.RandomBytes(128), nil) + s.packer.On("Pack", s.proposal.Block.BlockID, mock.Anything).Return(unittest.RandomBytes(100), unittest.RandomBytes(128), nil) s.onQCCreatedState.On("onQCCreated", mock.Anything).Return(nil).Once() var startupWg, shutdownWg sync.WaitGroup @@ -490,8 +491,8 @@ func TestCombinedVoteProcessorV2_PropertyCreatingQCCorrectness(testifyT *testing // mock expected call to Packer mergedSignerIDs := make([]flow.Identifier, 0) packedSigData := unittest.RandomBytes(128) - packer := &mockhotstuff.Packer{} - packer.On("Pack", block.BlockID, mock.Anything).Run(func(args mock.Arguments) { + pcker := &mockhotstuff.Packer{} + pcker.On("Pack", block.BlockID, mock.Anything).Run(func(args mock.Arguments) { blockSigData := args.Get(1).(*hotstuff.BlockSignatureData) // check that aggregated signers are part of all votes signers @@ -518,7 +519,10 @@ func TestCombinedVoteProcessorV2_PropertyCreatingQCCorrectness(testifyT *testing // fill merged signers with collected signers mergedSignerIDs = append(expectedBlockSigData.StakingSigners, expectedBlockSigData.RandomBeaconSigners...) 
}).Return( - func(flow.Identifier, *hotstuff.BlockSignatureData) []flow.Identifier { return mergedSignerIDs }, + func(flow.Identifier, *hotstuff.BlockSignatureData) []byte { + signerIndices, _ := msig.EncodeSignersToIndices(mergedSignerIDs, mergedSignerIDs) + return signerIndices + }, func(flow.Identifier, *hotstuff.BlockSignatureData) []byte { return packedSigData }, func(flow.Identifier, *hotstuff.BlockSignatureData) error { return nil }).Once() @@ -532,12 +536,15 @@ func TestCombinedVoteProcessorV2_PropertyCreatingQCCorrectness(testifyT *testing t.Fatalf("QC created more than once") } + signerIndices, err := msig.EncodeSignersToIndices(mergedSignerIDs, mergedSignerIDs) + require.NoError(t, err) + // ensure that QC contains correct field expectedQC := &flow.QuorumCertificate{ - View: block.View, - BlockID: block.BlockID, - SignerIDs: mergedSignerIDs, - SigData: packedSigData, + View: block.View, + BlockID: block.BlockID, + SignerIndices: signerIndices, + SigData: packedSigData, } require.Equalf(t, expectedQC, qc, "QC should be equal to what we expect") } @@ -548,7 +555,7 @@ func TestCombinedVoteProcessorV2_PropertyCreatingQCCorrectness(testifyT *testing stakingSigAggtor: stakingAggregator, rbRector: reconstructor, onQCCreated: onQCCreated, - packer: packer, + packer: pcker, minRequiredWeight: minRequiredWeight, done: *atomic.NewBool(false), } @@ -556,7 +563,7 @@ func TestCombinedVoteProcessorV2_PropertyCreatingQCCorrectness(testifyT *testing votes := make([]*model.Vote, 0, stakingSignersCount+beaconSignersCount) expectStakingAggregatorCalls := func(vote *model.Vote) { - expectedSig := crypto.Signature(vote.SigData[:hsig.SigLen]) + expectedSig := crypto.Signature(vote.SigData[:msig.SigLen]) stakingAggregator.On("Verify", vote.SignerID, expectedSig).Return(nil).Maybe() stakingAggregator.On("TrustedAdd", vote.SignerID, expectedSig).Run(func(args mock.Arguments) { signerID := args.Get(0).(flow.Identifier) @@ -579,7 +586,7 @@ func 
TestCombinedVoteProcessorV2_PropertyCreatingQCCorrectness(testifyT *testing vote := unittest.VoteForBlockFixture(processor.Block(), VoteWithDoubleSig()) vote.SignerID = signer expectStakingAggregatorCalls(vote) - expectedSig := crypto.Signature(vote.SigData[hsig.SigLen:]) + expectedSig := crypto.Signature(vote.SigData[msig.SigLen:]) reconstructor.On("Verify", vote.SignerID, expectedSig).Return(nil).Maybe() reconstructor.On("TrustedAdd", vote.SignerID, expectedSig).Run(func(args mock.Arguments) { collectedShares.Inc() @@ -672,14 +679,17 @@ func TestCombinedVoteProcessorV2_PropertyCreatingQCLiveness(testifyT *testing.T) // mock expected calls to aggregator and reconstructor combinedSigs := unittest.SignaturesFixture(2) - stakingAggregator.On("Aggregate").Return(stakingSigners.NodeIDs(), []byte(combinedSigs[0]), nil).Once() + stakingAggregator.On("Aggregate").Return([]flow.Identifier(stakingSigners.NodeIDs()), []byte(combinedSigs[0]), nil).Once() reconstructor.On("Reconstruct").Return(combinedSigs[1], nil).Once() // mock expected call to Packer mergedSignerIDs := append(stakingSigners.NodeIDs(), beaconSigners.NodeIDs()...) 
packedSigData := unittest.RandomBytes(128) - packer := &mockhotstuff.Packer{} - packer.On("Pack", block.BlockID, mock.Anything).Return(mergedSignerIDs, packedSigData, nil) + pcker := &mockhotstuff.Packer{} + + signerIndices, err := msig.EncodeSignersToIndices(mergedSignerIDs, mergedSignerIDs) + require.NoError(t, err) + pcker.On("Pack", block.BlockID, mock.Anything).Return(signerIndices, packedSigData, nil) // track if QC was created qcCreated := atomic.NewBool(false) @@ -698,7 +708,7 @@ func TestCombinedVoteProcessorV2_PropertyCreatingQCLiveness(testifyT *testing.T) stakingSigAggtor: stakingAggregator, rbRector: reconstructor, onQCCreated: onQCCreated, - packer: packer, + packer: pcker, minRequiredWeight: minRequiredWeight, done: *atomic.NewBool(false), } @@ -706,7 +716,7 @@ func TestCombinedVoteProcessorV2_PropertyCreatingQCLiveness(testifyT *testing.T) votes := make([]*model.Vote, 0, stakingSignersCount+beaconSignersCount) expectStakingAggregatorCalls := func(vote *model.Vote, weight uint64) { - expectedSig := crypto.Signature(vote.SigData[:hsig.SigLen]) + expectedSig := crypto.Signature(vote.SigData[:msig.SigLen]) stakingAggregator.On("Verify", vote.SignerID, expectedSig).Return(nil).Maybe() stakingAggregator.On("TrustedAdd", vote.SignerID, expectedSig).Run(func(args mock.Arguments) { stakingTotalWeight.Add(weight) @@ -724,7 +734,7 @@ func TestCombinedVoteProcessorV2_PropertyCreatingQCLiveness(testifyT *testing.T) vote := unittest.VoteForBlockFixture(processor.Block(), VoteWithDoubleSig()) vote.SignerID = signer.ID() expectStakingAggregatorCalls(vote, signer.Weight) - expectedSig := crypto.Signature(vote.SigData[hsig.SigLen:]) + expectedSig := crypto.Signature(vote.SigData[msig.SigLen:]) reconstructor.On("Verify", vote.SignerID, expectedSig).Return(nil).Maybe() reconstructor.On("TrustedAdd", vote.SignerID, expectedSig).Run(func(args mock.Arguments) { collectedShares.Inc() @@ -898,7 +908,7 @@ func TestCombinedVoteProcessorV2_BuildVerifyQC(t *testing.T) { func 
VoteWithStakingSig() func(*model.Vote) { return func(vote *model.Vote) { - vote.SigData = unittest.RandomBytes(hsig.SigLen) + vote.SigData = unittest.RandomBytes(msig.SigLen) } } diff --git a/consensus/hotstuff/votecollector/combined_vote_processor_v3.go b/consensus/hotstuff/votecollector/combined_vote_processor_v3.go index 73a98a00654..29d7126f76f 100644 --- a/consensus/hotstuff/votecollector/combined_vote_processor_v3.go +++ b/consensus/hotstuff/votecollector/combined_vote_processor_v3.go @@ -14,7 +14,6 @@ import ( "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/model/encoding" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/model/flow/filter" msig "github.com/onflow/flow-go/module/signature" ) @@ -40,7 +39,7 @@ type combinedVoteProcessorFactoryBaseV3 struct { // Caller must treat all errors as exceptions // nolint:unused func (f *combinedVoteProcessorFactoryBaseV3) Create(log zerolog.Logger, block *model.Block) (hotstuff.VerifyingVoteProcessor, error) { - allParticipants, err := f.committee.Identities(block.BlockID, filter.Any) + allParticipants, err := f.committee.Identities(block.BlockID) if err != nil { return nil, fmt.Errorf("error retrieving consensus participants at block %v: %w", block.BlockID, err) } @@ -165,9 +164,9 @@ func (p *CombinedVoteProcessorV3) Process(vote *model.Vote) error { if p.done.Load() { return nil } - sigType, sig, err := signature.DecodeSingleSig(vote.SigData) + sigType, sig, err := msig.DecodeSingleSig(vote.SigData) if err != nil { - if errors.Is(err, model.ErrInvalidFormat) { + if errors.Is(err, msig.ErrInvalidSignatureFormat) { return model.NewInvalidVoteErrorf(vote, "could not decode signature: %w", err) } return fmt.Errorf("unexpected error decoding vote %v: %w", vote.ID(), err) @@ -175,7 +174,7 @@ func (p *CombinedVoteProcessorV3) Process(vote *model.Vote) error { switch sigType { - case hotstuff.SigTypeStaking: + case encoding.SigTypeStaking: err := p.stakingSigAggtor.Verify(vote.SignerID, sig) 
if err != nil { if model.IsInvalidSignerError(err) { @@ -198,7 +197,7 @@ func (p *CombinedVoteProcessorV3) Process(vote *model.Vote) error { return fmt.Errorf("adding the signature to staking aggregator failed for vote %v: %w", vote.ID(), err) } - case hotstuff.SigTypeRandomBeacon: + case encoding.SigTypeRandomBeacon: err := p.rbSigAggtor.Verify(vote.SignerID, sig) if err != nil { if model.IsInvalidSignerError(err) { @@ -227,7 +226,7 @@ func (p *CombinedVoteProcessorV3) Process(vote *model.Vote) error { } default: - return model.NewInvalidVoteErrorf(vote, "invalid signature type %d: %w", sigType, model.ErrInvalidFormat) + return model.NewInvalidVoteErrorf(vote, "invalid signature type %d: %w", sigType, model.NewInvalidFormatErrorf("")) } // checking of conditions for building QC are satisfied @@ -254,7 +253,7 @@ func (p *CombinedVoteProcessorV3) Process(vote *model.Vote) error { p.log.Info(). Uint64("view", qc.View). - Int("num_signers", len(qc.SignerIDs)). + Hex("signers", qc.SignerIndices). 
Msg("new qc has been created") p.onQCCreated(qc) @@ -305,15 +304,15 @@ func (p *CombinedVoteProcessorV3) buildQC() (*flow.QuorumCertificate, error) { AggregatedRandomBeaconSig: aggregatedRandomBeaconSig, ReconstructedRandomBeaconSig: reconstructedBeaconSig, } - signerIDs, sigData, err := p.packer.Pack(p.block.BlockID, blockSigData) + signerIndices, sigData, err := p.packer.Pack(p.block.BlockID, blockSigData) if err != nil { return nil, fmt.Errorf("could not pack the block sig data: %w", err) } return &flow.QuorumCertificate{ - View: p.block.View, - BlockID: p.block.BlockID, - SignerIDs: signerIDs, - SigData: sigData, + View: p.block.View, + BlockID: p.block.BlockID, + SignerIndices: signerIndices, + SigData: sigData, }, nil } diff --git a/consensus/hotstuff/votecollector/combined_vote_processor_v3_test.go b/consensus/hotstuff/votecollector/combined_vote_processor_v3_test.go index d2997cbf569..0e45868c33b 100644 --- a/consensus/hotstuff/votecollector/combined_vote_processor_v3_test.go +++ b/consensus/hotstuff/votecollector/combined_vote_processor_v3_test.go @@ -26,6 +26,7 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/local" modulemock "github.com/onflow/flow-go/module/mock" + "github.com/onflow/flow-go/module/signature" "github.com/onflow/flow-go/state/protocol/inmem" storagemock "github.com/onflow/flow-go/storage/mock" "github.com/onflow/flow-go/utils/unittest" @@ -131,7 +132,7 @@ func (s *CombinedVoteProcessorV3TestSuite) TestProcess_InvalidSignatureFormat() err := s.processor.Process(vote) require.Error(s.T(), err) require.True(s.T(), model.IsInvalidVoteError(err)) - require.ErrorAs(s.T(), err, &model.ErrInvalidFormat) + require.True(s.T(), errors.Is(err, signature.ErrInvalidSignatureFormat), err) } // TestProcess_InvalidSignature tests that CombinedVoteProcessorV2 rejects invalid votes for the following scenarios: @@ -397,7 +398,7 @@ func (s *CombinedVoteProcessorV3TestSuite) TestProcess_ConcurrentCreatingQC() { 
s.reconstructor.On("EnoughShares").Return(true) // at this point sending any vote should result in creating QC. - s.packer.On("Pack", s.proposal.Block.BlockID, mock.Anything).Return(stakingSigners, unittest.RandomBytes(128), nil) + s.packer.On("Pack", s.proposal.Block.BlockID, mock.Anything).Return(unittest.RandomBytes(100), unittest.RandomBytes(128), nil) s.onQCCreatedState.On("onQCCreated", mock.Anything).Return(nil).Once() var startupWg, shutdownWg sync.WaitGroup @@ -511,8 +512,8 @@ func TestCombinedVoteProcessorV3_PropertyCreatingQCCorrectness(testifyT *testing // mock expected call to Packer mergedSignerIDs := ([]flow.Identifier)(nil) packedSigData := unittest.RandomBytes(128) - packer := &mockhotstuff.Packer{} - packer.On("Pack", block.BlockID, mock.Anything).Run(func(args mock.Arguments) { + pcker := &mockhotstuff.Packer{} + pcker.On("Pack", block.BlockID, mock.Anything).Run(func(args mock.Arguments) { blockSigData := args.Get(1).(*hotstuff.BlockSignatureData) // in the following, we check validity for each field of `blockSigData` individually @@ -564,7 +565,10 @@ func TestCombinedVoteProcessorV3_PropertyCreatingQCCorrectness(testifyT *testing // fill merged signers with collected signers mergedSignerIDs = append(blockSigData.StakingSigners, blockSigData.RandomBeaconSigners...) 
}).Return( - func(flow.Identifier, *hotstuff.BlockSignatureData) []flow.Identifier { return mergedSignerIDs }, + func(flow.Identifier, *hotstuff.BlockSignatureData) []byte { + signerIndices, _ := signature.EncodeSignersToIndices(mergedSignerIDs, mergedSignerIDs) + return signerIndices + }, func(flow.Identifier, *hotstuff.BlockSignatureData) []byte { return packedSigData }, func(flow.Identifier, *hotstuff.BlockSignatureData) error { return nil }).Once() @@ -578,12 +582,15 @@ func TestCombinedVoteProcessorV3_PropertyCreatingQCCorrectness(testifyT *testing t.Fatalf("QC created more than once") } + signerIndices, err := signature.EncodeSignersToIndices(mergedSignerIDs, mergedSignerIDs) + require.NoError(t, err) + // ensure that QC contains correct field expectedQC := &flow.QuorumCertificate{ - View: block.View, - BlockID: block.BlockID, - SignerIDs: mergedSignerIDs, - SigData: packedSigData, + View: block.View, + BlockID: block.BlockID, + SignerIndices: signerIndices, + SigData: packedSigData, } require.Equalf(t, expectedQC, qc, "QC should be equal to what we expect") } @@ -595,7 +602,7 @@ func TestCombinedVoteProcessorV3_PropertyCreatingQCCorrectness(testifyT *testing rbSigAggtor: rbSigAggregator, rbRector: reconstructor, onQCCreated: onQCCreated, - packer: packer, + packer: pcker, minRequiredWeight: minRequiredWeight, done: *atomic.NewBool(false), } @@ -725,7 +732,7 @@ func TestCombinedVoteProcessorV3_OnlyRandomBeaconSigners(testifyT *testing.T) { require.Empty(testifyT, blockSigData.StakingSigners) require.Empty(testifyT, blockSigData.AggregatedStakingSig) }). 
- Return(unittest.IdentifierListFixture(11), unittest.RandomBytes(1017), nil).Once() + Return(unittest.RandomBytes(100), unittest.RandomBytes(1017), nil).Once() err := processor.Process(vote) require.NoError(testifyT, err) @@ -806,14 +813,17 @@ func TestCombinedVoteProcessorV3_PropertyCreatingQCLiveness(testifyT *testing.T) } return nil }).Maybe() - rbSigAggregator.On("Aggregate").Return(beaconSigners.NodeIDs(), []byte(combinedSigs[1]), nil).Once() + rbSigAggregator.On("Aggregate").Return([]flow.Identifier(beaconSigners.NodeIDs()), []byte(combinedSigs[1]), nil).Once() reconstructor.On("Reconstruct").Return(combinedSigs[2], nil).Once() // mock expected call to Packer mergedSignerIDs := append(stakingSigners.NodeIDs(), beaconSigners.NodeIDs()...) packedSigData := unittest.RandomBytes(128) - packer := &mockhotstuff.Packer{} - packer.On("Pack", block.BlockID, mock.Anything).Return(mergedSignerIDs, packedSigData, nil) + pcker := &mockhotstuff.Packer{} + + signerIndices, err := signature.EncodeSignersToIndices(mergedSignerIDs, mergedSignerIDs) + require.NoError(t, err) + pcker.On("Pack", block.BlockID, mock.Anything).Return(signerIndices, packedSigData, nil) // track if QC was created qcCreated := atomic.NewBool(false) @@ -833,7 +843,7 @@ func TestCombinedVoteProcessorV3_PropertyCreatingQCLiveness(testifyT *testing.T) rbSigAggtor: rbSigAggregator, rbRector: reconstructor, onQCCreated: onQCCreated, - packer: packer, + packer: pcker, minRequiredWeight: minRequiredWeight, done: *atomic.NewBool(false), } diff --git a/consensus/hotstuff/votecollector/staking_vote_processor.go b/consensus/hotstuff/votecollector/staking_vote_processor.go index 78679a30c6d..0eaf52f755b 100644 --- a/consensus/hotstuff/votecollector/staking_vote_processor.go +++ b/consensus/hotstuff/votecollector/staking_vote_processor.go @@ -14,7 +14,7 @@ import ( "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/model/encoding" "github.com/onflow/flow-go/model/flow" - 
"github.com/onflow/flow-go/model/flow/filter" + msig "github.com/onflow/flow-go/module/signature" ) /* ***************** Base-Factory for StakingVoteProcessor ****************** */ @@ -36,7 +36,7 @@ type stakingVoteProcessorFactoryBase struct { // Create creates StakingVoteProcessor for processing votes for the given block. // Caller must treat all errors as exceptions func (f *stakingVoteProcessorFactoryBase) Create(log zerolog.Logger, block *model.Block) (hotstuff.VerifyingVoteProcessor, error) { - allParticipants, err := f.committee.Identities(block.BlockID, filter.Any) + allParticipants, err := f.committee.Identities(block.BlockID) if err != nil { return nil, fmt.Errorf("error retrieving consensus participants: %w", err) } @@ -64,6 +64,7 @@ func (f *stakingVoteProcessorFactoryBase) Create(log zerolog.Logger, block *mode onQCCreated: f.onQCCreated, minRequiredWeight: minRequiredWeight, done: *atomic.NewBool(false), + allParticipants: allParticipants, }, nil } @@ -80,6 +81,7 @@ type StakingVoteProcessor struct { onQCCreated hotstuff.OnQCCreated minRequiredWeight uint64 done atomic.Bool + allParticipants flow.IdentityList } // Block returns block that is part of proposal that we are processing votes for. 
@@ -163,10 +165,23 @@ func (p *StakingVoteProcessor) buildQC() (*flow.QuorumCertificate, error) { return nil, fmt.Errorf("could not aggregate staking signature: %w", err) } + signerIndices, err := p.signerIndicesFromIdentities(stakingSigners) + if err != nil { + return nil, fmt.Errorf("could not encode signer indices: %w", err) + } + return &flow.QuorumCertificate{ - View: p.block.View, - BlockID: p.block.BlockID, - SignerIDs: stakingSigners, - SigData: aggregatedStakingSig, + View: p.block.View, + BlockID: p.block.BlockID, + SignerIndices: signerIndices, + SigData: aggregatedStakingSig, }, nil } + +func (p *StakingVoteProcessor) signerIndicesFromIdentities(signerIDs flow.IdentifierList) ([]byte, error) { + signerIndices, err := msig.EncodeSignersToIndices(p.allParticipants.NodeIDs(), signerIDs) + if err != nil { + return nil, fmt.Errorf("could not encode signer identifiers to indices: %w", err) + } + return signerIndices, nil +} diff --git a/consensus/hotstuff/votecollector/staking_vote_processor_test.go b/consensus/hotstuff/votecollector/staking_vote_processor_test.go index 501487b1768..291d837e3ec 100644 --- a/consensus/hotstuff/votecollector/staking_vote_processor_test.go +++ b/consensus/hotstuff/votecollector/staking_vote_processor_test.go @@ -20,6 +20,7 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/local" modulemock "github.com/onflow/flow-go/module/mock" + "github.com/onflow/flow-go/module/signature" "github.com/onflow/flow-go/utils/unittest" ) @@ -31,11 +32,13 @@ func TestStakingVoteProcessor(t *testing.T) { type StakingVoteProcessorTestSuite struct { VoteProcessorTestSuiteBase - processor *StakingVoteProcessor + processor *StakingVoteProcessor + allParticipants flow.IdentityList } func (s *StakingVoteProcessorTestSuite) SetupTest() { s.VoteProcessorTestSuiteBase.SetupTest() + s.allParticipants = unittest.IdentityListFixture(14) s.processor = &StakingVoteProcessor{ log: unittest.Logger(), block: s.proposal.Block, @@ 
-43,6 +46,7 @@ func (s *StakingVoteProcessorTestSuite) SetupTest() { onQCCreated: s.onQCCreated, minRequiredWeight: s.minRequiredWeight, done: *atomic.NewBool(false), + allParticipants: s.allParticipants, } } @@ -150,22 +154,24 @@ func (s *StakingVoteProcessorTestSuite) TestProcess_NotEnoughStakingWeight() { // aggregator. func (s *StakingVoteProcessorTestSuite) TestProcess_CreatingQC() { // prepare test setup: 13 votes with staking sigs - stakingSigners := unittest.IdentifierListFixture(14) + stakingSigners := s.allParticipants[:14].NodeIDs() + signerIndices, err := signature.EncodeSignersToIndices(stakingSigners, stakingSigners) + require.NoError(s.T(), err) // setup aggregator *s.stakingAggregator = mockhotstuff.WeightedSignatureAggregator{} expectedSigData := unittest.RandomBytes(128) - s.stakingAggregator.On("Aggregate").Return(stakingSigners, expectedSigData, nil).Once() + s.stakingAggregator.On("Aggregate").Return([]flow.Identifier(stakingSigners), expectedSigData, nil).Once() // expected QC s.onQCCreatedState.On("onQCCreated", mock.Anything).Run(func(args mock.Arguments) { qc := args.Get(0).(*flow.QuorumCertificate) // ensure that QC contains correct field expectedQC := &flow.QuorumCertificate{ - View: s.proposal.Block.View, - BlockID: s.proposal.Block.BlockID, - SignerIDs: stakingSigners, - SigData: expectedSigData, + View: s.proposal.Block.View, + BlockID: s.proposal.Block.BlockID, + SignerIndices: signerIndices, + SigData: expectedSigData, } require.Equal(s.T(), expectedQC, qc) }).Return(nil).Once() @@ -189,7 +195,7 @@ func (s *StakingVoteProcessorTestSuite) TestProcess_CreatingQC() { // processing extra votes shouldn't result in creating new QCs vote := unittest.VoteForBlockFixture(s.proposal.Block) - err := s.processor.Process(vote) + err = s.processor.Process(vote) require.NoError(s.T(), err) s.onQCCreatedState.AssertExpectations(s.T()) @@ -198,12 +204,12 @@ func (s *StakingVoteProcessorTestSuite) TestProcess_CreatingQC() { // 
TestProcess_ConcurrentCreatingQC tests a scenario where multiple goroutines process vote at same time, // we expect only one QC created in this scenario. func (s *StakingVoteProcessorTestSuite) TestProcess_ConcurrentCreatingQC() { - stakingSigners := unittest.IdentifierListFixture(10) + stakingSigners := s.allParticipants[:10].NodeIDs() mockAggregator := func(aggregator *mockhotstuff.WeightedSignatureAggregator) { aggregator.On("Verify", mock.Anything, mock.Anything).Return(nil) aggregator.On("TrustedAdd", mock.Anything, mock.Anything).Return(s.minRequiredWeight, nil) aggregator.On("TotalWeight").Return(s.minRequiredWeight) - aggregator.On("Aggregate").Return(stakingSigners, unittest.RandomBytes(128), nil) + aggregator.On("Aggregate").Return([]flow.Identifier(stakingSigners), unittest.RandomBytes(128), nil) } // mock aggregators, so we have enough weight and shares for creating QC diff --git a/consensus/integration/signer_test.go b/consensus/integration/signer_test.go index eb91cb43e7a..ea443394d20 100644 --- a/consensus/integration/signer_test.go +++ b/consensus/integration/signer_test.go @@ -26,15 +26,11 @@ func (s *Signer) CreateVote(block *model.Block) (*model.Vote, error) { return vote, nil } func (*Signer) CreateQC(votes []*model.Vote) (*flow.QuorumCertificate, error) { - voterIDs := make([]flow.Identifier, 0, len(votes)) - for _, vote := range votes { - voterIDs = append(voterIDs, vote.SignerID) - } qc := &flow.QuorumCertificate{ - View: votes[0].View, - BlockID: votes[0].BlockID, - SignerIDs: voterIDs, - SigData: nil, + View: votes[0].View, + BlockID: votes[0].BlockID, + SignerIndices: nil, + SigData: nil, } return qc, nil } diff --git a/engine/access/access_test.go b/engine/access/access_test.go index 550c8526eff..23f20fa119d 100644 --- a/engine/access/access_test.go +++ b/engine/access/access_test.go @@ -28,9 +28,12 @@ import ( factorymock "github.com/onflow/flow-go/engine/access/rpc/backend/mock" "github.com/onflow/flow-go/engine/common/rpc/convert" 
"github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/flow/factory" + "github.com/onflow/flow-go/model/flow/filter" "github.com/onflow/flow-go/module/mempool/stdmap" "github.com/onflow/flow-go/module/metrics" module "github.com/onflow/flow-go/module/mock" + "github.com/onflow/flow-go/module/signature" "github.com/onflow/flow-go/network/mocknetwork" protocol "github.com/onflow/flow-go/state/protocol/mock" storage "github.com/onflow/flow-go/storage/badger" @@ -244,7 +247,7 @@ func (suite *Suite) TestSendTransactionToRandomCollectionNode() { count := 2 collNodes := unittest.IdentityListFixture(count, unittest.WithRole(flow.RoleCollection)) assignments := unittest.ClusterAssignment(uint(count), collNodes) - clusters, err := flow.NewClusterList(assignments, collNodes) + clusters, err := factory.NewClusterList(assignments, collNodes) suite.Require().Nil(err) collNode1 := clusters[0][0] collNode2 := clusters[1][0] @@ -812,13 +815,32 @@ func (suite *Suite) TestExecuteScript() { func (suite *Suite) createChain() (flow.Block, flow.Collection) { collection := unittest.CollectionFixture(10) + refBlockID := unittest.IdentifierFixture() + // prepare cluster committee members + clusterCommittee := unittest.IdentityListFixture(32 * 4).Filter(filter.HasRole(flow.RoleCollection)) + // guarantee signers must be cluster committee members, so that access will fetch collection from + // the signers that are specified by guarantee.SignerIndices + indices, err := signature.EncodeSignersToIndices(clusterCommittee.NodeIDs(), clusterCommittee.NodeIDs()) + require.NoError(suite.T(), err) guarantee := &flow.CollectionGuarantee{ - CollectionID: collection.ID(), - Signature: crypto.Signature([]byte("signature A")), + CollectionID: collection.ID(), + Signature: crypto.Signature([]byte("signature A")), + ReferenceBlockID: refBlockID, + SignerIndices: indices, } block := unittest.BlockFixture() block.Payload.Guarantees = []*flow.CollectionGuarantee{guarantee} 
block.Header.PayloadHash = block.Payload.Hash() + cluster := new(protocol.Cluster) + cluster.On("Members").Return(clusterCommittee, nil) + epoch := new(protocol.Epoch) + epoch.On("ClusterByChainID", mock.Anything).Return(cluster, nil) + epochs := new(protocol.EpochQuery) + epochs.On("Current").Return(epoch) + snap := new(protocol.Snapshot) + snap.On("Epochs").Return(epochs) + suite.state.On("AtBlockID", refBlockID).Return(snap) + return block, collection } diff --git a/engine/access/ingestion/engine.go b/engine/access/ingestion/engine.go index 9701ed8aec8..ebe6afe6b60 100644 --- a/engine/access/ingestion/engine.go +++ b/engine/access/ingestion/engine.go @@ -238,7 +238,7 @@ func (e *Engine) processFinalizedBlock(blockID flow.Identifier) error { } // queue requesting each of the collections from the collection node - e.requestCollections(block.Payload.Guarantees) + e.requestCollectionsInFinalizedBlock(block.Payload.Guarantees) return nil } @@ -438,7 +438,7 @@ func (e *Engine) requestMissingCollections(ctx context.Context) error { } // request the missing collections - e.requestCollections(missingColls) + e.requestCollectionsInFinalizedBlock(missingColls) // add them to the missing collection id map to track later for _, cg := range missingColls { @@ -583,7 +583,7 @@ func (e *Engine) updateLastFullBlockReceivedIndex() { Int("threshold", defaultMissingCollsForBlkThreshold). Uint64("last_full_blk_height", latestFullHeight). 
Msg("re-requesting missing collections") - e.requestCollections(allMissingColls) + e.requestCollectionsInFinalizedBlock(allMissingColls) } e.log.Debug().Uint64("last_full_blk_height", latestFullHeight).Msg("updated LastFullBlockReceived index") @@ -623,9 +623,14 @@ func (e *Engine) lookupCollection(collId flow.Identifier) (bool, error) { return false, fmt.Errorf("failed to retreive collection %s: %w", collId.String(), err) } -// requestCollections registers collection requests with the requester engine -func (e *Engine) requestCollections(missingColls []*flow.CollectionGuarantee) { +// requestCollectionsInFinalizedBlock registers collection requests with the requester engine +func (e *Engine) requestCollectionsInFinalizedBlock(missingColls []*flow.CollectionGuarantee) { for _, cg := range missingColls { - e.request.EntityByID(cg.ID(), filter.HasNodeID(cg.SignerIDs...)) + // TODO: move this query out of for loop? + guarantors, err := protocol.FindGuarantors(e.state, cg) + if err != nil { + e.log.Fatal().Err(err).Msgf("could not find guarantors for guarantee %v", cg.ID()) + } + e.request.EntityByID(cg.ID(), filter.HasNodeID(guarantors...)) } } diff --git a/engine/access/ingestion/engine_test.go b/engine/access/ingestion/engine_test.go index 165374817e1..6dcb761aa14 100644 --- a/engine/access/ingestion/engine_test.go +++ b/engine/access/ingestion/engine_test.go @@ -17,11 +17,13 @@ import ( "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/engine/access/rpc" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/flow/filter" "github.com/onflow/flow-go/network/mocknetwork" "github.com/onflow/flow-go/module/mempool/stdmap" "github.com/onflow/flow-go/module/metrics" module "github.com/onflow/flow-go/module/mock" + "github.com/onflow/flow-go/module/signature" protocol "github.com/onflow/flow-go/state/protocol/mock" storerr "github.com/onflow/flow-go/storage" storage "github.com/onflow/flow-go/storage/mock" @@ -112,6 +114,19 @@ func (suite 
*Suite) TestOnFinalizedBlock() { unittest.WithGuarantees(unittest.CollectionGuaranteesFixture(4)...), unittest.WithExecutionResults(unittest.ExecutionResultFixture()), )) + + // prepare cluster committee members + clusterCommittee := unittest.IdentityListFixture(32 * 4).Filter(filter.HasRole(flow.RoleCollection)) + refBlockID := unittest.IdentifierFixture() + for _, guarantee := range block.Payload.Guarantees { + guarantee.ReferenceBlockID = refBlockID + // guarantee signers must be cluster committee members, so that access will fetch collection from + // the signers that are specified by guarantee.SignerIndices + indices, err := signature.EncodeSignersToIndices(clusterCommittee.NodeIDs(), clusterCommittee.NodeIDs()) + require.NoError(suite.T(), err) + guarantee.SignerIndices = indices + } + hotstuffBlock := hotmodel.Block{ BlockID: block.ID(), } @@ -127,6 +142,15 @@ func (suite *Suite) TestOnFinalizedBlock() { // expect that the block storage is indexed with each of the collection guarantee suite.blocks.On("IndexBlockForCollections", block.ID(), flow.GetIDs(block.Payload.Guarantees)).Return(nil).Once() + cluster := new(protocol.Cluster) + cluster.On("Members").Return(clusterCommittee, nil) + epoch := new(protocol.Epoch) + epoch.On("ClusterByChainID", mock.Anything).Return(cluster, nil) + epochs := new(protocol.EpochQuery) + epochs.On("Current").Return(epoch) + snap := new(protocol.Snapshot) + snap.On("Epochs").Return(epochs) + suite.proto.state.On("AtBlockID", refBlockID).Return(snap) suite.results.On("Index", mock.Anything, mock.Anything).Return(nil) // for each of the guarantees, we should request the corresponding collection once @@ -298,12 +322,17 @@ func (suite *Suite) TestRequestMissingCollections() { blocks := make([]flow.Block, blkCnt) heightMap := make(map[uint64]*flow.Block, blkCnt) + // prepare cluster committee members + clusterCommittee := unittest.IdentityListFixture(32 * 4).Filter(filter.HasRole(flow.RoleCollection)) + // generate the test blocks 
and collections var collIDs []flow.Identifier + refBlockID := unittest.IdentifierFixture() for i := 0; i < blkCnt; i++ { block := unittest.BlockFixture() block.SetPayload(unittest.PayloadFixture( - unittest.WithGuarantees(unittest.CollectionGuaranteesFixture(4)...), + unittest.WithGuarantees( + unittest.CollectionGuaranteesFixture(4, unittest.WithCollRef(refBlockID))...), )) // some blocks may not be present hence add a gap height := startHeight + uint64(i) @@ -312,6 +341,13 @@ func (suite *Suite) TestRequestMissingCollections() { heightMap[height] = &block for _, c := range block.Payload.Guarantees { collIDs = append(collIDs, c.CollectionID) + c.ReferenceBlockID = refBlockID + + // guarantee signers must be cluster committee members, so that access will fetch collection from + // the signers that are specified by guarantee.SignerIndices + indices, err := signature.EncodeSignersToIndices(clusterCommittee.NodeIDs(), clusterCommittee.NodeIDs()) + require.NoError(suite.T(), err) + c.SignerIndices = indices } } @@ -366,6 +402,16 @@ func (suite *Suite) TestRequestMissingCollections() { // force should be called once suite.request.On("Force").Return() + cluster := new(protocol.Cluster) + cluster.On("Members").Return(clusterCommittee, nil) + epoch := new(protocol.Epoch) + epoch.On("ClusterByChainID", mock.Anything).Return(cluster, nil) + epochs := new(protocol.EpochQuery) + epochs.On("Current").Return(epoch) + snap := new(protocol.Snapshot) + snap.On("Epochs").Return(epochs) + suite.proto.state.On("AtBlockID", refBlockID).Return(snap) + assertExpectations := func() { suite.request.AssertExpectations(suite.T()) suite.collections.AssertExpectations(suite.T()) @@ -418,6 +464,10 @@ func (suite *Suite) TestUpdateLastFullBlockReceivedIndex() { heightMap := make(map[uint64]*flow.Block, blkCnt) collMap := make(map[flow.Identifier]*flow.LightCollection, blkCnt*collPerBlk) + // prepare cluster committee members + clusterCommittee := unittest.IdentityListFixture(32 * 
4).Filter(filter.HasRole(flow.RoleCollection)) + + refBlockID := unittest.IdentifierFixture() // generate the test blocks, cgs and collections for i := 0; i < blkCnt; i++ { guarantees := make([]*flow.CollectionGuarantee, collPerBlk) @@ -426,7 +476,14 @@ func (suite *Suite) TestUpdateLastFullBlockReceivedIndex() { collMap[coll.ID()] = &coll cg := unittest.CollectionGuaranteeFixture(func(cg *flow.CollectionGuarantee) { cg.CollectionID = coll.ID() + cg.ReferenceBlockID = refBlockID }) + + // guarantee signers must be cluster committee members, so that access will fetch collection from + // the signers that are specified by guarantee.SignerIndices + indices, err := signature.EncodeSignersToIndices(clusterCommittee.NodeIDs(), clusterCommittee.NodeIDs()) + require.NoError(suite.T(), err) + cg.SignerIndices = indices guarantees[j] = cg } block := unittest.BlockFixture() @@ -457,6 +514,16 @@ func (suite *Suite) TestUpdateLastFullBlockReceivedIndex() { return storerr.ErrNotFound }) + cluster := new(protocol.Cluster) + cluster.On("Members").Return(clusterCommittee, nil) + epoch := new(protocol.Epoch) + epoch.On("ClusterByChainID", mock.Anything).Return(cluster, nil) + epochs := new(protocol.EpochQuery) + epochs.On("Current").Return(epoch) + snap := new(protocol.Snapshot) + snap.On("Epochs").Return(epochs) + suite.proto.state.On("AtBlockID", refBlockID).Return(snap) + // blkMissingColl controls which collections are reported as missing by the collections storage mock blkMissingColl := make([]bool, blkCnt) for i := 0; i < blkCnt; i++ { diff --git a/engine/access/rest/models/collection.go b/engine/access/rest/models/collection.go index db701ddfef0..c5076fdc7db 100644 --- a/engine/access/rest/models/collection.go +++ b/engine/access/rest/models/collection.go @@ -1,6 +1,8 @@ package models import ( + "fmt" + "github.com/onflow/flow-go/engine/access/rest/util" "github.com/onflow/flow-go/model/flow" ) @@ -41,13 +43,8 @@ func (c *Collection) Build( } func (c *CollectionGuarantee) 
Build(guarantee *flow.CollectionGuarantee) { - signerIDs := make([]string, len(guarantee.SignerIDs)) - for i, signerID := range guarantee.SignerIDs { - signerIDs[i] = signerID.String() - } - c.CollectionId = guarantee.CollectionID.String() - c.SignerIds = signerIDs + c.SignerIndices = fmt.Sprintf("%x", guarantee.SignerIndices) c.Signature = util.ToBase64(guarantee.Signature.Bytes()) } diff --git a/engine/access/rest/models/model_collection_guarantee.go b/engine/access/rest/models/model_collection_guarantee.go index 51dd1097623..c41908c9951 100644 --- a/engine/access/rest/models/model_collection_guarantee.go +++ b/engine/access/rest/models/model_collection_guarantee.go @@ -9,7 +9,7 @@ package models type CollectionGuarantee struct { - CollectionId string `json:"collection_id"` - SignerIds []string `json:"signer_ids"` - Signature string `json:"signature"` + CollectionId string `json:"collection_id"` + SignerIndices string `json:"signer_indices"` + Signature string `json:"signature"` } diff --git a/engine/access/rest/util/example_select_filter.json b/engine/access/rest/util/example_select_filter.json new file mode 100644 index 00000000000..3585490fde3 --- /dev/null +++ b/engine/access/rest/util/example_select_filter.json @@ -0,0 +1,72 @@ +[ + { + "execution_result": { + "events": [ + { + "event_index": "2" + }, + { + "event_index": "3" + } + ] + }, + "header": { + "id": "abcd" + }, + "payload": { + "block_seals": [ + { + "aggregated_approval_signatures": [ + { + "signer_ids": [ + "abcdef0123456789", + "abcdef0123456789" + ] + } + ] + } + ], + "collection_guarantees": [ + { + "signature": "abcdef0123456789", + "signer_indices": "01" + } + ] + } + }, + { + "execution_result": { + "events": [ + { + "event_index": "2" + }, + { + "event_index": "3" + } + ] + }, + "header": { + "id": "abcd" + }, + "payload": { + "block_seals": [ + { + "aggregated_approval_signatures": [ + { + "signer_ids": [ + "abcdef0123456789", + "abcdef0123456789" + ] + } + ] + } + ], + 
"collection_guarantees": [ + { + "signature": "abcdef0123456789", + "signer_indices": "01" + } + ] + } + } +] \ No newline at end of file diff --git a/engine/access/rest/util/select_filter_test.go b/engine/access/rest/util/select_filter_test.go index 8c605b5db27..eb50bf7fc83 100644 --- a/engine/access/rest/util/select_filter_test.go +++ b/engine/access/rest/util/select_filter_test.go @@ -3,6 +3,7 @@ package util_test import ( "encoding/json" "fmt" + "io/ioutil" "strings" "testing" "time" @@ -76,15 +77,12 @@ func testFilter(t *testing.T, inputJson, exepectedJson string, description strin require.JSONEqf(t, exepectedJson, actualJson, description) } -func ExampleSelectFilter() { +func TestExampleSelectFilter(t *testing.T) { blocks := make([]models.Block, 2) for i := range blocks { block, err := generateBlock() - if err != nil { - fmt.Println(err) - return - } + require.NoError(t, err) blocks[i] = block } @@ -92,101 +90,24 @@ func ExampleSelectFilter() { "header.id", "payload.collection_guarantees.signature", "payload.block_seals.aggregated_approval_signatures.signer_ids", - "payload.collection_guarantees.signer_ids", + "payload.collection_guarantees.signer_indices", "execution_result.events.event_index", "something.nonexisting", } filteredBlock, err := util.SelectFilter(blocks, selectKeys) - if err != nil { - fmt.Println(err) - return - } + require.NoError(t, err) marshalled, err := json.MarshalIndent(filteredBlock, "", "\t") - if err != nil { - panic(err.Error()) - } - fmt.Println(string(marshalled)) - // Output: - //[ - // { - // "execution_result": { - // "events": [ - // { - // "event_index": "2" - // }, - // { - // "event_index": "3" - // } - // ] - // }, - // "header": { - // "id": "abcd" - // }, - // "payload": { - // "block_seals": [ - // { - // "aggregated_approval_signatures": [ - // { - // "signer_ids": [ - // "abcdef0123456789", - // "abcdef0123456789" - // ] - // } - // ] - // } - // ], - // "collection_guarantees": [ - // { - // "signature": 
"abcdef0123456789", - // "signer_ids": [ - // "abcdef0123456789", - // "abcdef0123456789" - // ] - // } - // ] - // } - // }, - // { - // "execution_result": { - // "events": [ - // { - // "event_index": "2" - // }, - // { - // "event_index": "3" - // } - // ] - // }, - // "header": { - // "id": "abcd" - // }, - // "payload": { - // "block_seals": [ - // { - // "aggregated_approval_signatures": [ - // { - // "signer_ids": [ - // "abcdef0123456789", - // "abcdef0123456789" - // ] - // } - // ] - // } - // ], - // "collection_guarantees": [ - // { - // "signature": "abcdef0123456789", - // "signer_ids": [ - // "abcdef0123456789", - // "abcdef0123456789" - // ] - // } - // ] - // } - // } - //] + require.NoError(t, err) + + // enable to update test case if there is change in the models.Block struct + // _ = ioutil.WriteFile("example_select_filter.json", marshalled, 0644) + + byteValue, err := ioutil.ReadFile("example_select_filter.json") + require.NoError(t, err) + + require.Equal(t, string(byteValue), string(marshalled)) } func generateBlock() (models.Block, error) { @@ -211,9 +132,9 @@ func generateBlock() (models.Block, error) { Payload: &models.BlockPayload{ CollectionGuarantees: []models.CollectionGuarantee{ { - CollectionId: "abcdef0123456789", - SignerIds: multipleDummySignatures, - Signature: dummySignature, + CollectionId: "abcdef0123456789", + SignerIndices: fmt.Sprintf("%x", []byte{1}), + Signature: dummySignature, }, }, BlockSeals: []models.BlockSeal{ diff --git a/engine/collection/compliance/core.go b/engine/collection/compliance/core.go index 56ddab4c02f..afd73687b98 100644 --- a/engine/collection/compliance/core.go +++ b/engine/collection/compliance/core.go @@ -94,7 +94,7 @@ func (c *Core) OnBlockProposal(originID flow.Identifier, proposal *messages.Clus Int("tx_count", proposal.Payload.Collection.Len()). Time("timestamp", header.Timestamp). Hex("proposer", header.ProposerID[:]). - Int("num_signers", len(header.ParentVoterIDs)). 
+ Hex("signers", header.ParentVoterIndices). Logger() log.Info().Msg("block proposal received") @@ -270,7 +270,7 @@ func (c *Core) processBlockProposal(proposal *messages.ClusterBlockProposal) err Hex("payload_hash", header.PayloadHash[:]). Time("timestamp", header.Timestamp). Hex("proposer", header.ProposerID[:]). - Int("num_signers", len(header.ParentVoterIDs)). + Hex("parent_signer_indices", header.ParentVoterIndices). Logger() log.Info().Msg("processing block proposal") diff --git a/engine/collection/compliance/engine.go b/engine/collection/compliance/engine.go index c33aaa4b89a..dd8f05d1e06 100644 --- a/engine/collection/compliance/engine.go +++ b/engine/collection/compliance/engine.go @@ -359,20 +359,25 @@ func (e *Engine) BroadcastProposalWithDelay(header *flow.Header, delay time.Dura header.ChainID = parent.ChainID header.Height = parent.Height + 1 - log := e.log.With(). - Hex("block_id", logging.ID(header.ID())). - Uint64("block_height", header.Height). - Logger() - - log.Debug().Msg("preparing to broadcast proposal from hotstuff") - // retrieve the payload for the block payload, err := e.payloads.ByBlockID(header.ID()) if err != nil { return fmt.Errorf("could not get payload for block: %w", err) } - log = log.With().Int("collection_size", payload.Collection.Len()).Logger() + log := e.log.With(). + Str("chain_id", header.ChainID.String()). + Uint64("block_height", header.Height). + Uint64("block_view", header.View). + Hex("block_id", logging.ID(header.ID())). + Hex("parent_id", header.ParentID[:]). + Hex("ref_block", payload.ReferenceBlockID[:]). + Int("transaction_count", payload.Collection.Len()). + Hex("parent_signer_indices", header.ParentVoterIndices). + Dur("delay", delay). 
+ Logger() + + log.Debug().Msg("processing cluster broadcast request from hotstuff") // retrieve all collection nodes in our cluster recipients, err := e.state.Final().Identities(filter.And( @@ -402,9 +407,7 @@ func (e *Engine) BroadcastProposalWithDelay(header *flow.Header, delay time.Dura return } - log.Debug(). - Str("recipients", fmt.Sprintf("%v", recipients.NodeIDs())). - Msg("broadcast proposal from hotstuff") + log.Info().Msg("cluster proposal proposed") e.metrics.MessageSent(metrics.EngineClusterCompliance, metrics.MessageClusterBlockProposal) block := &cluster.Block{ diff --git a/engine/collection/ingest/engine_test.go b/engine/collection/ingest/engine_test.go index 009bce4eb5f..7c372e45f94 100644 --- a/engine/collection/ingest/engine_test.go +++ b/engine/collection/ingest/engine_test.go @@ -13,6 +13,7 @@ import ( "github.com/onflow/flow-go/access" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/flow/factory" "github.com/onflow/flow-go/model/flow/filter" "github.com/onflow/flow-go/module/component" "github.com/onflow/flow-go/module/irrecoverable" @@ -87,7 +88,7 @@ func (suite *Suite) SetupTest() { }) assignments := unittest.ClusterAssignment(suite.N_CLUSTERS, collectors) - suite.clusters, err = flow.NewClusterList(assignments, collectors) + suite.clusters, err = factory.NewClusterList(assignments, collectors) suite.Require().NoError(err) suite.root = unittest.GenesisFixture() @@ -490,7 +491,7 @@ func (suite *Suite) TestRouting_ClusterAssignmentRemoved() { Filter(filter.Not(filter.HasNodeID(suite.me.NodeID()))). 
Filter(filter.HasRole(flow.RoleCollection)) epoch2Assignment := unittest.ClusterAssignment(suite.N_CLUSTERS, withoutMe) - epoch2Clusters, err := flow.NewClusterList(epoch2Assignment, withoutMe) + epoch2Clusters, err := factory.NewClusterList(epoch2Assignment, withoutMe) suite.Require().NoError(err) epoch2 := new(protocol.Epoch) @@ -529,7 +530,7 @@ func (suite *Suite) TestRouting_ClusterAssignmentAdded() { Filter(filter.Not(filter.HasNodeID(suite.me.NodeID()))). Filter(filter.HasRole(flow.RoleCollection)) epoch2Assignment := unittest.ClusterAssignment(suite.N_CLUSTERS, withoutMe) - epoch2Clusters, err := flow.NewClusterList(epoch2Assignment, withoutMe) + epoch2Clusters, err := factory.NewClusterList(epoch2Assignment, withoutMe) suite.Require().NoError(err) epoch2 := new(protocol.Epoch) @@ -558,7 +559,7 @@ func (suite *Suite) TestRouting_ClusterAssignmentAdded() { // include ourselves in cluster assignment withMe := suite.identities.Filter(filter.HasRole(flow.RoleCollection)) epoch3Assignment := unittest.ClusterAssignment(suite.N_CLUSTERS, withMe) - epoch3Clusters, err := flow.NewClusterList(epoch3Assignment, withMe) + epoch3Clusters, err := factory.NewClusterList(epoch3Assignment, withMe) suite.Require().NoError(err) epoch3 := new(protocol.Epoch) diff --git a/engine/collection/test/cluster_switchover_test.go b/engine/collection/test/cluster_switchover_test.go index dd7e4338446..63b678f5f76 100644 --- a/engine/collection/test/cluster_switchover_test.go +++ b/engine/collection/test/cluster_switchover_test.go @@ -14,6 +14,7 @@ import ( testmock "github.com/onflow/flow-go/engine/testutil/mock" model "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/flow/factory" "github.com/onflow/flow-go/model/flow/filter" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/util" @@ -59,20 +60,27 @@ func NewClusterSwitchoverTestCase(t *testing.T, conf ClusterSwitchoverTestConf) collectors := 
model.ToIdentityList(nodeInfos) tc.identities = unittest.CompleteIdentitySet(collectors...) assignment := unittest.ClusterAssignment(tc.conf.clusters, collectors) - clusters, err := flow.NewClusterList(assignment, collectors) + clusters, err := factory.NewClusterList(assignment, collectors) require.NoError(t, err) rootClusterBlocks := run.GenerateRootClusterBlocks(1, clusters) rootClusterQCs := make([]flow.ClusterQCVoteData, len(rootClusterBlocks)) for i, cluster := range clusters { signers := make([]model.NodeInfo, 0) + signerIDs := make([]flow.Identifier, 0) for _, identity := range nodeInfos { if _, inCluster := cluster.ByNodeID(identity.NodeID); inCluster { signers = append(signers, identity) + signerIDs = append(signerIDs, identity.NodeID) } } qc, err := run.GenerateClusterRootQC(signers, model.ToIdentityList(signers), rootClusterBlocks[i]) require.NoError(t, err) - rootClusterQCs[i] = flow.ClusterQCVoteDataFromQC(qc) + rootClusterQCs[i] = flow.ClusterQCVoteDataFromQC(&flow.QuorumCertificateWithSignerIDs{ + View: qc.View, + BlockID: qc.BlockID, + SignerIDs: signerIDs, + SigData: qc.SigData, + }) } tc.sentTransactions = make(map[uint64]map[uint]flow.IdentifierList) @@ -130,18 +138,34 @@ func NewClusterSwitchoverTestCase(t *testing.T, conf ClusterSwitchoverTestConf) signer := nodeInfoLookup[signerID] signers = append(signers, signer) } + // generate root cluster block rootClusterBlock := cluster.CanonicalRootBlock(commit.Counter, model.ToIdentityList(signers)) // generate cluster root qc qc, err := run.GenerateClusterRootQC(signers, model.ToIdentityList(signers), rootClusterBlock) require.NoError(t, err) - commit.ClusterQCs[i] = flow.ClusterQCVoteDataFromQC(qc) + signerIDs := toSignerIDs(signers) + qcWithSignerIDs := &flow.QuorumCertificateWithSignerIDs{ + View: qc.View, + BlockID: qc.BlockID, + SignerIDs: signerIDs, + SigData: qc.SigData, + } + commit.ClusterQCs[i] = flow.ClusterQCVoteDataFromQC(qcWithSignerIDs) } }) return tc } +func toSignerIDs(signers 
[]model.NodeInfo) []flow.Identifier { + signerIDs := make([]flow.Identifier, 0, len(signers)) + for _, signer := range signers { + signerIDs = append(signerIDs, signer.NodeID) + } + return signerIDs +} + // TestClusterSwitchover_Simple is the simplest switchover case with one single-node cluster. func TestClusterSwitchover_Simple(t *testing.T) { RunTestCase(NewClusterSwitchoverTestCase(t, ClusterSwitchoverTestConf{ diff --git a/engine/common/follower/engine.go b/engine/common/follower/engine.go index 88acb9b196c..4f9ebe12a93 100644 --- a/engine/common/follower/engine.go +++ b/engine/common/follower/engine.go @@ -195,7 +195,6 @@ func (e *Engine) onBlockProposal(originID flow.Identifier, proposal *messages.Bl Hex("payload_hash", header.PayloadHash[:]). Time("timestamp", header.Timestamp). Hex("proposer", header.ProposerID[:]). - Int("num_signers", len(header.ParentVoterIDs)). Logger() log.Info().Msg("block proposal received") @@ -329,7 +328,6 @@ func (e *Engine) processBlockProposal(ctx context.Context, proposal *messages.Bl Hex("payload_hash", header.PayloadHash[:]). Time("timestamp", header.Timestamp). Hex("proposer", header.ProposerID[:]). - Int("num_signers", len(header.ParentVoterIDs)). 
Logger() log.Info().Msg("processing block proposal") diff --git a/engine/common/rpc/convert/convert.go b/engine/common/rpc/convert/convert.go index 5efd384aa8f..d8dfa090d06 100644 --- a/engine/common/rpc/convert/convert.go +++ b/engine/common/rpc/convert/convert.go @@ -142,8 +142,6 @@ func BlockHeaderToMessage(h *flow.Header) (*entities.BlockHeader, error) { t := timestamppb.New(h.Timestamp) - parentVoterIds := IdentifiersToMessages(h.ParentVoterIDs) - return &entities.BlockHeader{ Id: id[:], ParentId: h.ParentID[:], @@ -151,7 +149,7 @@ func BlockHeaderToMessage(h *flow.Header) (*entities.BlockHeader, error) { PayloadHash: h.PayloadHash[:], Timestamp: t, View: h.View, - ParentVoterIds: parentVoterIds, + ParentVoterIndices: h.ParentVoterIndices, ParentVoterSigData: h.ParentVoterSigData, ProposerId: h.ProposerID[:], ProposerSigData: h.ProposerSigData, @@ -160,7 +158,6 @@ func BlockHeaderToMessage(h *flow.Header) (*entities.BlockHeader, error) { } func MessageToBlockHeader(m *entities.BlockHeader) (*flow.Header, error) { - parentVoterIds := MessagesToIdentifiers(m.ParentVoterIds) chainId, err := MessageToChainId(m.ChainId) if err != nil { return nil, fmt.Errorf("failed to convert ChainId: %w", err) @@ -171,7 +168,7 @@ func MessageToBlockHeader(m *entities.BlockHeader) (*flow.Header, error) { PayloadHash: MessageToIdentifier(m.PayloadHash), Timestamp: m.Timestamp.AsTime(), View: m.View, - ParentVoterIDs: parentVoterIds, + ParentVoterIndices: m.ParentVoterIndices, ParentVoterSigData: m.ParentVoterSigData, ProposerID: MessageToIdentifier(m.ProposerId), ProposerSigData: m.ProposerSigData, @@ -369,7 +366,7 @@ func CollectionGuaranteeToMessage(g *flow.CollectionGuarantee) *entities.Collect Signatures: [][]byte{g.Signature}, ReferenceBlockId: IdentifierToMessage(g.ReferenceBlockID), Signature: g.Signature, - SignerIds: IdentifiersToMessages(g.SignerIDs), + SignerIndices: g.SignerIndices, } } @@ -377,7 +374,7 @@ func MessageToCollectionGuarantee(m 
*entities.CollectionGuarantee) *flow.Collect return &flow.CollectionGuarantee{ CollectionID: MessageToIdentifier(m.CollectionId), ReferenceBlockID: MessageToIdentifier(m.ReferenceBlockId), - SignerIDs: MessagesToIdentifiers(m.SignerIds), + SignerIndices: m.SignerIndices, Signature: MessageToSignature(m.Signature), } } diff --git a/engine/consensus/compliance/core.go b/engine/consensus/compliance/core.go index 089d72c9e34..a2325f166c3 100644 --- a/engine/consensus/compliance/core.go +++ b/engine/consensus/compliance/core.go @@ -120,7 +120,7 @@ func (c *Core) OnBlockProposal(originID flow.Identifier, proposal *messages.Bloc Hex("payload_hash", header.PayloadHash[:]). Time("timestamp", header.Timestamp). Hex("proposer", header.ProposerID[:]). - Int("num_signers", len(header.ParentVoterIDs)). + Hex("parent_signer_indices", header.ParentVoterIndices). Str("traceID", traceID). // traceID is used to connect logs to traces Logger() log.Info().Msg("block proposal received") @@ -312,7 +312,7 @@ func (c *Core) processBlockProposal(proposal *messages.BlockProposal) error { Hex("payload_hash", header.PayloadHash[:]). Time("timestamp", header.Timestamp). Hex("proposer", header.ProposerID[:]). - Int("num_signers", len(header.ParentVoterIDs)). + Hex("parent_signer_indices", header.ParentVoterIndices). Logger() log.Info().Msg("processing block proposal") diff --git a/engine/consensus/compliance/engine.go b/engine/consensus/compliance/engine.go index bcae512cd2f..817f98c047b 100644 --- a/engine/consensus/compliance/engine.go +++ b/engine/consensus/compliance/engine.go @@ -351,8 +351,7 @@ func (e *Engine) BroadcastProposalWithDelay(header *flow.Header, delay time.Dura Int("seals_count", len(payload.Seals)). Int("receipts_count", len(payload.Receipts)). Time("timestamp", header.Timestamp). - Hex("proposer", header.ProposerID[:]). - Int("num_signers", len(header.ParentVoterIDs)). + Hex("signers", header.ParentVoterIndices). Dur("delay", delay). 
Logger() diff --git a/engine/consensus/ingestion/core.go b/engine/consensus/ingestion/core.go index b9af136c1df..bfd5d8dba93 100644 --- a/engine/consensus/ingestion/core.go +++ b/engine/consensus/ingestion/core.go @@ -9,11 +9,13 @@ import ( "github.com/rs/zerolog" + "github.com/onflow/flow-go/consensus/hotstuff" "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/mempool" "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/module/signature" "github.com/onflow/flow-go/module/trace" "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/storage" @@ -68,7 +70,7 @@ func (e *Core) OnGuarantee(originID flow.Identifier, guarantee *flow.CollectionG log := e.log.With(). Hex("origin_id", originID[:]). Hex("collection_id", guaranteeID[:]). - Int("signers", len(guarantee.SignerIDs)). + Hex("signers", guarantee.SignerIndices). Logger() log.Info().Msg("collection guarantee received") @@ -147,32 +149,41 @@ func (e *Core) validateExpiry(guarantee *flow.CollectionGuarantee) error { // nodes independently decide when a collection is finalized and we only check // that the guarantors are all from the same cluster. This implementation is NOT BFT. 
func (e *Core) validateGuarantors(guarantee *flow.CollectionGuarantee) error { - guarantors := guarantee.SignerIDs - if len(guarantors) == 0 { - return engine.NewInvalidInputError("invalid collection guarantee with no guarantors") - } - // get the clusters to assign the guarantee and check if the guarantor is part of it snapshot := e.state.AtBlockID(guarantee.ReferenceBlockID) - clusters, err := snapshot.Epochs().Current().Clustering() + cluster, err := snapshot.Epochs().Current().ClusterByChainID(guarantee.ChainID) + // reference block not found if errors.Is(err, storage.ErrNotFound) { - return engine.NewUnverifiableInputError("could not get clusters for unknown reference block (id=%x): %w", guarantee.ReferenceBlockID, err) + return engine.NewUnverifiableInputError( + "could not get clusters with chainID %v for unknown reference block (id=%x): %w", guarantee.ChainID, guarantee.ReferenceBlockID, err) } - if err != nil { - return fmt.Errorf("internal error retrieving collector clusters: %w", err) + // cluster not found by the chain ID + if errors.Is(err, protocol.ErrClusterNotFound) { + return engine.NewInvalidInputErrorf("cluster not found by chain ID %v, %w", guarantee.ChainID, err) } - cluster, _, ok := clusters.ByNodeID(guarantors[0]) - if !ok { - return engine.NewInvalidInputErrorf("guarantor (id=%s) does not exist in any cluster", guarantors[0]) + if err != nil { + return fmt.Errorf("internal error retrieving collector clusters for guarantee (ReferenceBlockID: %v, ChainID: %v): %w", + guarantee.ReferenceBlockID, guarantee.ChainID, err) } // ensure the guarantors are from the same cluster - clusterLookup := cluster.Lookup() - for _, guarantorID := range guarantors { - _, exists := clusterLookup[guarantorID] - if !exists { - return engine.NewInvalidInputError("inconsistent guarantors from different clusters") + clusterMembers := cluster.Members() + + // find guarantors by signer indices + guarantors, err := 
signature.DecodeSignerIndicesToIdentities(clusterMembers, guarantee.SignerIndices) + if err != nil { + if signature.IsDecodeSignerIndicesError(err) { + return engine.NewInvalidInputErrorf("could not decode guarantor indices: %v", err) } + // unexpected error + return fmt.Errorf("unexpected internal error decoding signer indices: %w", err) + } + + // determine whether signers reach minimally required stake threshold + threshold := hotstuff.ComputeWeightThresholdForBuildingQC(clusterMembers.TotalWeight()) // compute required stake threshold + totalStake := flow.IdentityList(guarantors).TotalWeight() + if totalStake < threshold { + return engine.NewInvalidInputErrorf("collection guarantee qc signers have insufficient stake of %d (required=%d)", totalStake, threshold) } return nil diff --git a/engine/consensus/ingestion/core_test.go b/engine/consensus/ingestion/core_test.go index 21ee4df77a2..dd5ba129add 100644 --- a/engine/consensus/ingestion/core_test.go +++ b/engine/consensus/ingestion/core_test.go @@ -6,12 +6,14 @@ import ( "testing" "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/model/flow" mockmempool "github.com/onflow/flow-go/module/mempool/mock" "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/module/signature" "github.com/onflow/flow-go/module/trace" "github.com/onflow/flow-go/state/protocol" mockprotocol "github.com/onflow/flow-go/state/protocol/mock" @@ -64,7 +66,7 @@ func (suite *IngestionCoreSuite) SetupTest() { suite.execID = exec.NodeID suite.verifID = verif.NodeID - clusters := flow.ClusterList{flow.IdentityList{coll}} + clusters := flow.IdentityList{coll} identities := flow.IdentityList{access, con, coll, exec, verif} suite.finalIdentities = identities.Copy() @@ -79,6 +81,7 @@ func (suite *IngestionCoreSuite) SetupTest() { suite.epoch = &mockprotocol.Epoch{} headers := &mockstorage.Headers{} pool := 
&mockmempool.Guarantees{} + cluster := &mockprotocol.Cluster{} // this state basically works like a normal protocol state // returning everything correctly, using the created header @@ -106,7 +109,8 @@ func (suite *IngestionCoreSuite) SetupTest() { ) ref.On("Epochs").Return(suite.query) suite.query.On("Current").Return(suite.epoch) - suite.epoch.On("Clustering").Return(clusters, nil) + cluster.On("Members").Return(clusters) + suite.epoch.On("ClusterByChainID", head.ChainID).Return(cluster, nil) state.On("AtBlockID", mock.Anything).Return(ref) ref.On("Identity", mock.Anything).Return( @@ -195,7 +199,7 @@ func (suite *IngestionCoreSuite) TestOnGuaranteeNotAdded() { func (suite *IngestionCoreSuite) TestOnGuaranteeNoGuarantors() { // create a guarantee without any signers guarantee := suite.validGuarantee() - guarantee.SignerIDs = nil + guarantee.SignerIndices = nil // the guarantee is not part of the memory pool suite.pool.On("Has", guarantee.ID()).Return(false) @@ -210,29 +214,6 @@ func (suite *IngestionCoreSuite) TestOnGuaranteeNoGuarantors() { suite.pool.AssertNotCalled(suite.T(), "Add", guarantee) } -// TestOnGuaranteeInvalidRole verifies that a collection is rejected if any of -// the signers has a role _different_ than collection. -// We expect an engine.InvalidInputError. 
-func (suite *IngestionCoreSuite) TestOnGuaranteeInvalidRole() { - for _, invalidSigner := range []flow.Identifier{suite.accessID, suite.conID, suite.execID, suite.verifID} { - // add signer with role other than collector - guarantee := suite.validGuarantee() - guarantee.SignerIDs = append(guarantee.SignerIDs, invalidSigner) - - // the guarantee is not part of the memory pool - suite.pool.On("Has", guarantee.ID()).Return(false) - suite.pool.On("Add", guarantee).Return(true) - - // submit the guarantee as if it was sent by a consensus node - err := suite.core.OnGuarantee(suite.collID, guarantee) - suite.Assert().Error(err, "should error with missing guarantor") - suite.Assert().True(engine.IsInvalidInputError(err)) - - // check that the guarantee has _not_ been added to the mempool - suite.pool.AssertNotCalled(suite.T(), "Add", guarantee) - } -} - func (suite *IngestionCoreSuite) TestOnGuaranteeExpired() { // create an alternative block @@ -262,7 +243,7 @@ func (suite *IngestionCoreSuite) TestOnGuaranteeInvalidGuarantor() { // create a guarantee and add random (unknown) signer ID guarantee := suite.validGuarantee() - guarantee.SignerIDs = append(guarantee.SignerIDs, unittest.IdentifierFixture()) + guarantee.SignerIndices = []byte{4} // the guarantee is not part of the memory pool suite.pool.On("Has", guarantee.ID()).Return(false) @@ -324,7 +305,13 @@ func (suite *IngestionCoreSuite) TestOnGuaranteeUnknownOrigin() { // validGuarantee returns a valid collection guarantee based on the suite state. 
func (suite *IngestionCoreSuite) validGuarantee() *flow.CollectionGuarantee { guarantee := unittest.CollectionGuaranteeFixture() - guarantee.SignerIDs = []flow.Identifier{suite.collID} + guarantee.ChainID = suite.head.ChainID + + signerIndices, err := signature.EncodeSignersToIndices( + []flow.Identifier{suite.collID}, []flow.Identifier{suite.collID}) + require.NoError(suite.T(), err) + + guarantee.SignerIndices = signerIndices guarantee.ReferenceBlockID = suite.head.ID() return guarantee } diff --git a/engine/consensus/sealing/engine_test.go b/engine/consensus/sealing/engine_test.go index e0b597cd75d..cdc2031c45c 100644 --- a/engine/consensus/sealing/engine_test.go +++ b/engine/consensus/sealing/engine_test.go @@ -54,7 +54,7 @@ func (s *SealingEngineSuite) SetupTest() { }, ) - rootHeader, err := unittest.RootSnapshotFixture(unittest.IdentityListFixture(5)).Head() + rootHeader, err := unittest.RootSnapshotFixture(unittest.IdentityListFixture(5, unittest.WithAllRoles())).Head() require.NoError(s.T(), err) s.engine = &Engine{ diff --git a/engine/execution/execution_test.go b/engine/execution/execution_test.go index 67a88ec6d08..980e6061708 100644 --- a/engine/execution/execution_test.go +++ b/engine/execution/execution_test.go @@ -6,6 +6,7 @@ import ( "testing" "time" + "github.com/rs/zerolog/log" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -17,9 +18,12 @@ import ( "github.com/onflow/flow-go/engine/testutil" testmock "github.com/onflow/flow-go/engine/testutil/mock" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/flow/order" "github.com/onflow/flow-go/model/messages" + "github.com/onflow/flow-go/module/signature" "github.com/onflow/flow-go/network/mocknetwork" "github.com/onflow/flow-go/network/stub" + "github.com/onflow/flow-go/state/cluster" "github.com/onflow/flow-go/utils/unittest" ) @@ -33,7 +37,7 @@ func sendBlock(exeNode *testmock.ExecutionNode, from flow.Identifier, 
proposal * // create a block that has two collections: col1 and col2; // col1 has tx1 and tx2, col2 has tx3 and tx4. // create another child block which will trigger the parent -// block to valid and be passed to the ingestion engine +// block to be incorporated and be passed to the ingestion engine func TestExecutionFlow(t *testing.T) { hub := stub.NewNetworkHub() @@ -56,7 +60,7 @@ func TestExecutionFlow(t *testing.T) { unittest.WithKeys, ) - identities := unittest.CompleteIdentitySet(colID, conID, exeID, verID) + identities := unittest.CompleteIdentitySet(colID, conID, exeID, verID).Sort(order.Canonical) // create execution node exeNode := testutil.ExecutionNode(t, hub, exeID, identities, 21, chainID) @@ -90,23 +94,40 @@ func TestExecutionFlow(t *testing.T) { col2.ID(): &col2, } - block := unittest.BlockWithParentAndProposerFixture(genesis, conID.NodeID) + clusterChainID := cluster.CanonicalClusterID(1, flow.IdentityList{colID}) + + // signed by the only collector + block := unittest.BlockWithParentAndProposerFixture(genesis, conID.NodeID, 1) + voterIndices, err := signature.EncodeSignersToIndices( + []flow.Identifier{conID.NodeID}, []flow.Identifier{conID.NodeID}) + require.NoError(t, err) + block.Header.ParentVoterIndices = voterIndices + signerIndices, err := signature.EncodeSignersToIndices( + []flow.Identifier{colID.NodeID}, []flow.Identifier{colID.NodeID}) + require.NoError(t, err) block.SetPayload(flow.Payload{ Guarantees: []*flow.CollectionGuarantee{ { CollectionID: col1.ID(), - SignerIDs: []flow.Identifier{colID.NodeID}, + SignerIndices: signerIndices, + ChainID: clusterChainID, ReferenceBlockID: genesis.ID(), }, { CollectionID: col2.ID(), - SignerIDs: []flow.Identifier{colID.NodeID}, + SignerIndices: signerIndices, + ChainID: clusterChainID, ReferenceBlockID: genesis.ID(), }, }, }) - child := unittest.BlockWithParentAndProposerFixture(block.Header, conID.NodeID) + child := unittest.BlockWithParentAndProposerFixture(block.Header, conID.NodeID, 1) + // 
the default signer indices is 2 bytes, but in this test cases + // we need 1 byte + child.Header.ParentVoterIndices = voterIndices + + log.Info().Msgf("child block ID: %v, indices: %x", child.Header.ID(), child.Header.ParentVoterIndices) collectionNode := testutil.GenericNodeFromParticipants(t, hub, colID, identities, chainID) defer collectionNode.Done() @@ -230,13 +251,24 @@ func deployContractBlock(t *testing.T, conID *flow.Identity, colID *flow.Identit // make collection col := &flow.Collection{Transactions: []*flow.TransactionBody{tx}} + signerIndices, err := signature.EncodeSignersToIndices( + []flow.Identifier{colID.NodeID}, []flow.Identifier{colID.NodeID}) + require.NoError(t, err) + + clusterChainID := cluster.CanonicalClusterID(1, flow.IdentityList{colID}) + // make block - block := unittest.BlockWithParentAndProposerFixture(parent, conID.NodeID) + block := unittest.BlockWithParentAndProposerFixture(parent, conID.NodeID, 1) + voterIndices, err := signature.EncodeSignersToIndices( + []flow.Identifier{conID.NodeID}, []flow.Identifier{conID.NodeID}) + require.NoError(t, err) + block.Header.ParentVoterIndices = voterIndices block.SetPayload(flow.Payload{ Guarantees: []*flow.CollectionGuarantee{ { CollectionID: col.ID(), - SignerIDs: []flow.Identifier{colID.NodeID}, + SignerIndices: signerIndices, + ChainID: clusterChainID, ReferenceBlockID: ref.ID(), }, }, @@ -258,11 +290,21 @@ func makePanicBlock(t *testing.T, conID *flow.Identity, colID *flow.Identity, ch // make collection col := &flow.Collection{Transactions: []*flow.TransactionBody{tx}} + clusterChainID := cluster.CanonicalClusterID(1, flow.IdentityList{colID}) // make block - block := unittest.BlockWithParentAndProposerFixture(parent, conID.NodeID) + block := unittest.BlockWithParentAndProposerFixture(parent, conID.NodeID, 1) + voterIndices, err := signature.EncodeSignersToIndices( + []flow.Identifier{conID.NodeID}, []flow.Identifier{conID.NodeID}) + require.NoError(t, err) + 
block.Header.ParentVoterIndices = voterIndices + + signerIndices, err := signature.EncodeSignersToIndices( + []flow.Identifier{colID.NodeID}, []flow.Identifier{colID.NodeID}) + require.NoError(t, err) + block.SetPayload(flow.Payload{ Guarantees: []*flow.CollectionGuarantee{ - {CollectionID: col.ID(), SignerIDs: []flow.Identifier{colID.NodeID}, ReferenceBlockID: ref.ID()}, + {CollectionID: col.ID(), SignerIndices: signerIndices, ChainID: clusterChainID, ReferenceBlockID: ref.ID()}, }, }) @@ -277,11 +319,20 @@ func makeSuccessBlock(t *testing.T, conID *flow.Identity, colID *flow.Identity, err := execTestutil.SignTransactionAsServiceAccount(tx, seq, chain) require.NoError(t, err) + signerIndices, err := signature.EncodeSignersToIndices( + []flow.Identifier{colID.NodeID}, []flow.Identifier{colID.NodeID}) + require.NoError(t, err) + clusterChainID := cluster.CanonicalClusterID(1, flow.IdentityList{colID}) + col := &flow.Collection{Transactions: []*flow.TransactionBody{tx}} - block := unittest.BlockWithParentAndProposerFixture(parent, conID.NodeID) + block := unittest.BlockWithParentAndProposerFixture(parent, conID.NodeID, 1) + voterIndices, err := signature.EncodeSignersToIndices( + []flow.Identifier{conID.NodeID}, []flow.Identifier{conID.NodeID}) + require.NoError(t, err) + block.Header.ParentVoterIndices = voterIndices block.SetPayload(flow.Payload{ Guarantees: []*flow.CollectionGuarantee{ - {CollectionID: col.ID(), SignerIDs: []flow.Identifier{colID.NodeID}, ReferenceBlockID: ref.ID()}, + {CollectionID: col.ID(), SignerIndices: signerIndices, ChainID: clusterChainID, ReferenceBlockID: ref.ID()}, }, }) @@ -290,21 +341,9 @@ func makeSuccessBlock(t *testing.T, conID *flow.Identity, colID *flow.Identity, return tx, col, block, proposal, seq + 1 } -// Test the following behaviors: -// (1) ENs sync statecommitment with each other -// (2) a failed transaction will not change statecommitment -// -// We prepare 3 transactions in 3 blocks: -// tx1 will deploy a contract -// 
tx2 will always panic -// tx3 will be succeed and change statecommitment -// and then create 2 EN nodes, both have tx1 executed. To test the synchronization, -// we send tx2 and tx3 in 2 blocks to only EN1, and check that tx2 will not change statecommitment for -// verifying behavior (1); -// and check EN2 should have the same statecommitment as EN1 since they sync -// with each other for verifying behavior (2). -// TODO: state sync is disabled, we are only verifying 2) for now. -func TestExecutionStateSyncMultipleExecutionNodes(t *testing.T) { +// Test a successful tx should change the statecommitment, +// but a failed Tx should not change the statecommitment. +func TestFailedTxWillNotChangeStateCommitment(t *testing.T) { hub := stub.NewNetworkHub() chainID := flow.Emulator @@ -492,12 +531,17 @@ func TestBroadcastToMultipleVerificationNodes(t *testing.T) { genesis, err := exeNode.State.AtHeight(0).Head() require.NoError(t, err) - block := unittest.BlockWithParentAndProposerFixture(genesis, conID.NodeID) + block := unittest.BlockWithParentAndProposerFixture(genesis, conID.NodeID, 1) + voterIndices, err := signature.EncodeSignersToIndices( + []flow.Identifier{conID.NodeID}, []flow.Identifier{conID.NodeID}) + require.NoError(t, err) + block.Header.ParentVoterIndices = voterIndices block.Header.View = 42 block.SetPayload(flow.Payload{}) proposal := unittest.ProposalFromBlock(&block) - child := unittest.BlockWithParentAndProposerFixture(block.Header, conID.NodeID) + child := unittest.BlockWithParentAndProposerFixture(block.Header, conID.NodeID, 1) + child.Header.ParentVoterIndices = voterIndices actualCalls := atomic.Uint64{} diff --git a/engine/execution/ingestion/engine.go b/engine/execution/ingestion/engine.go index 795f23f25b6..64b00fcd9fc 100644 --- a/engine/execution/ingestion/engine.go +++ b/engine/execution/ingestion/engine.go @@ -390,7 +390,7 @@ func (e *Engine) reloadBlock( err = e.enqueueBlockAndCheckExecutable(blockByCollection, executionQueues, block, 
false) if err != nil { - return fmt.Errorf("could not enqueue block on reloading: %w", err) + return fmt.Errorf("could not enqueue block %x on reloading: %w", blockID, err) } return nil @@ -455,7 +455,7 @@ func (e *Engine) handleBlock(ctx context.Context, block *flow.Block) error { }) if err != nil { - return fmt.Errorf("could not enqueue block: %w", err) + return fmt.Errorf("could not enqueue block %v: %w", blockID, err) } return nil @@ -996,8 +996,12 @@ func (e *Engine) matchOrRequestCollections( Hex("collection_id", logging.ID(guarantee.ID())). Msg("requesting collection") + guarantors, err := protocol.FindGuarantors(e.state, guarantee) + if err != nil { + return fmt.Errorf("could not find guarantors: %w", err) + } // queue the collection to be requested from one of the guarantors - e.request.EntityByID(guarantee.ID(), filter.HasNodeID(guarantee.SignerIDs...)) + e.request.EntityByID(guarantee.ID(), filter.HasNodeID(guarantors...)) actualRequested++ } diff --git a/engine/execution/ingestion/engine_test.go b/engine/execution/ingestion/engine_test.go index 66521ad0f89..3f991022d9a 100644 --- a/engine/execution/ingestion/engine_test.go +++ b/engine/execution/ingestion/engine_test.go @@ -9,6 +9,7 @@ import ( "time" "github.com/golang/mock/gomock" + "github.com/rs/zerolog/log" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -24,9 +25,11 @@ import ( "github.com/onflow/flow-go/engine/testutil/mocklocal" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" + "github.com/onflow/flow-go/model/flow/order" "github.com/onflow/flow-go/module/mempool/entity" "github.com/onflow/flow-go/module/metrics" module "github.com/onflow/flow-go/module/mocks" + "github.com/onflow/flow-go/module/signature" "github.com/onflow/flow-go/module/trace" "github.com/onflow/flow-go/network/mocknetwork" stateProtocol "github.com/onflow/flow-go/state/protocol" @@ -66,6 +69,7 @@ type testingContext struct { 
identity *flow.Identity broadcastedReceipts map[flow.Identifier]*flow.ExecutionReceipt collectionRequester *module.MockRequester + identities flow.IdentityList mu *sync.Mutex } @@ -116,7 +120,8 @@ func runWithEngine(t *testing.T, f func(testingContext)) { providerEngine.AssertExpectations(t) }() - identityList := flow.IdentityList{myIdentity, collection1Identity, collection2Identity, collection3Identity} + identityListUnsorted := flow.IdentityList{myIdentity, collection1Identity, collection2Identity, collection3Identity} + identityList := identityListUnsorted.Sort(order.Canonical) executionState.On("DiskSize").Return(int64(1024*1024), nil).Maybe() @@ -191,6 +196,7 @@ func runWithEngine(t *testing.T, f func(testingContext)) { snapshot: snapshot, identity: myIdentity, broadcastedReceipts: make(map[flow.Identifier]*flow.ExecutionReceipt), + identities: identityList, mu: &sync.Mutex{}, }) @@ -329,11 +335,31 @@ func (ctx testingContext) mockHasWeightAtBlockID(blockID flow.Identifier, hasWei } snap := new(protocol.Snapshot) snap.On("Identity", identity.NodeID).Return(&identity, nil) - ctx.state.On("AtBlockID", blockID).Return(snap) return snap } +func (ctx testingContext) mockSnapshot(header *flow.Header, identities flow.IdentityList) { + ctx.mockSnapshotWithBlockID(header.ID(), identities) +} + +func (ctx testingContext) mockSnapshotWithBlockID(blockID flow.Identifier, identities flow.IdentityList) { + cluster := new(protocol.Cluster) + // filter only collections as cluster members + cluster.On("Members").Return(identities.Filter(filter.HasRole(flow.RoleCollection))) + + epoch := new(protocol.Epoch) + epoch.On("ClusterByChainID", mock.Anything).Return(cluster, nil) + + epochQuery := new(protocol.EpochQuery) + epochQuery.On("Current").Return(epoch) + + snap := new(protocol.Snapshot) + snap.On("Epochs").Return(epochQuery) + snap.On("Identity", mock.Anything).Return(identities[0], nil) + ctx.state.On("AtBlockID", blockID).Return(snap) +} + func (ctx *testingContext) 
stateCommitmentExist(blockID flow.Identifier, commit flow.StateCommitment) { ctx.executionState.On("StateCommitmentByBlockID", mock.Anything, blockID).Return(commit, nil) } @@ -391,7 +417,9 @@ func TestExecuteOneBlock(t *testing.T) { blockB := unittest.ExecutableBlockFixtureWithParent(nil, &blockA) blockB.StartState = unittest.StateCommitmentPointerFixture() + ctx.mockHasWeightAtBlockID(blockA.ID(), true) ctx.mockHasWeightAtBlockID(blockB.ID(), true) + ctx.mockSnapshot(blockB.Block.Header, unittest.IdentityListFixture(1)) // blockA's start state is its parent's state commitment, // and blockA's parent has been executed. @@ -423,6 +451,7 @@ func TestExecuteOneBlock(t *testing.T) { } func Test_OnlyHeadOfTheQueueIsExecuted(t *testing.T) { + unittest.SkipUnless(t, unittest.TEST_FLAKY, "To be fixed later") // only head of the queue should be executing. // Restarting node or errors in consensus module could trigger // block (or its parent) which already has been executed to be enqueued again @@ -587,7 +616,7 @@ func TestBlocksArentExecutedMultipleTimes_multipleBlockEnqueue(t *testing.T) { //blockCstartState := unittest.StateCommitmentFixture() blockC := unittest.ExecutableBlockFixtureWithParent([][]flow.Identifier{{colSigner}}, blockB.Block.Header) - //blockC.StartState = blockB.StartState //blocks are empty, so no state change is expected + blockC.StartState = blockB.StartState //blocks are empty, so no state change is expected logBlocks(map[string]*entity.ExecutableBlock{ "B": blockB, @@ -684,15 +713,24 @@ func TestBlocksArentExecutedMultipleTimes_collectionArrival(t *testing.T) { // It should rather not occur during normal execution because StartState won't be set // before parent has finished, but we should handle this edge case that it is set as well. 
- colSigner := unittest.IdentifierFixture() - - // A <- B <- C <- D + // A (0 collection) <- B (0 collection) <- C (0 collection) <- D (1 collection) blockA := unittest.BlockHeaderFixture() blockB := unittest.ExecutableBlockFixtureWithParent(nil, &blockA) blockB.StartState = unittest.StateCommitmentPointerFixture() + collectionIdentities := ctx.identities.Filter(filter.HasRole(flow.RoleCollection)) + colSigner := collectionIdentities[0].ID() blockC := unittest.ExecutableBlockFixtureWithParent([][]flow.Identifier{{colSigner}}, blockB.Block.Header) blockC.StartState = blockB.StartState //blocks are empty, so no state change is expected + // the default fixture uses a 10 collectors committee, but in this test case, there are only 4, + // so we need to update the signer indices. + // set the first identity as signer + log.Info().Msgf("canonical collection list %v", collectionIdentities.NodeIDs()) + log.Info().Msgf("full list %v", ctx.identities) + indices, err := + signature.EncodeSignersToIndices(collectionIdentities.NodeIDs(), []flow.Identifier{colSigner}) + require.NoError(t, err) + blockC.Block.Payload.Guarantees[0].SignerIndices = indices // block D to make sure execution resumes after block C multiple execution has been prevented blockD := unittest.ExecutableBlockFixtureWithParent(nil, blockC.Block.Header) @@ -712,6 +750,13 @@ func TestBlocksArentExecutedMultipleTimes_collectionArrival(t *testing.T) { wg := sync.WaitGroup{} ctx.mockStateCommitsWithMap(commits) + // mock the cluster canonical list at the collection guarantee's reference block + // use the same canonical list as used for building signer indices + ctx.mockSnapshotWithBlockID(unittest.FixedReferenceBlockID(), ctx.identities) + ctx.mockSnapshot(blockB.Block.Header, ctx.identities) + ctx.mockSnapshot(blockC.Block.Header, ctx.identities) + ctx.mockSnapshot(blockD.Block.Header, ctx.identities) + ctx.state.On("Sealed").Return(ctx.snapshot) ctx.snapshot.On("Head").Return(&blockA, nil) @@ -764,7 +809,7 @@ 
func TestBlocksArentExecutedMultipleTimes_collectionArrival(t *testing.T) { }).Times(1) wg.Add(1) // wait for block B to be executed - err := ctx.engine.handleBlock(context.Background(), blockB.Block) + err = ctx.engine.handleBlock(context.Background(), blockB.Block) require.NoError(t, err) wg.Add(1) // wait for block C to be executed @@ -831,9 +876,14 @@ func TestExecuteBlockInOrder(t *testing.T) { // make sure the seal height won't trigger state syncing, so that all blocks // will be executed. + ctx.mockHasWeightAtBlockID(blocks["A"].ID(), true) ctx.state.On("Sealed").Return(ctx.snapshot) // a receipt for sealed block won't be broadcasted ctx.snapshot.On("Head").Return(&blockSealed, nil) + ctx.mockSnapshot(blocks["A"].Block.Header, unittest.IdentityListFixture(1)) + ctx.mockSnapshot(blocks["B"].Block.Header, unittest.IdentityListFixture(1)) + ctx.mockSnapshot(blocks["C"].Block.Header, unittest.IdentityListFixture(1)) + ctx.mockSnapshot(blocks["D"].Block.Header, unittest.IdentityListFixture(1)) // once block A is computed, it should trigger B and C being sent to compute, // which in turn should trigger D @@ -1041,6 +1091,8 @@ func TestUnauthorizedNodeDoesNotBroadcastReceipts(t *testing.T) { ctx.snapshot.On("Head").Return(&blockSealed, nil) ctx.mockHasWeightAtBlockID(blocks["A"].ID(), true) + identity := *ctx.identity + identity.Weight = 0 ctx.assertSuccessfulBlockComputation(commits, onPersisted, blocks["A"], unittest.IdentifierFixture(), true, *blocks["A"].StartState, nil) ctx.assertSuccessfulBlockComputation(commits, onPersisted, blocks["B"], unittest.IdentifierFixture(), false, *blocks["B"].StartState, nil) @@ -1049,24 +1101,28 @@ func TestUnauthorizedNodeDoesNotBroadcastReceipts(t *testing.T) { wg.Add(1) ctx.mockHasWeightAtBlockID(blocks["A"].ID(), true) + ctx.mockSnapshot(blocks["A"].Block.Header, flow.IdentityList{ctx.identity}) err := ctx.engine.handleBlock(context.Background(), blocks["A"].Block) require.NoError(t, err) wg.Add(1) 
ctx.mockHasWeightAtBlockID(blocks["B"].ID(), false) + ctx.mockSnapshot(blocks["B"].Block.Header, flow.IdentityList{&identity}) // unauthorized err = ctx.engine.handleBlock(context.Background(), blocks["B"].Block) require.NoError(t, err) wg.Add(1) ctx.mockHasWeightAtBlockID(blocks["C"].ID(), true) + ctx.mockSnapshot(blocks["C"].Block.Header, flow.IdentityList{ctx.identity}) err = ctx.engine.handleBlock(context.Background(), blocks["C"].Block) require.NoError(t, err) wg.Add(1) ctx.mockHasWeightAtBlockID(blocks["D"].ID(), false) + ctx.mockSnapshot(blocks["D"].Block.Header, flow.IdentityList{&identity}) // unauthorized err = ctx.engine.handleBlock(context.Background(), blocks["D"].Block) require.NoError(t, err) diff --git a/engine/testutil/nodes.go b/engine/testutil/nodes.go index 3361321c74a..0e797fac897 100644 --- a/engine/testutil/nodes.go +++ b/engine/testutil/nodes.go @@ -8,6 +8,8 @@ import ( "testing" "time" + "github.com/onflow/flow-go/module/signature" + "github.com/rs/zerolog" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -680,12 +682,14 @@ func getRoot(t *testing.T, node *testmock.GenericNode) (*flow.Header, *flow.Quor require.NoError(t, err) signerIDs := signers.NodeIDs() + signerIndices, err := signature.EncodeSignersToIndices(signerIDs, signerIDs) + require.NoError(t, err) rootQC := &flow.QuorumCertificate{ - View: rootHead.View, - BlockID: rootHead.ID(), - SignerIDs: signerIDs, - SigData: unittest.SignatureFixture(), + View: rootHead.View, + BlockID: rootHead.ID(), + SignerIndices: signerIndices, + SigData: unittest.SignatureFixture(), } return rootHead, rootQC @@ -696,8 +700,8 @@ type RoundRobinLeaderSelection struct { me flow.Identifier } -func (s *RoundRobinLeaderSelection) Identities(blockID flow.Identifier, selector flow.IdentityFilter) (flow.IdentityList, error) { - return s.identities.Filter(selector), nil +func (s *RoundRobinLeaderSelection) Identities(blockID flow.Identifier) (flow.IdentityList, error) { + return 
s.identities, nil } func (s *RoundRobinLeaderSelection) Identity(blockID flow.Identifier, participantID flow.Identifier) (*flow.Identity, error) { diff --git a/fvm/fvm_test.go b/fvm/fvm_test.go index bc4bf519e7e..2e526ee2677 100644 --- a/fvm/fvm_test.go +++ b/fvm/fvm_test.go @@ -102,35 +102,35 @@ func transferTokensTx(chain flow.Chain) *flow.TransactionBody { // // The withdraw amount and the account from getAccount // would be the parameters to the transaction - + import FungibleToken from 0x%s import FlowToken from 0x%s - + transaction(amount: UFix64, to: Address) { - + // The Vault resource that holds the tokens that are being transferred let sentVault: @FungibleToken.Vault - + prepare(signer: AuthAccount) { - + // Get a reference to the signer's stored vault let vaultRef = signer.borrow<&FlowToken.Vault>(from: /storage/flowTokenVault) ?? panic("Could not borrow reference to the owner's Vault!") - + // Withdraw tokens from the signer's stored vault self.sentVault <- vaultRef.withdraw(amount: amount) } - + execute { - + // Get the recipient's public account object let recipient = getAccount(to) - + // Get a reference to the recipient's Receiver let receiverRef = recipient.getCapability(/public/flowTokenReceiver) .borrow<&{FungibleToken.Receiver}>() ?? panic("Could not borrow receiver reference to the recipient's Vault") - + // Deposit the withdrawn tokens in the recipient's receiver receiverRef.deposit(from: <-self.sentVault) } @@ -155,7 +155,7 @@ import FlowContractAudits from 0x%s transaction(deployAddress: Address, code: String) { prepare(serviceAccount: AuthAccount) { - + let auditorAdmin = serviceAccount.borrow<&FlowContractAudits.Administrator>(from: FlowContractAudits.AdminStoragePath) ?? panic("Could not borrow a reference to the admin resource") @@ -1027,7 +1027,7 @@ func TestBlockContext_ExecuteTransaction_StorageLimit(t *testing.T) { // deposit additional flow let payment <- vaultRef.withdraw(amount: 10.0) as! 
@FlowToken.Vault - let receiver = signer.getCapability(/public/flowTokenReceiver)!.borrow<&{FungibleToken.Receiver}>() + let receiver = signer.getCapability(/public/flowTokenReceiver)!.borrow<&{FungibleToken.Receiver}>() ?? panic("Could not borrow receiver reference to the recipient's Vault") receiver.deposit(from: <-payment) } @@ -1659,7 +1659,7 @@ func TestBlockContext_UnsafeRandom(t *testing.T) { num, err := strconv.ParseUint(tx.Logs[0], 10, 64) require.NoError(t, err) - require.Equal(t, uint64(0xb9c618010e32a0fb), num) + require.Equal(t, uint64(0x8872445cb397f6d2), num) }) } @@ -2550,7 +2550,7 @@ func TestHashing(t *testing.T) { return []byte(fmt.Sprintf( ` import Crypto - + pub fun main(data: [UInt8]): [UInt8] { return Crypto.hash(data, algorithm: HashAlgorithm.%s) } @@ -2560,7 +2560,7 @@ func TestHashing(t *testing.T) { return []byte(fmt.Sprintf( ` import Crypto - + pub fun main(data: [UInt8], tag: String): [UInt8] { return Crypto.hashWithTag(data, tag: tag, algorithm: HashAlgorithm.%s) } @@ -2933,13 +2933,13 @@ func TestBlockContext_ExecuteTransaction_FailingTransactions(t *testing.T) { code := []byte(fmt.Sprintf(` import FungibleToken from 0x%s import FlowToken from 0x%s - + pub fun main(account: Address): UFix64 { let acct = getAccount(account) let vaultRef = acct.getCapability(/public/flowTokenBalance) .borrow<&FlowToken.Vault{FungibleToken.Balance}>() ?? panic("Could not borrow Balance reference to the Vault") - + return vaultRef.balance } `, fvm.FungibleTokenAddress(chain), fvm.FlowTokenAddress(chain))) @@ -3167,13 +3167,13 @@ func TestTransactionFeeDeduction(t *testing.T) { code := []byte(fmt.Sprintf(` import FungibleToken from 0x%s import FlowToken from 0x%s - + pub fun main(account: Address): UFix64 { let acct = getAccount(account) let vaultRef = acct.getCapability(/public/flowTokenBalance) .borrow<&FlowToken.Vault{FungibleToken.Balance}>() ?? 
panic("Could not borrow Balance reference to the Vault") - + return vaultRef.balance } `, fvm.FungibleTokenAddress(chain), fvm.FlowTokenAddress(chain))) diff --git a/go.mod b/go.mod index e52f3a26728..fcaf85e93d5 100644 --- a/go.mod +++ b/go.mod @@ -55,13 +55,13 @@ require ( github.com/multiformats/go-multihash v0.1.0 github.com/onflow/atree v0.3.0 github.com/onflow/cadence v0.21.3-0.20220513161637-08b93d4bb7b9 - github.com/onflow/flow v0.2.5 + github.com/onflow/flow v0.3.0 github.com/onflow/flow-core-contracts/lib/go/contracts v0.11.2-0.20220513155751-c4c1f8d59f83 github.com/onflow/flow-core-contracts/lib/go/templates v0.11.2-0.20220513155751-c4c1f8d59f83 github.com/onflow/flow-emulator v0.31.2-0.20220513151845-ef7513cb1cd0 github.com/onflow/flow-go-sdk v0.24.1-0.20220513205729-d1f58d47c4e3 github.com/onflow/flow-go/crypto v0.24.3 - github.com/onflow/flow/protobuf/go/flow v0.2.5 + github.com/onflow/flow/protobuf/go/flow v0.3.0 github.com/opentracing/opentracing-go v1.2.0 github.com/pierrec/lz4 v2.6.1+incompatible github.com/pkg/errors v0.9.1 diff --git a/go.sum b/go.sum index fc485f2053b..076ff6e2e68 100644 --- a/go.sum +++ b/go.sum @@ -1399,8 +1399,9 @@ github.com/onflow/cadence v0.21.3-0.20220511225809-808fe14141df/go.mod h1:vNIxF1 github.com/onflow/cadence v0.21.3-0.20220513161637-08b93d4bb7b9 h1:rrNPnxd6OBO3EcZYYTUSDlv6VMck694KZ1d5kE6HfKI= github.com/onflow/cadence v0.21.3-0.20220513161637-08b93d4bb7b9/go.mod h1:vNIxF13e1Ty50KnkQSm6LVwvxGGLTQ4kpldvTL+2S6s= github.com/onflow/flow v0.2.4/go.mod h1:lzyAYmbu1HfkZ9cfnL5/sjrrsnJiUU8fRL26CqLP7+c= -github.com/onflow/flow v0.2.5 h1:d1LCeE+w+ef4QAC0zEAxfJn+N09bNKL8zXnfrihiSrs= github.com/onflow/flow v0.2.5/go.mod h1:lzyAYmbu1HfkZ9cfnL5/sjrrsnJiUU8fRL26CqLP7+c= +github.com/onflow/flow v0.3.0 h1:qDKHFXh5HVZRxw+MpHdENXrqA2gVhmCq0CRW7X3ObLA= +github.com/onflow/flow v0.3.0/go.mod h1:lzyAYmbu1HfkZ9cfnL5/sjrrsnJiUU8fRL26CqLP7+c= github.com/onflow/flow-core-contracts/lib/go/contracts v0.7.3-0.20210527134022-58c25247091a/go.mod 
h1:IZ2e7UyLCYmpQ8Kd7k0A32uXqdqfiV1r2sKs5/riblo= github.com/onflow/flow-core-contracts/lib/go/contracts v0.11.2-0.20220413172500-d89ca96e6db3/go.mod h1:T6yhM+kWrFxiP6F3hh8lh9DcocHfmv48P4ITnjLhKSk= github.com/onflow/flow-core-contracts/lib/go/contracts v0.11.2-0.20220422202806-92ad02a996cc/go.mod h1:T6yhM+kWrFxiP6F3hh8lh9DcocHfmv48P4ITnjLhKSk= @@ -1437,8 +1438,9 @@ github.com/onflow/flow/protobuf/go/flow v0.1.9/go.mod h1:kRugbzZjwQqvevJhrnnCFMJ github.com/onflow/flow/protobuf/go/flow v0.2.0/go.mod h1:kRugbzZjwQqvevJhrnnCFMJZNmoSJmxlKt6hTGXZojM= github.com/onflow/flow/protobuf/go/flow v0.2.2/go.mod h1:gQxYqCfkI8lpnKsmIjwtN2mV/N2PIwc1I+RUK4HPIc8= github.com/onflow/flow/protobuf/go/flow v0.2.4/go.mod h1:gQxYqCfkI8lpnKsmIjwtN2mV/N2PIwc1I+RUK4HPIc8= -github.com/onflow/flow/protobuf/go/flow v0.2.5 h1:IzkN5f3w/qFN2Mshc1I0yNgnT+YbFE5Htz/h8t/VL4c= github.com/onflow/flow/protobuf/go/flow v0.2.5/go.mod h1:gQxYqCfkI8lpnKsmIjwtN2mV/N2PIwc1I+RUK4HPIc8= +github.com/onflow/flow/protobuf/go/flow v0.3.0 h1:DSnThQkS7Hbl1X99vUaXYdVKYfNWd5ZU5R8kl+SAPFM= +github.com/onflow/flow/protobuf/go/flow v0.3.0/go.mod h1:gQxYqCfkI8lpnKsmIjwtN2mV/N2PIwc1I+RUK4HPIc8= github.com/onflow/fusd/lib/go/contracts v0.0.0-20211021081023-ae9de8fb2c7e/go.mod h1:CRX9eXtc9zHaRVTW1Xh4Cf5pZgKkQuu1NuSEVyHXr/0= github.com/onflow/sdks v0.4.2 h1:UdnXOdcIPIdD02n2SxQVGTJBAxGqJBgOkThxI3/IDnk= github.com/onflow/sdks v0.4.2/go.mod h1:F0dj0EyHC55kknLkeD10js4mo14yTdMotnWMslPirrU= diff --git a/integration/epochs/cluster_epoch_test.go b/integration/epochs/cluster_epoch_test.go index 589e767c420..ea7b575acc9 100644 --- a/integration/epochs/cluster_epoch_test.go +++ b/integration/epochs/cluster_epoch_test.go @@ -25,6 +25,7 @@ import ( "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/flow/factory" "github.com/onflow/flow-go/utils/unittest" ) @@ -91,7 +92,7 @@ func (s *Suite) CreateClusterList(clusterCount, nodesPerCluster int) (flow.Clust clusterAssignment := 
unittest.ClusterAssignment(uint(clusterCount), nodes) // create `ClusterList` object from nodes and assignment - clusterList, err := flow.NewClusterList(clusterAssignment, nodes) + clusterList, err := factory.NewClusterList(clusterAssignment, nodes) s.Require().NoError(err) return clusterList, nodes diff --git a/integration/go.mod b/integration/go.mod index 8c6673097c5..15b018bacf6 100644 --- a/integration/go.mod +++ b/integration/go.mod @@ -17,7 +17,7 @@ require ( github.com/onflow/flow-go v0.25.13-0.20220513151142-7858f76e703b // replaced by version on-disk github.com/onflow/flow-go-sdk v0.24.1-0.20220513205729-d1f58d47c4e3 github.com/onflow/flow-go/crypto v0.24.3 - github.com/onflow/flow/protobuf/go/flow v0.2.5 + github.com/onflow/flow/protobuf/go/flow v0.3.0 github.com/plus3it/gorecurcopy v0.0.1 github.com/rs/zerolog v1.26.1 github.com/stretchr/testify v1.7.1-0.20210824115523-ab6dc3262822 diff --git a/integration/go.sum b/integration/go.sum index f95c941903d..61f03e034c1 100644 --- a/integration/go.sum +++ b/integration/go.sum @@ -1489,7 +1489,7 @@ github.com/onflow/cadence v0.21.3-0.20220419065337-d5202c162010/go.mod h1:vNIxF1 github.com/onflow/cadence v0.21.3-0.20220511225809-808fe14141df/go.mod h1:vNIxF13e1Ty50KnkQSm6LVwvxGGLTQ4kpldvTL+2S6s= github.com/onflow/cadence v0.21.3-0.20220513161637-08b93d4bb7b9 h1:rrNPnxd6OBO3EcZYYTUSDlv6VMck694KZ1d5kE6HfKI= github.com/onflow/cadence v0.21.3-0.20220513161637-08b93d4bb7b9/go.mod h1:vNIxF13e1Ty50KnkQSm6LVwvxGGLTQ4kpldvTL+2S6s= -github.com/onflow/flow v0.2.5/go.mod h1:lzyAYmbu1HfkZ9cfnL5/sjrrsnJiUU8fRL26CqLP7+c= +github.com/onflow/flow v0.3.0/go.mod h1:lzyAYmbu1HfkZ9cfnL5/sjrrsnJiUU8fRL26CqLP7+c= github.com/onflow/flow-core-contracts/lib/go/contracts v0.11.2-0.20220513155751-c4c1f8d59f83 h1:mpJirFu/JWMLV0IhKDZleVrVdN5B8QERV4gSXDef5bA= github.com/onflow/flow-core-contracts/lib/go/contracts v0.11.2-0.20220513155751-c4c1f8d59f83/go.mod h1:T6yhM+kWrFxiP6F3hh8lh9DcocHfmv48P4ITnjLhKSk= 
github.com/onflow/flow-core-contracts/lib/go/templates v0.11.2-0.20220513155751-c4c1f8d59f83 h1:w4uXFTvjQmLtA/X50H4YXVlzbdsoL3vDI3Y86jtJOMM= @@ -1512,8 +1512,9 @@ github.com/onflow/flow-nft/lib/go/contracts v0.0.0-20210915191154-12ee8c507a0e/g github.com/onflow/flow/protobuf/go/flow v0.1.8/go.mod h1:kRugbzZjwQqvevJhrnnCFMJZNmoSJmxlKt6hTGXZojM= github.com/onflow/flow/protobuf/go/flow v0.1.9/go.mod h1:kRugbzZjwQqvevJhrnnCFMJZNmoSJmxlKt6hTGXZojM= github.com/onflow/flow/protobuf/go/flow v0.2.2/go.mod h1:gQxYqCfkI8lpnKsmIjwtN2mV/N2PIwc1I+RUK4HPIc8= -github.com/onflow/flow/protobuf/go/flow v0.2.5 h1:IzkN5f3w/qFN2Mshc1I0yNgnT+YbFE5Htz/h8t/VL4c= github.com/onflow/flow/protobuf/go/flow v0.2.5/go.mod h1:gQxYqCfkI8lpnKsmIjwtN2mV/N2PIwc1I+RUK4HPIc8= +github.com/onflow/flow/protobuf/go/flow v0.3.0 h1:DSnThQkS7Hbl1X99vUaXYdVKYfNWd5ZU5R8kl+SAPFM= +github.com/onflow/flow/protobuf/go/flow v0.3.0/go.mod h1:gQxYqCfkI8lpnKsmIjwtN2mV/N2PIwc1I+RUK4HPIc8= github.com/onflow/fusd/lib/go/contracts v0.0.0-20211021081023-ae9de8fb2c7e/go.mod h1:CRX9eXtc9zHaRVTW1Xh4Cf5pZgKkQuu1NuSEVyHXr/0= github.com/onflow/sdks v0.4.2 h1:UdnXOdcIPIdD02n2SxQVGTJBAxGqJBgOkThxI3/IDnk= github.com/onflow/sdks v0.4.2/go.mod h1:F0dj0EyHC55kknLkeD10js4mo14yTdMotnWMslPirrU= diff --git a/integration/localnet/bootstrap.go b/integration/localnet/bootstrap.go index b47a2c6ee7c..a9a5ae41f38 100644 --- a/integration/localnet/bootstrap.go +++ b/integration/localnet/bootstrap.go @@ -90,12 +90,12 @@ func init() { func generateBootstrapData(flowNetworkConf testnet.NetworkConfig) []testnet.ContainerConfig { // Prepare localnet host folders, mapped to Docker container volumes upon `docker compose up` prepareCommonHostFolders() - _, _, _, flowNodeContainerConfigs, _, err := testnet.BootstrapNetwork(flowNetworkConf, BootstrapDir) + bootstrapData, err := testnet.BootstrapNetwork(flowNetworkConf, BootstrapDir) if err != nil { panic(err) } fmt.Println("Flow test network bootstrapping data generated...") - return 
flowNodeContainerConfigs + return bootstrapData.StakedConfs } // localnet/bootstrap.go generates a docker compose file with images configured for a diff --git a/integration/testnet/network.go b/integration/testnet/network.go index c4377ddccff..7d939e82889 100644 --- a/integration/testnet/network.go +++ b/integration/testnet/network.go @@ -32,15 +32,19 @@ import ( consensus_follower "github.com/onflow/flow-go/follower" "github.com/onflow/flow-go/fvm" "github.com/onflow/flow-go/model/bootstrap" + "github.com/onflow/flow-go/model/cluster" dkgmod "github.com/onflow/flow-go/model/dkg" "github.com/onflow/flow-go/model/encodable" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/flow/factory" "github.com/onflow/flow-go/model/flow/filter" "github.com/onflow/flow-go/model/flow/order" "github.com/onflow/flow-go/module/epochs" + "github.com/onflow/flow-go/module/signature" "github.com/onflow/flow-go/network/p2p" "github.com/onflow/flow-go/network/p2p/keyutils" clusterstate "github.com/onflow/flow-go/state/cluster" + "github.com/onflow/flow-go/state/protocol/badger" "github.com/onflow/flow-go/state/protocol/inmem" "github.com/onflow/flow-go/utils/io" "github.com/onflow/flow-go/utils/unittest" @@ -141,6 +145,7 @@ type FlowNetwork struct { seal *flow.Seal BootstrapDir string BootstrapSnapshot *inmem.Snapshot + BootstrapData *BootstrapData } // Identities returns a list of identities, one for each node in the network. 
@@ -561,9 +566,15 @@ func PrepareFlowNetwork(t *testing.T, networkConf NetworkConfig) *FlowNetwork { t.Logf("BootstrapDir: %s \n", bootstrapDir) - root, result, seal, confs, bootstrapSnapshot, err := BootstrapNetwork(networkConf, bootstrapDir) + bootstrapData, err := BootstrapNetwork(networkConf, bootstrapDir) require.Nil(t, err) + root := bootstrapData.Root + result := bootstrapData.Result + seal := bootstrapData.Seal + confs := bootstrapData.StakedConfs + bootstrapSnapshot := bootstrapData.Snapshot + logger := unittest.LoggerWithLevel(zerolog.InfoLevel).With(). Str("module", "flownetwork"). Str("testcase", t.Name()). @@ -586,6 +597,7 @@ func PrepareFlowNetwork(t *testing.T, networkConf NetworkConfig) *FlowNetwork { result: result, BootstrapDir: bootstrapDir, BootstrapSnapshot: bootstrapSnapshot, + BootstrapData: bootstrapData, } // check that at-least 2 full access nodes must be configure in your test suite @@ -746,9 +758,13 @@ func (net *FlowNetwork) AddNode(t *testing.T, bootstrapDir string, nodeConf Cont err = os.Mkdir(flowDataDir, 0700) require.NoError(t, err) - flowProfilerDir := filepath.Join(tmpdir, profilerDir) - err = os.Mkdir(flowProfilerDir, 0755) - require.NoError(t, err) + // create the profiler dir for the node + flowProfilerDir := filepath.Join(flowDataDir, "./profiler") + t.Logf("create profiler dir: %v", flowProfilerDir) + err = os.MkdirAll(flowProfilerDir, 0755) + if err != nil && !os.IsExist(err) { + panic(err) + } // create a directory for the bootstrap files // we create a node-specific bootstrap directory to enable testing nodes @@ -983,14 +999,23 @@ func followerNodeInfos(confs []ConsensusFollowerConfig) ([]bootstrap.NodeInfo, e return nodeInfos, nil } -func BootstrapNetwork(networkConf NetworkConfig, bootstrapDir string) (*flow.Block, *flow.ExecutionResult, *flow.Seal, []ContainerConfig, *inmem.Snapshot, error) { +type BootstrapData struct { + Root *flow.Block + Result *flow.ExecutionResult + Seal *flow.Seal + StakedConfs []ContainerConfig 
+ Snapshot *inmem.Snapshot + ClusterRootBlocks []*cluster.Block +} + +func BootstrapNetwork(networkConf NetworkConfig, bootstrapDir string) (*BootstrapData, error) { chainID := flow.Localnet chain := chainID.Chain() // number of nodes nNodes := len(networkConf.Nodes) if nNodes == 0 { - return nil, nil, nil, nil, nil, fmt.Errorf("must specify at least one node") + return nil, fmt.Errorf("must specify at least one node") } // Sort so that access nodes start up last @@ -999,13 +1024,13 @@ func BootstrapNetwork(networkConf NetworkConfig, bootstrapDir string) (*flow.Blo // generate staking and networking keys for each configured node stakedConfs, err := setupKeys(networkConf) if err != nil { - return nil, nil, nil, nil, nil, fmt.Errorf("failed to setup keys: %w", err) + return nil, fmt.Errorf("failed to setup keys: %w", err) } // generate the follower node keys (follower nodes do not run as docker containers) followerInfos, err := followerNodeInfos(networkConf.ConsensusFollowers) if err != nil { - return nil, nil, nil, nil, nil, fmt.Errorf("failed to generate node info for consensus followers: %w", err) + return nil, fmt.Errorf("failed to generate node info for consensus followers: %w", err) } allNodeInfos := append(toNodeInfos(stakedConfs), followerInfos...) 
@@ -1016,7 +1041,7 @@ func BootstrapNetwork(networkConf NetworkConfig, bootstrapDir string) (*flow.Blo dkg, err := runDKG(stakedConfs) if err != nil { - return nil, nil, nil, nil, nil, fmt.Errorf("failed to run DKG: %w", err) + return nil, fmt.Errorf("failed to run DKG: %w", err) } // write private key files for each DKG participant @@ -1027,7 +1052,7 @@ func BootstrapNetwork(networkConf NetworkConfig, bootstrapDir string) (*flow.Blo path := fmt.Sprintf(bootstrap.PathRandomBeaconPriv, nodeID) err = WriteJSON(filepath.Join(bootstrapDir, path), sk) if err != nil { - return nil, nil, nil, nil, nil, err + return nil, err } } @@ -1040,17 +1065,17 @@ func BootstrapNetwork(networkConf NetworkConfig, bootstrapDir string) (*flow.Blo } err = utils.WriteStakingNetworkingKeyFiles(allNodeInfos, writeJSONFile) if err != nil { - return nil, nil, nil, nil, nil, fmt.Errorf("failed to write private key files: %w", err) + return nil, fmt.Errorf("failed to write private key files: %w", err) } err = utils.WriteSecretsDBEncryptionKeyFiles(allNodeInfos, writeFile) if err != nil { - return nil, nil, nil, nil, nil, fmt.Errorf("failed to write secrets db key files: %w", err) + return nil, fmt.Errorf("failed to write secrets db key files: %w", err) } err = utils.WriteMachineAccountFiles(chainID, stakedNodeInfos, writeJSONFile) if err != nil { - return nil, nil, nil, nil, nil, fmt.Errorf("failed to write machine account files: %w", err) + return nil, fmt.Errorf("failed to write machine account files: %w", err) } // define root block parameters @@ -1066,27 +1091,43 @@ func BootstrapNetwork(networkConf NetworkConfig, bootstrapDir string) (*flow.Blo // generate QC signerData, err := run.GenerateQCParticipantData(consensusNodes, consensusNodes, dkg) if err != nil { - return nil, nil, nil, nil, nil, err + return nil, err } votes, err := run.GenerateRootBlockVotes(root, signerData) if err != nil { - return nil, nil, nil, nil, nil, err + return nil, err } qc, err := run.GenerateRootQC(root, votes, 
signerData, signerData.Identities()) if err != nil { - return nil, nil, nil, nil, nil, err + return nil, err } // generate root blocks for each collector cluster - clusterAssignments, clusterQCs, err := setupClusterGenesisBlockQCs(networkConf.NClusters, epochCounter, stakedConfs) + clusterRootBlocks, clusterAssignments, clusterQCs, err := setupClusterGenesisBlockQCs(networkConf.NClusters, epochCounter, stakedConfs) if err != nil { - return nil, nil, nil, nil, nil, err + return nil, err + } + + // TODO: extract to func to be reused with `constructRootResultAndSeal` + qcsWithSignerIDs := make([]*flow.QuorumCertificateWithSignerIDs, 0, len(clusterQCs)) + for i, clusterQC := range clusterQCs { + members := clusterAssignments[i] + signerIDs, err := signature.DecodeSignerIndicesToIdentifiers(members, clusterQC.SignerIndices) + if err != nil { + return nil, fmt.Errorf("could not decode cluster QC signer indices: %w", err) + } + qcsWithSignerIDs = append(qcsWithSignerIDs, &flow.QuorumCertificateWithSignerIDs{ + View: clusterQC.View, + BlockID: clusterQC.BlockID, + SignerIDs: signerIDs, + SigData: clusterQC.SigData, + }) } randomSource := make([]byte, flow.EpochSetupRandomSourceLength) _, err = rand.Read(randomSource) if err != nil { - return nil, nil, nil, nil, nil, err + return nil, err } dkgOffsetView := root.Header.View + networkConf.ViewsInStakingAuction - 1 @@ -1106,14 +1147,14 @@ func BootstrapNetwork(networkConf NetworkConfig, bootstrapDir string) (*flow.Blo epochCommit := &flow.EpochCommit{ Counter: epochCounter, - ClusterQCs: flow.ClusterQCVoteDatasFromQCs(clusterQCs), + ClusterQCs: flow.ClusterQCVoteDatasFromQCs(qcsWithSignerIDs), DKGGroupKey: dkg.PubGroupKey, DKGParticipantKeys: dkg.PubKeyShares, } cdcRandomSource, err := cadence.NewString(hex.EncodeToString(randomSource)) if err != nil { - return nil, nil, nil, nil, nil, fmt.Errorf("could not convert random source: %w", err) + return nil, fmt.Errorf("could not convert random source: %w", err) } epochConfig := 
epochs.EpochConfig{ RewardCut: cadence.UFix64(0), @@ -1144,27 +1185,39 @@ func BootstrapNetwork(networkConf NetworkConfig, bootstrapDir string) (*flow.Blo fvm.WithIdentities(participants), ) if err != nil { - return nil, nil, nil, nil, nil, err + return nil, err } // generate execution result and block seal result := run.GenerateRootResult(root, commit, epochSetup, epochCommit) seal, err := run.GenerateRootSeal(result) if err != nil { - return nil, nil, nil, nil, nil, fmt.Errorf("generating root seal failed: %w", err) + return nil, fmt.Errorf("generating root seal failed: %w", err) } snapshot, err := inmem.SnapshotFromBootstrapState(root, result, seal, qc) if err != nil { - return nil, nil, nil, nil, nil, fmt.Errorf("could not create bootstrap state snapshot: %w", err) + return nil, fmt.Errorf("could not create bootstrap state snapshot: %w", err) + } + + err = badger.IsValidRootSnapshotQCs(snapshot) + if err != nil { + return nil, fmt.Errorf("invalid root snapshot qcs: %w", err) } err = WriteJSON(filepath.Join(bootstrapDir, bootstrap.PathRootProtocolStateSnapshot), snapshot.Encodable()) if err != nil { - return nil, nil, nil, nil, nil, err + return nil, err } - return root, result, seal, stakedConfs, snapshot, nil + return &BootstrapData{ + Root: root, + Result: result, + Seal: seal, + StakedConfs: stakedConfs, + Snapshot: snapshot, + ClusterRootBlocks: clusterRootBlocks, + }, nil } // setupKeys generates private staking and networking keys for each configured @@ -1254,16 +1307,18 @@ func runDKG(confs []ContainerConfig) (dkgmod.DKGData, error) { // setupClusterGenesisBlockQCs generates bootstrapping resources necessary for each collector cluster: // * a cluster-specific root block // * a cluster-specific root QC -func setupClusterGenesisBlockQCs(nClusters uint, epochCounter uint64, confs []ContainerConfig) (flow.AssignmentList, []*flow.QuorumCertificate, error) { +func setupClusterGenesisBlockQCs(nClusters uint, epochCounter uint64, confs []ContainerConfig) 
([]*cluster.Block, flow.AssignmentList, []*flow.QuorumCertificate, error) { - participants := toParticipants(confs) + participantsUnsorted := toParticipants(confs) + participants := participantsUnsorted.Sort(order.Canonical) collectors := participants.Filter(filter.HasRole(flow.RoleCollection)) assignments := unittest.ClusterAssignment(nClusters, collectors) - clusters, err := flow.NewClusterList(assignments, collectors) + clusters, err := factory.NewClusterList(assignments, collectors) if err != nil { - return nil, nil, fmt.Errorf("could not create cluster list: %w", err) + return nil, nil, nil, fmt.Errorf("could not create cluster list: %w", err) } + rootBlocks := make([]*cluster.Block, 0, nClusters) qcs := make([]*flow.QuorumCertificate, 0, nClusters) for _, cluster := range clusters { @@ -1276,28 +1331,31 @@ func setupClusterGenesisBlockQCs(nClusters uint, epochCounter uint64, confs []Co } // gather cluster participants - participants := make([]bootstrap.NodeInfo, 0, len(cluster)) + clusterNodeInfos := make([]bootstrap.NodeInfo, 0, len(cluster)) for _, conf := range confs { _, exists := lookup[conf.NodeID] if exists { - participants = append(participants, conf.NodeInfo) + clusterNodeInfos = append(clusterNodeInfos, conf.NodeInfo) } } - if len(cluster) != len(participants) { // sanity check - return nil, nil, fmt.Errorf("requiring a node info for each cluster participant") + if len(cluster) != len(clusterNodeInfos) { // sanity check + return nil, nil, nil, fmt.Errorf("requiring a node info for each cluster participant") } - // generate qc for root cluster block - qc, err := run.GenerateClusterRootQC(participants, bootstrap.ToIdentityList(participants), block) + // must order in canonical ordering otherwise decoding signer indices from cluster QC would fail + clusterCommittee := bootstrap.ToIdentityList(clusterNodeInfos).Sort(order.Canonical) + qc, err := run.GenerateClusterRootQC(clusterNodeInfos, clusterCommittee, block) if err != nil { - return nil, nil, err + 
return nil, nil, nil, fmt.Errorf("fail to generate cluster root QC with clusterNodeInfos %v, %w", + clusterNodeInfos, err) } // add block and qc to list qcs = append(qcs, qc) + rootBlocks = append(rootBlocks, block) } - return assignments, qcs, nil + return rootBlocks, assignments, qcs, nil } // writePrivateKeyFiles writes the staking and machine account private key files. diff --git a/integration/tests/collection/suite.go b/integration/tests/collection/suite.go index 32b09da08dd..8c3c470ac11 100644 --- a/integration/tests/collection/suite.go +++ b/integration/tests/collection/suite.go @@ -16,6 +16,7 @@ import ( "github.com/onflow/flow-go/integration/tests/lib" "github.com/onflow/flow-go/model/cluster" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/flow/factory" "github.com/onflow/flow-go/model/flow/filter" "github.com/onflow/flow-go/model/messages" clusterstate "github.com/onflow/flow-go/state/cluster" @@ -144,7 +145,7 @@ func (suite *CollectorSuite) Clusters() flow.ClusterList { suite.Require().True(ok) collectors := suite.net.Identities().Filter(filter.HasRole(flow.RoleCollection)) - clusters, err := flow.NewClusterList(setup.Assignments, collectors) + clusters, err := factory.NewClusterList(setup.Assignments, collectors) suite.Require().Nil(err) return clusters } diff --git a/integration/tests/consensus/inclusion_test.go b/integration/tests/consensus/inclusion_test.go index ed8609d2d99..94131abdaa4 100644 --- a/integration/tests/consensus/inclusion_test.go +++ b/integration/tests/consensus/inclusion_test.go @@ -16,6 +16,7 @@ import ( "github.com/onflow/flow-go/integration/tests/lib" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/messages" + "github.com/onflow/flow-go/module/signature" "github.com/onflow/flow-go/utils/unittest" ) @@ -120,8 +121,13 @@ func (is *InclusionSuite) TestCollectionGuaranteeIncluded() { // generate a sentinel collection guarantee sentinel := unittest.CollectionGuaranteeFixture() - 
sentinel.SignerIDs = []flow.Identifier{is.collID} + // there is only one collection node in the cluster + clusterCommittee := flow.IdentifierList{is.collID} + signerIndices, err := signature.EncodeSignersToIndices(clusterCommittee, clusterCommittee) + require.NoError(t, err) + sentinel.SignerIndices = signerIndices sentinel.ReferenceBlockID = is.net.Root().ID() + sentinel.ChainID = is.net.BootstrapData.ClusterRootBlocks[0].Header.ChainID colID := sentinel.CollectionID is.waitUntilSeenProposal(deadline) diff --git a/ledger/common/bitutils/utils.go b/ledger/common/bitutils/utils.go index 300d805bb10..c9eb134b0af 100644 --- a/ledger/common/bitutils/utils.go +++ b/ledger/common/bitutils/utils.go @@ -41,5 +41,10 @@ func ClearBit(b []byte, i int) { // MakeBitVector allocates a byte slice of minimal size that can hold numberBits. func MakeBitVector(numberBits int) []byte { - return make([]byte, (numberBits+7)>>3) + return make([]byte, MinimalByteSliceLength(numberBits)) +} + +// MinimalByteSliceLength returns the minimal length of a byte slice that can store n bits. 
+func MinimalByteSliceLength(n int) int { + return (n + 7) >> 3 } diff --git a/ledger/common/bitutils/utils_test.go b/ledger/common/bitutils/utils_test.go index 7f0340394b5..f6d3e0d2383 100644 --- a/ledger/common/bitutils/utils_test.go +++ b/ledger/common/bitutils/utils_test.go @@ -23,6 +23,18 @@ func TestBitVectorAllocation(t *testing.T) { } } +// Test_PaddedByteSliceLength tests that MinimalByteSliceLength returns the +func Test_PaddedByteSliceLength(t *testing.T) { + for bits := 0; bits <= 127; bits++ { + numBytes := bits / 8 // integer division with floor + if bits%8 > 0 { + numBytes += 1 + } + + assert.Equal(t, numBytes, MinimalByteSliceLength(bits)) + } +} + func TestBitTools(t *testing.T) { seed := time.Now().UnixNano() t.Logf("rand seed is %d", seed) diff --git a/model/convert/service_event.go b/model/convert/service_event.go index 4223eee0c10..738e0f0dc24 100644 --- a/model/convert/service_event.go +++ b/model/convert/service_event.go @@ -10,6 +10,7 @@ import ( "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/fvm/systemcontracts" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/flow/assignment" "github.com/onflow/flow-go/model/flow/order" ) @@ -181,7 +182,7 @@ func convertClusterAssignments(cdcClusters []cadence.Value) (flow.AssignmentList indices := make(map[uint]struct{}) // parse cluster assignments to Go types - assignments := make(flow.AssignmentList, len(cdcClusters)) + identifierLists := make([]flow.IdentifierList, len(cdcClusters)) for _, value := range cdcClusters { cdcCluster, ok := value.(cadence.Struct) @@ -223,10 +224,14 @@ func convertClusterAssignments(cdcClusters []cadence.Value) (flow.AssignmentList if err != nil { return nil, fmt.Errorf("could not convert hex string to identifer: %w", err) } - assignments[clusterIndex] = append(assignments[clusterIndex], nodeID) + + identifierLists[clusterIndex] = append(identifierLists[clusterIndex], nodeID) } } + // sort identifier lists in Canonical order + 
assignments := assignment.FromIdentifierLists(identifierLists) + return assignments, nil } diff --git a/model/encoding/sigtypes.go b/model/encoding/sigtypes.go new file mode 100644 index 00000000000..24cc46fb091 --- /dev/null +++ b/model/encoding/sigtypes.go @@ -0,0 +1,19 @@ +package encoding + +// SigType is the aggregable signature type. +type SigType uint8 + +// SigType specifies the role of the signature in the protocol. +// Both types are aggregatable cryptographic signatures. +// * SigTypeRandomBeacon type is for random beacon signatures. +// * SigTypeStaking is for Hotstuff signatures. +const ( + SigTypeStaking SigType = 0 + SigTypeRandomBeacon SigType = 1 +) + +// Valid returns true if the signature is either SigTypeStaking or SigTypeRandomBeacon +// else return false +func (t SigType) Valid() bool { + return t == SigTypeStaking || t == SigTypeRandomBeacon +} diff --git a/model/flow/account.go b/model/flow/account.go index 16acfb813c7..35606897abd 100644 --- a/model/flow/account.go +++ b/model/flow/account.go @@ -4,8 +4,7 @@ package flow import ( "encoding/json" - - "github.com/pkg/errors" + "fmt" "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/crypto/hash" @@ -83,7 +82,7 @@ func (a *AccountPublicKey) UnmarshalJSON(data []byte) error { // - (TODO) It specifies a negative key weight func (a AccountPublicKey) Validate() error { if !CompatibleAlgorithms(a.SignAlgo, a.HashAlgo) { - return errors.Errorf( + return fmt.Errorf( "signing algorithm (%s) is incompatible with hashing algorithm (%s)", a.SignAlgo, a.HashAlgo, diff --git a/model/flow/assignment/sort.go b/model/flow/assignment/sort.go new file mode 100644 index 00000000000..3b135d91152 --- /dev/null +++ b/model/flow/assignment/sort.go @@ -0,0 +1,18 @@ +package assignment + +import ( + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/flow/order" +) + +// FromIdentifierLists creates a `flow.AssignmentList` with canonical ordering from +// the given `identifierLists`. 
+func FromIdentifierLists(identifierLists []flow.IdentifierList) flow.AssignmentList { + assignments := make(flow.AssignmentList, 0, len(identifierLists)) + // in place sort to order the assignment in canonical order + for _, identities := range identifierLists { + assignment := flow.IdentifierList(identities).Sort(order.IdentifierCanonical) + assignments = append(assignments, assignment) + } + return assignments +} diff --git a/model/flow/assignment/sort_test.go b/model/flow/assignment/sort_test.go new file mode 100644 index 00000000000..07aadf30f3f --- /dev/null +++ b/model/flow/assignment/sort_test.go @@ -0,0 +1,29 @@ +package assignment_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/flow/assignment" +) + +// Check that FromIdentifierLists will sort the identifierList in canonical order +func TestSort(t *testing.T) { + node1, err := flow.HexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000001") + require.NoError(t, err) + node2, err := flow.HexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000002") + require.NoError(t, err) + node3, err := flow.HexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000003") + require.NoError(t, err) + + unsorted := []flow.IdentifierList{flow.IdentifierList{node2, node1, node3}} + + assignments := assignment.FromIdentifierLists(unsorted) + require.Len(t, assignments, 1) + + require.Equal(t, node1, assignments[0][0]) + require.Equal(t, node2, assignments[0][1]) + require.Equal(t, node3, assignments[0][2]) +} diff --git a/model/flow/cluster.go b/model/flow/cluster.go index ba2f87b69c7..ff02022f85c 100644 --- a/model/flow/cluster.go +++ b/model/flow/cluster.go @@ -7,7 +7,7 @@ import ( // AssignmentList is a list of identifier lists. Each list of identifiers lists the // identities that are part of the given cluster. 
-type AssignmentList [][]Identifier +type AssignmentList []IdentifierList // ClusterList is a list of identity lists. Each `IdentityList` represents the // nodes assigned to a specific cluster. diff --git a/model/flow/cluster_test.go b/model/flow/cluster_test.go index ae5f22d71a9..52d8f39e72c 100644 --- a/model/flow/cluster_test.go +++ b/model/flow/cluster_test.go @@ -7,6 +7,7 @@ import ( "github.com/stretchr/testify/require" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/flow/factory" "github.com/onflow/flow-go/utils/unittest" ) @@ -17,7 +18,7 @@ func TestClusterAssignments(t *testing.T) { assignments := unittest.ClusterAssignment(10, identities) assert.Len(t, assignments, 10) - clusters, err := flow.NewClusterList(assignments, identities) + clusters, err := factory.NewClusterList(assignments, identities) require.NoError(t, err) assert.Equal(t, assignments, clusters.Assignments()) } diff --git a/model/flow/collectionGuarantee.go b/model/flow/collectionGuarantee.go index 1e241db7914..c05307c11a7 100644 --- a/model/flow/collectionGuarantee.go +++ b/model/flow/collectionGuarantee.go @@ -11,7 +11,8 @@ import ( type CollectionGuarantee struct { CollectionID Identifier // ID of the collection being guaranteed ReferenceBlockID Identifier // defines expiry of the collection - SignerIDs []Identifier // list of guarantors + ChainID ChainID // the chainID of the cluster in order to determine which cluster this guarantee belongs to + SignerIndices []byte // encoded indices of the signers Signature crypto.Signature // guarantor signatures } diff --git a/model/flow/epoch.go b/model/flow/epoch.go index 68d9d543b47..abdeab6f171 100644 --- a/model/flow/epoch.go +++ b/model/flow/epoch.go @@ -150,14 +150,14 @@ func (c *ClusterQCVoteData) EqualTo(other *ClusterQCVoteData) bool { // ClusterQCVoteDataFromQC converts a quorum certificate to the representation // used by the smart contract, essentially discarding the block ID and view // (which are 
protocol-defined given the EpochSetup event). -func ClusterQCVoteDataFromQC(qc *QuorumCertificate) ClusterQCVoteData { +func ClusterQCVoteDataFromQC(qc *QuorumCertificateWithSignerIDs) ClusterQCVoteData { return ClusterQCVoteData{ SigData: qc.SigData, VoterIDs: qc.SignerIDs, } } -func ClusterQCVoteDatasFromQCs(qcs []*QuorumCertificate) []ClusterQCVoteData { +func ClusterQCVoteDatasFromQCs(qcs []*QuorumCertificateWithSignerIDs) []ClusterQCVoteData { qcVotes := make([]ClusterQCVoteData, 0, len(qcs)) for _, qc := range qcs { qcVotes = append(qcVotes, ClusterQCVoteDataFromQC(qc)) diff --git a/model/flow/factory/cluster_list.go b/model/flow/factory/cluster_list.go new file mode 100644 index 00000000000..29bf374ac23 --- /dev/null +++ b/model/flow/factory/cluster_list.go @@ -0,0 +1,63 @@ +package factory + +import ( + "fmt" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/flow/order" +) + +// NewClusterList creates a new cluster list based on the given cluster assignment +// and the provided list of identities. +// The caller must ensure each assignment contains identities ordered in canonical order, so that +// each cluster in the returned cluster list is ordered in canonical order as well. If not, +// an error will be returned. 
+func NewClusterList(assignments flow.AssignmentList, collectors flow.IdentityList) (flow.ClusterList, error) { + + // build a lookup for all the identities by node identifier + lookup := make(map[flow.Identifier]*flow.Identity) + for _, collector := range collectors { + lookup[collector.NodeID] = collector + } + if len(lookup) != len(collectors) { + return nil, fmt.Errorf("duplicate collector in list") + } + + // replicate the identifier list but use identities instead + clusters := make(flow.ClusterList, 0, len(assignments)) + for i, participants := range assignments { + cluster := make(flow.IdentityList, 0, len(participants)) + if len(participants) == 0 { + return nil, fmt.Errorf("participants in assignment list is empty, cluster index %v", i) + } + + // check that the assignment is sorted in canonical order + prev := participants[0] + + for i, participantID := range participants { + participant, found := lookup[participantID] + if !found { + return nil, fmt.Errorf("could not find collector identity (%x)", participantID) + } + cluster = append(cluster, participant) + delete(lookup, participantID) + + if i > 0 { + if !order.IdentifierCanonical(prev, participantID) { + return nil, fmt.Errorf("the assignments are not sorted in canonical order in cluster index %v, prev %v, next %v", + i, prev, participantID) + } + } + prev = participantID + } + + clusters = append(clusters, cluster) + } + + // check that every collector was assigned + if len(lookup) != 0 { + return nil, fmt.Errorf("missing collector assignments (%s)", lookup) + } + + return clusters, nil +} diff --git a/model/flow/factory/cluster_list_test.go b/model/flow/factory/cluster_list_test.go new file mode 100644 index 00000000000..0c938d5e8da --- /dev/null +++ b/model/flow/factory/cluster_list_test.go @@ -0,0 +1,25 @@ +package factory_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/flow/factory" + 
"github.com/onflow/flow-go/utils/unittest" +) + +// NewClusterList assumes the input assignments are sorted, and fail if not. +// This tests verifies that NewClusterList has implemented the check on the assumption. +func TestNewClusterListFail(t *testing.T) { + identities := unittest.IdentityListFixture(100, unittest.WithRole(flow.RoleCollection)) + assignments := unittest.ClusterAssignment(10, identities) + + tmp := assignments[1][0] + assignments[1][0] = assignments[1][1] + assignments[1][1] = tmp + + _, err := factory.NewClusterList(assignments, identities) + require.Error(t, err) +} diff --git a/model/flow/filter/identity.go b/model/flow/filter/identity.go index 7d62f113041..2c312c05028 100644 --- a/model/flow/filter/identity.go +++ b/model/flow/filter/identity.go @@ -103,7 +103,7 @@ func HasRole(roles ...flow.Role) flow.IdentityFilter { // current epoch in good standing. var IsValidCurrentEpochParticipant = And( HasWeight(true), - Not(Ejected), + Not(Ejected), // ejection will change signer index ) // IsVotingConsensusCommitteeMember is a identity filter for all members of diff --git a/model/flow/header.go b/model/flow/header.go index 67a23cac33c..cc8c9b23d72 100644 --- a/model/flow/header.go +++ b/model/flow/header.go @@ -30,9 +30,7 @@ type Header struct { View uint64 // View number at which this block was proposed. - ParentVoterIDs []Identifier // List of voters who signed the parent block. - // A quorum certificate can be extrated from the header. - // This field is the SignerIDs field of the extracted quorum certificate. + ParentVoterIndices []byte // a bitvector that represents all the voters for the parent block. ParentVoterSigData []byte // aggregated signature over the parent block. 
Not a single cryptographic // signature since the data represents cryptographic signatures serialized in some way (concatenation or other) @@ -54,7 +52,7 @@ func (h Header) Body() interface{} { PayloadHash Identifier Timestamp uint64 View uint64 - ParentVoterIDs []Identifier + ParentVoterIndices []byte ParentVoterSigData []byte ProposerID Identifier }{ @@ -64,7 +62,7 @@ func (h Header) Body() interface{} { PayloadHash: h.PayloadHash, Timestamp: uint64(h.Timestamp.UnixNano()), View: h.View, - ParentVoterIDs: h.ParentVoterIDs, + ParentVoterIndices: h.ParentVoterIndices, ParentVoterSigData: h.ParentVoterSigData, ProposerID: h.ProposerID, } @@ -93,29 +91,21 @@ func (h Header) ID() Identifier { defer mutexHeader.Unlock() // compare these elements individually - if prevHeader.ParentVoterIDs != nil && + if prevHeader.ParentVoterIndices != nil && prevHeader.ParentVoterSigData != nil && prevHeader.ProposerSigData != nil && - len(h.ParentVoterIDs) == len(prevHeader.ParentVoterIDs) && + len(h.ParentVoterIndices) == len(prevHeader.ParentVoterIndices) && len(h.ParentVoterSigData) == len(prevHeader.ParentVoterSigData) && len(h.ProposerSigData) == len(prevHeader.ProposerSigData) { - bNotEqual := false - - for i, v := range h.ParentVoterIDs { - if v == prevHeader.ParentVoterIDs[i] { - continue - } - bNotEqual = true - break - } - if !bNotEqual && - h.ChainID == prevHeader.ChainID && + + if h.ChainID == prevHeader.ChainID && h.Timestamp == prevHeader.Timestamp && h.Height == prevHeader.Height && h.ParentID == prevHeader.ParentID && h.View == prevHeader.View && h.PayloadHash == prevHeader.PayloadHash && bytes.Equal(h.ProposerSigData, prevHeader.ProposerSigData) && + bytes.Equal(h.ParentVoterIndices, prevHeader.ParentVoterIndices) && bytes.Equal(h.ParentVoterSigData, prevHeader.ParentVoterSigData) && h.ProposerID == prevHeader.ProposerID { diff --git a/model/flow/header_test.go b/model/flow/header_test.go index f1b82037e33..e86e862a9d2 100644 --- a/model/flow/header_test.go +++ 
b/model/flow/header_test.go @@ -40,7 +40,7 @@ func TestHeaderFingerprint(t *testing.T) { PayloadHash flow.Identifier Timestamp uint64 View uint64 - ParentVoterIDs []flow.Identifier + ParentVoterIndices []byte ParentVoterSigData crypto.Signature ProposerID flow.Identifier } @@ -52,7 +52,7 @@ func TestHeaderFingerprint(t *testing.T) { PayloadHash: decoded.PayloadHash, Timestamp: time.Unix(0, int64(decoded.Timestamp)).UTC(), View: decoded.View, - ParentVoterIDs: decoded.ParentVoterIDs, + ParentVoterIndices: decoded.ParentVoterIndices, ParentVoterSigData: decoded.ParentVoterSigData, ProposerID: decoded.ProposerID, ProposerSigData: header.ProposerSigData, // since this field is not encoded/decoded, just set it to the original diff --git a/model/flow/identifier.go b/model/flow/identifier.go index d21005b81fd..57041040363 100644 --- a/model/flow/identifier.go +++ b/model/flow/identifier.go @@ -28,6 +28,9 @@ type Identifier [IdentifierLen]byte // IdentifierFilter is a filter on identifiers. type IdentifierFilter func(Identifier) bool +// IdentifierOrder is a sort for identifier +type IdentifierOrder func(Identifier, Identifier) bool + var ( // ZeroID is the lowest value in the 32-byte ID space. ZeroID = Identifier{} diff --git a/model/flow/identifierList.go b/model/flow/identifierList.go index 224340b7b02..33ce2447707 100644 --- a/model/flow/identifierList.go +++ b/model/flow/identifierList.go @@ -3,6 +3,7 @@ package flow import ( "bytes" "math/rand" + "sort" "github.com/rs/zerolog/log" ) @@ -115,3 +116,23 @@ IDLoop: } return dup } + +func (il IdentifierList) Sort(less IdentifierOrder) IdentifierList { + dup := il.Copy() + sort.Slice(dup, func(i int, j int) bool { + return less(dup[i], dup[j]) + }) + return dup +} + +// Sorted returns whether the list is sorted by the input ordering. 
+func (il IdentifierList) Sorted(less IdentifierOrder) bool { + for i := 0; i < len(il)-1; i++ { + a := il[i] + b := il[i+1] + if !less(a, b) { + return false + } + } + return true +} diff --git a/model/flow/identity.go b/model/flow/identity.go index 9040e046be4..2f44e30cf52 100644 --- a/model/flow/identity.go +++ b/model/flow/identity.go @@ -387,7 +387,7 @@ func (il IdentityList) Sorted(less IdentityOrder) bool { } // NodeIDs returns the NodeIDs of the nodes in the list. -func (il IdentityList) NodeIDs() []Identifier { +func (il IdentityList) NodeIDs() IdentifierList { nodeIDs := make([]Identifier, 0, len(il)) for _, id := range il { nodeIDs = append(nodeIDs, id.NodeID) diff --git a/model/flow/order/identifier.go b/model/flow/order/identifier.go new file mode 100644 index 00000000000..0102005b1b8 --- /dev/null +++ b/model/flow/order/identifier.go @@ -0,0 +1,13 @@ +package order + +import ( + "bytes" + + "github.com/onflow/flow-go/model/flow" +) + +// IdentifierCanonical is a function for sorting IdentifierList into +// canonical order +func IdentifierCanonical(id1 flow.Identifier, id2 flow.Identifier) bool { + return bytes.Compare(id1[:], id2[:]) < 0 +} diff --git a/model/flow/order/identity.go b/model/flow/order/identity.go index a5096cb5d73..5b78c7a3dd4 100644 --- a/model/flow/order/identity.go +++ b/model/flow/order/identity.go @@ -3,18 +3,16 @@ package order import ( - "bytes" - "github.com/onflow/flow-go/model/flow" ) // Canonical represents the canonical ordering for identity lists. 
-var Canonical = ByNodeIDAsc - -func ByNodeIDAsc(identity1 *flow.Identity, identity2 *flow.Identity) bool { - return bytes.Compare(identity1.NodeID[:], identity2.NodeID[:]) < 0 +func Canonical(identity1 *flow.Identity, identity2 *flow.Identity) bool { + return IdentifierCanonical(identity1.NodeID, identity2.NodeID) } +// ByReferenceOrder return a function for sorting identities based on the order +// of the given nodeIDs func ByReferenceOrder(nodeIDs []flow.Identifier) func(*flow.Identity, *flow.Identity) bool { indices := make(map[flow.Identifier]uint) for index, nodeID := range nodeIDs { @@ -28,3 +26,22 @@ func ByReferenceOrder(nodeIDs []flow.Identifier) func(*flow.Identity, *flow.Iden return indices[identity1.NodeID] < indices[identity2.NodeID] } } + +// IdentityListCanonical takes a list of identities and +// check if it's ordered in canonical order. +func IdentityListCanonical(identities flow.IdentityList) bool { + if len(identities) == 0 { + return true + } + + prev := identities[0].ID() + for i := 1; i < len(identities); i++ { + id := identities[i].ID() + if !IdentifierCanonical(prev, id) { + return false + } + prev = id + } + + return true +} diff --git a/model/flow/order/identity_test.go b/model/flow/order/identity_test.go new file mode 100644 index 00000000000..2c79b61ab4a --- /dev/null +++ b/model/flow/order/identity_test.go @@ -0,0 +1,18 @@ +// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED + +package order_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/model/flow/order" + "github.com/onflow/flow-go/utils/unittest" +) + +// Test the canonical ordering of identity and identifier match +func TestCanonicalOrderingMatch(t *testing.T) { + identities := unittest.IdentityListFixture(100) + require.Equal(t, identities.Sort(order.Canonical).NodeIDs(), identities.NodeIDs().Sort(order.IdentifierCanonical)) +} diff --git a/model/flow/quorum_certificate.go b/model/flow/quorum_certificate.go index 
d7aec581506..a74924693b3 100644 --- a/model/flow/quorum_certificate.go +++ b/model/flow/quorum_certificate.go @@ -7,21 +7,29 @@ type QuorumCertificate struct { View uint64 BlockID Identifier - // SignerIDs holds the IDs of HotStuff participants that voted for the block. - // Note that for the main consensus committee, members can provide a staking or a threshold signature - // to indicate their HotStuff vote. In addition to contributing to consensus progress, committee members - // contribute to running the Random Beacon if they express their vote through a threshold signature. - // In order to distinguish the signature types, the SigData has to be deserialized. Specifically, - // the field `SigData.SigType` (bit vector) indicates for each signer which sig type they provided. - // For collection cluster, the SignerIDs includes all the staking sig signers. - SignerIDs []Identifier + // SignerIndices encodes the HotStuff participants whose vote is included in this QC. + // For `n` authorized consensus nodes, `SignerIndices` is an n-bit vector (padded with tailing + // zeros to reach full bytes). We list the nodes in their canonical order, as defined by the protocol. + SignerIndices []byte // For consensus cluster, the SigData is a serialization of the following fields // - SigType []byte, bit-vector indicating the type of sig produced by the signer. - // - AggregatedStakingSig crypto.Signature, - // - AggregatedRandomBeaconSig crypto.Signature - // - ReconstrcutedRandomBeaconSig crypto.Signature + // - AggregatedStakingSig []byte + // - AggregatedRandomBeaconSig []byte + // - ReconstructedRandomBeaconSig crypto.Signature // For collector cluster HotStuff, SigData is simply the aggregated staking signatures // from all signers. SigData []byte } + +// QuorumCertificateWithSignerIDs is a QuorumCertificate, where the signing nodes are +// identified via their `flow.Identifier`s instead of indices. 
Working with IDs as opposed to +// indices is less efficient, but simpler, because we don't require a canonical node order. +// It is used for bootstrapping new Epochs, because the FlowEpoch smart contract has no +// notion of node ordering. +type QuorumCertificateWithSignerIDs struct { + View uint64 + BlockID Identifier + SignerIDs []Identifier + SigData []byte +} diff --git a/model/flow/sealing_segment_test.go b/model/flow/sealing_segment_test.go index f2f0e71e04d..d64826ad1e4 100644 --- a/model/flow/sealing_segment_test.go +++ b/model/flow/sealing_segment_test.go @@ -258,7 +258,7 @@ func (suite *SealingSegmentSuite) TestBuild_MultipleFinalBlockSeals() { // TestBuild_RootSegment tests we can build a valid root sealing segment. func (suite *SealingSegmentSuite) TestBuild_RootSegment() { - root, result, seal := unittest.BootstrapFixture(unittest.IdentityListFixture(5)) + root, result, seal := unittest.BootstrapFixture(unittest.IdentityListFixture(5, unittest.WithAllRoles())) suite.sealsByBlockID[root.ID()] = seal suite.addResult(result) err := suite.builder.AddBlock(root) @@ -277,7 +277,7 @@ func (suite *SealingSegmentSuite) TestBuild_RootSegment() { // a single-block sealing segment with a block view not equal to 0. 
func (suite *SealingSegmentSuite) TestBuild_RootSegmentWrongView() { - root, result, seal := unittest.BootstrapFixture(unittest.IdentityListFixture(5)) + root, result, seal := unittest.BootstrapFixture(unittest.IdentityListFixture(5, unittest.WithAllRoles())) root.Header.View = 10 // invalid root block view suite.sealsByBlockID[root.ID()] = seal suite.addResult(result) diff --git a/module/builder/consensus/builder.go b/module/builder/consensus/builder.go index caa8954f02a..4cc46b3ee7b 100644 --- a/module/builder/consensus/builder.go +++ b/module/builder/consensus/builder.go @@ -632,7 +632,7 @@ func (b *Builder) createProposal(parentID flow.Identifier, // NOTE: we could abstract all of this away into an interface{} field, // but that would be over the top as we will probably always use hotstuff View: 0, - ParentVoterIDs: nil, + ParentVoterIndices: nil, ParentVoterSigData: nil, ProposerID: flow.ZeroID, ProposerSigData: nil, diff --git a/module/epochs/qc_voter_test.go b/module/epochs/qc_voter_test.go index 33ef5a66d88..4fc4f604529 100644 --- a/module/epochs/qc_voter_test.go +++ b/module/epochs/qc_voter_test.go @@ -13,6 +13,7 @@ import ( hotstuff "github.com/onflow/flow-go/consensus/hotstuff/mocks" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/flow/factory" flowmodule "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/epochs" module "github.com/onflow/flow-go/module/mock" @@ -76,7 +77,7 @@ func (suite *Suite) SetupTest() { var err error assignments := unittest.ClusterAssignment(2, suite.nodes) - suite.clustering, err = flow.NewClusterList(assignments, suite.nodes) + suite.clustering, err = factory.NewClusterList(assignments, suite.nodes) suite.Require().Nil(err) suite.epoch.On("Counter").Return(suite.counter, nil) diff --git a/module/finalizer/collection/finalizer.go b/module/finalizer/collection/finalizer.go index 5096d0c73e7..1f87950e560 100644 --- a/module/finalizer/collection/finalizer.go +++ 
b/module/finalizer/collection/finalizer.go @@ -163,8 +163,9 @@ func (f *Finalizer) MakeFinal(blockID flow.Identifier) error { Guarantee: flow.CollectionGuarantee{ CollectionID: payload.Collection.ID(), ReferenceBlockID: payload.ReferenceBlockID, - SignerIDs: step.ParentVoterIDs, - Signature: step.ParentVoterSigData, + ChainID: header.ChainID, + SignerIndices: step.ParentVoterIndices, + Signature: nil, // TODO: to remove because it's not easily verifiable by consensus nodes }, }) } diff --git a/module/finalizer/collection/finalizer_test.go b/module/finalizer/collection/finalizer_test.go index dc435210d9d..8dd1039560e 100644 --- a/module/finalizer/collection/finalizer_test.go +++ b/module/finalizer/collection/finalizer_test.go @@ -191,8 +191,9 @@ func TestFinalizer(t *testing.T) { Guarantee: flow.CollectionGuarantee{ CollectionID: block.Payload.Collection.ID(), ReferenceBlockID: refBlock.ID(), - SignerIDs: block.Header.ParentVoterIDs, - Signature: block.Header.ParentVoterSigData, + ChainID: block.Header.ChainID, + SignerIndices: block.Header.ParentVoterIndices, + Signature: nil, }, }) }) @@ -243,16 +244,18 @@ func TestFinalizer(t *testing.T) { Guarantee: flow.CollectionGuarantee{ CollectionID: block1.Payload.Collection.ID(), ReferenceBlockID: refBlock.ID(), - SignerIDs: block1.Header.ParentVoterIDs, - Signature: block1.Header.ParentVoterSigData, + ChainID: block1.Header.ChainID, + SignerIndices: block1.Header.ParentVoterIndices, + Signature: nil, }, }) prov.AssertCalled(t, "SubmitLocal", &messages.SubmitCollectionGuarantee{ Guarantee: flow.CollectionGuarantee{ CollectionID: block2.Payload.Collection.ID(), ReferenceBlockID: refBlock.ID(), - SignerIDs: block2.Header.ParentVoterIDs, - Signature: block2.Header.ParentVoterSigData, + ChainID: block2.Header.ChainID, + SignerIndices: block2.Header.ParentVoterIndices, + Signature: nil, }, }) }) @@ -303,8 +306,9 @@ func TestFinalizer(t *testing.T) { Guarantee: flow.CollectionGuarantee{ CollectionID: 
block1.Payload.Collection.ID(), ReferenceBlockID: refBlock.ID(), - SignerIDs: block1.Header.ParentVoterIDs, - Signature: block1.Header.ParentVoterSigData, + ChainID: block1.Header.ChainID, + SignerIndices: block1.Header.ParentVoterIndices, + Signature: nil, }, }) }) @@ -356,8 +360,9 @@ func TestFinalizer(t *testing.T) { Guarantee: flow.CollectionGuarantee{ CollectionID: block1.Payload.Collection.ID(), ReferenceBlockID: refBlock.ID(), - SignerIDs: block1.Header.ParentVoterIDs, - Signature: block1.Header.ParentVoterSigData, + ChainID: block1.Header.ChainID, + SignerIndices: block1.Header.ParentVoterIndices, + Signature: nil, }, }) }) diff --git a/module/signature/checksum.go b/module/signature/checksum.go new file mode 100644 index 00000000000..7784e7a38db --- /dev/null +++ b/module/signature/checksum.go @@ -0,0 +1,99 @@ +package signature + +import ( + "bytes" + "encoding/binary" + "fmt" + "hash/crc32" + + "github.com/onflow/flow-go/model/flow" +) + +// CheckSumLen is fixed to be 4 bytes +const CheckSumLen = 4 + +func checksum(data []byte) [CheckSumLen]byte { + // since the checksum is only for detecting honest mistake, + // crc32 is enough + sum := crc32.ChecksumIEEE(data) + // converting the uint32 checksum value into [4]byte + var sumBytes [CheckSumLen]byte + binary.BigEndian.PutUint32(sumBytes[:], sum) + return sumBytes +} + +// CheckSumFromIdentities returns checksum for the given identities +func CheckSumFromIdentities(identities []flow.Identifier) [CheckSumLen]byte { + return checksum(EncodeIdentities(identities)) +} + +// EncodeIdentities will concatenation all the identities into bytes +func EncodeIdentities(identities []flow.Identifier) []byte { + // a simple concatenation is deterministic, since each identifier has fixed length. + encoded := make([]byte, 0, len(identities)*flow.IdentifierLen) + for _, id := range identities { + encoded = append(encoded, id[:]...) 
+ } + return encoded +} + +// PrefixCheckSum prefix the given data with the checksum of the given identifier list +func PrefixCheckSum(canonicalList []flow.Identifier, signrIndices []byte) []byte { + sum := CheckSumFromIdentities(canonicalList) + prefixed := make([]byte, 0, len(sum)+len(signrIndices)) + prefixed = append(prefixed, sum[:]...) + prefixed = append(prefixed, signrIndices[:]...) + return prefixed +} + +// SplitCheckSum splits the given bytes into two parts: +// - prefixed checksum of the canonical identifier list +// - the signerIndices +// Expected error during normal operations: +// * ErrInvalidChecksum if the input is shorter than the expected checksum contained therein +func SplitCheckSum(checkSumPrefixedSignerIndices []byte) ([CheckSumLen]byte, []byte, error) { + if len(checkSumPrefixedSignerIndices) < CheckSumLen { + return [CheckSumLen]byte{}, nil, + fmt.Errorf("expect checkSumPrefixedSignerIndices to have at least %v bytes, but got %v: %w", + CheckSumLen, len(checkSumPrefixedSignerIndices), ErrInvalidChecksum) + } + + var sum [CheckSumLen]byte + copy(sum[:], checkSumPrefixedSignerIndices[:CheckSumLen]) + signerIndices := checkSumPrefixedSignerIndices[CheckSumLen:] + + return sum, signerIndices, nil +} + +// CompareAndExtract reads the checksum from the given `checkSumPrefixedSignerIndices` +// and compares it with the checksum of the given identifier list. +// It returns the signer indices if the checksum matches. +// Inputs: +// - canonicalList is the canonical list from decoder's view +// - checkSumPrefixedSignerIndices is the signer indices created by the encoder, +// and prefixed with the checksum of the canonical list from encoder's view. 
+// Expected error during normal operations: +// * ErrInvalidChecksum if the input is shorter than the expected checksum contained therein +func CompareAndExtract(canonicalList []flow.Identifier, checkSumPrefixedSignerIndices []byte) ([]byte, error) { + // the checkSumPrefixedSignerIndices bytes contains two parts: + // 1. the checksum of the canonical identifier list from encoder's view + // 2. the signer indices + // so split them + encoderChecksum, signerIndices, err := SplitCheckSum(checkSumPrefixedSignerIndices) + if err != nil { + return nil, fmt.Errorf("could not split checksum: %w", err) + } + + // this canonicalList here is from decoder's view. + // by comparing the checksum of the canonical list from encoder's view + // and the full canonical list from decoder's view, we can tell if the encoder + // encodes the signer indices using the same list as decoder. + decoderChecksum := CheckSumFromIdentities(canonicalList) + match := bytes.Equal(encoderChecksum[:], decoderChecksum[:]) + if !match { + return nil, fmt.Errorf("decoder sees a canonical list %v, which has a different checksum %x than the encoder's checksum %x: %w", + canonicalList, decoderChecksum, encoderChecksum, ErrInvalidChecksum) + } + + return signerIndices, nil +} diff --git a/module/signature/checksum_test.go b/module/signature/checksum_test.go new file mode 100644 index 00000000000..8becfbb5840 --- /dev/null +++ b/module/signature/checksum_test.go @@ -0,0 +1,80 @@ +package signature_test + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/require" + "pgregory.net/rapid" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/signature" + "github.com/onflow/flow-go/utils/unittest" +) + +// Test that the CheckSumFromIdentities method is able to produce checksum for empty identity list, +// and produce the correct checksum +func TestCheckSum(t *testing.T) { + t.Run("no identity", func(t *testing.T) { + require.Equal(t, signature.CheckSumFromIdentities(nil), 
signature.CheckSumFromIdentities(nil)) + require.Equal(t, signature.CheckSumFromIdentities([]flow.Identifier{}), signature.CheckSumFromIdentities([]flow.Identifier{})) + require.Equal(t, signature.CheckSumFromIdentities(nil), signature.CheckSumFromIdentities([]flow.Identifier{})) + }) + + t.Run("same identities, same checksum", func(t *testing.T) { + ids := unittest.IdentifierListFixture(3) + require.Equal(t, signature.CheckSumFromIdentities(ids), signature.CheckSumFromIdentities(ids)) + require.Equal(t, signature.CheckSumFromIdentities(ids[1:]), signature.CheckSumFromIdentities(ids[1:])) + require.Equal(t, signature.CheckSumFromIdentities(ids[2:]), signature.CheckSumFromIdentities(ids[2:])) + }) + + t.Run("different identities, different checksum", func(t *testing.T) { + ids := unittest.IdentifierListFixture(4) + require.NotEqual(t, signature.CheckSumFromIdentities(ids), signature.CheckSumFromIdentities(ids[1:])) // subset + require.NotEqual(t, signature.CheckSumFromIdentities(ids[1:]), signature.CheckSumFromIdentities(ids[:2])) // overlap + require.NotEqual(t, signature.CheckSumFromIdentities(ids[:2]), signature.CheckSumFromIdentities(ids[2:])) // no overlap + }) + + t.Run("checksum length always constant", func(t *testing.T) { + ids := unittest.IdentifierListFixture(4) + require.Len(t, signature.CheckSumFromIdentities(nil), signature.CheckSumLen) + require.Len(t, signature.CheckSumFromIdentities(ids), signature.CheckSumLen) + require.Len(t, signature.CheckSumFromIdentities(ids[1:]), signature.CheckSumLen) + require.Len(t, signature.CheckSumFromIdentities(ids[2:]), signature.CheckSumLen) + require.Len(t, signature.CheckSumFromIdentities(ids[3:]), signature.CheckSumLen) + }) +} + +// Test that if an encoder generates a checksum with a committee and added to some random data +// using PrefixCheckSum method, then an decoder using the same committee to call CompareAndExtract +// is able to extract the same data as the encoder. 
+func TestPrefixCheckSum(t *testing.T) { + rapid.Check(t, func(t *rapid.T) { + committeeSize := rapid.IntRange(0, 300).Draw(t, "committeeSize").(int) + committee := unittest.IdentifierListFixture(committeeSize) + data := rapid.IntRange(0, 200).Map(func(count int) []byte { + return unittest.RandomBytes(count) + }).Draw(t, "data").([]byte) + extracted, err := signature.CompareAndExtract(committee, signature.PrefixCheckSum(committee, data)) + require.NoError(t, err) + require.Equal(t, data, extracted) + }) +} + +// Test_InvalidCheckSum verifies correct handling of invalid checksums. We expect: +// 1. `SplitCheckSum` returns the expected sentinel error `ErrInvalidChecksum` if the input is shorter than 4 bytes +// 2. `CompareAndExtract` returns `ErrInvalidChecksum` if the checksum does not match +func Test_InvalidCheckSum(t *testing.T) { + t.Run("checksum too short", func(t *testing.T) { + for i := 0; i < 4; i++ { + _, _, err := signature.SplitCheckSum(unittest.RandomBytes(i)) + require.True(t, errors.Is(err, signature.ErrInvalidChecksum)) + } + }) + + t.Run("mismatching checksum", func(t *testing.T) { + committee := unittest.IdentifierListFixture(20) + _, err := signature.CompareAndExtract(committee, unittest.RandomBytes(112)) + require.True(t, errors.Is(err, signature.ErrInvalidChecksum)) + }) +} diff --git a/module/signature/errors.go b/module/signature/errors.go index 63c712e5e6e..912404a89a2 100644 --- a/module/signature/errors.go +++ b/module/signature/errors.go @@ -5,7 +5,21 @@ import ( "fmt" ) -var ErrInsufficientShares = errors.New("insufficient threshold signature shares") +var ( + ErrInvalidSignatureFormat = errors.New("signature's binary format is invalid") + + ErrInsufficientShares = errors.New("insufficient threshold signature shares") + + // ErrIncompatibleBitVectorLength indicates that the bit vector's length is different than + // the expected length, based on the supplied node list. 
+ ErrIncompatibleBitVectorLength = errors.New("bit vector has incompatible length") + + // ErrIllegallyPaddedBitVector indicates that the index vector was padded with unexpected bit values. + ErrIllegallyPaddedBitVector = errors.New("index vector padded with unexpected bit values") + + // ErrInvalidChecksum indicates that the index vector's checksum is invalid + ErrInvalidChecksum = errors.New("index vector's checksum is invalid") +) /* ********************* InvalidSignatureIncludedError ********************* */ @@ -94,3 +108,10 @@ func IsInsufficientSignaturesError(err error) bool { var e InsufficientSignaturesError return errors.As(err, &e) } + +// IsDecodeSignerIndicesError returns whether err is about decoding signer indices +func IsDecodeSignerIndicesError(err error) bool { + return errors.Is(err, ErrIllegallyPaddedBitVector) || + errors.Is(err, ErrIncompatibleBitVectorLength) || + errors.Is(err, ErrInvalidChecksum) +} diff --git a/module/signature/signer_indices.go b/module/signature/signer_indices.go new file mode 100644 index 00000000000..bdab92082e1 --- /dev/null +++ b/module/signature/signer_indices.go @@ -0,0 +1,316 @@ +package signature + +import ( + "fmt" + + "github.com/onflow/flow-go/ledger/common/bitutils" + "github.com/onflow/flow-go/model/flow" +) + +// EncodeSignerToIndicesAndSigType encodes the given stakingSigners and beaconSigners into bit vectors for +// signer indices and sig types. +// PREREQUISITES: +// * The input `canonicalIdentifiers` must exhaustively list the set of authorized signers in their canonical order. +// * The inputs `stakingSigners` and `beaconSigners` are treated as sets, i.e. they +// should not contain any duplicates. +// * A node can be listed in either `stakingSigners` or `beaconSigners`. A node appearing in both lists +// constitutes an illegal input. 
+// * `stakingSigners` must be a subset of `canonicalIdentifiers` +// * `beaconSigners` must be a subset of `canonicalIdentifiers` +// +// RETURN VALUES: +// * `signerIndices` is a bit vector. Let signerIndices[i] denote the ith bit of `signerIndices`. +// ┌ 1 if and only if canonicalIdentifiers[i] is in `stakingSigners` or `beaconSigners` +// signerIndices[i] = └ 0 otherwise +// Let `n` be the length of `canonicalIdentifiers`. `signerIndices` contains at least `n` bits, though, we +// right-pad it with trailing zeros to full bytes. +// * `sigTypes` is a bit vector. Let sigTypes[i] denote the ith bit of `sigTypes` +// ┌ 1 if and only if the ith signer is in `beaconSigners` +// sigTypes[i] = └ 0 if and only if the ith signer is in `stakingSigners` +// (Per prerequisite, we require that no signer is listed in both `beaconSigners` and `stakingSigners`) +// +// Example: +// As an example consider the case where we have a committee C of 10 nodes in canonical order +// C = [A,B,C,D,E,F,G,H,I,J] +// where nodes [B,F] are stakingSigners and beaconSigners are [C,E,G,I,J]. +// * First return parameter: `signerIndices` +// - We start with a bit vector v that has |C| number of bits +// - If a node contributed either as staking signer or beacon signer, +// we set the respective bit to 1: +// [A,B,C,D,E,F,G,H,I,J] +// ↓ ↓ ↓ ↓ ↓ ↓ ↓ +// 0,1,1,0,1,1,1,0,1,1 +// - Lastly, right-pad the resulting bit vector with 0 to full bytes. We have 10 committee members, +// so we pad to 2 bytes: +// 01101110 11000000 +// * Second return parameter: `sigTypes` +// - Here, we restrict our focus on the signers, which we encoded in the previous step. +// In our example, nodes [B,C,E,F,G,I,J] signed in canonical order. This is exactly the same order, +// as we have represented the signer in the last step. 
+// - For these 5 nodes in their canonical order, we encode each node's signature type as
+// bit-value 1: node was in beaconSigners
+// bit-value 0: node was in stakingSigners
+// This results in the bit vector
+// [B,C,E,F,G,I,J]
+// ↓ ↓ ↓ ↓ ↓ ↓ ↓
+// 0,1,0,1,1,1,1
+// - Again, we right-pad with zeros to full bytes. As we only had 7 signers, the sigType slice is 1 byte long
+// 01011110
+//
+// The signer indices are prefixed with a checksum of the canonicalIdentifiers, which can be used by the decoder
+// to verify if the decoder is using the same canonicalIdentifiers as the encoder to decode the signer indices.
+//
+// ERROR RETURNS
+// During normal operations, no error returns are expected. This is because encoding signer sets is generally
+// part of the node's internal work to generate messages. Hence, the inputs to this method come from other
+// trusted components within the node. Therefore, any illegal input is treated as a symptom of an internal bug.
+func EncodeSignerToIndicesAndSigType(
+	canonicalIdentifiers flow.IdentifierList,
+	stakingSigners flow.IdentifierList,
+	beaconSigners flow.IdentifierList,
+) (signerIndices []byte, sigTypes []byte, err error) {
+	stakingSignersLookup := stakingSigners.Lookup()
+	if len(stakingSignersLookup) != len(stakingSigners) {
+		return nil, nil, fmt.Errorf("duplicated entries in staking signers %v", stakingSignersLookup)
+	}
+	beaconSignersLookup := beaconSigners.Lookup()
+	if len(beaconSignersLookup) != len(beaconSigners) {
+		// report the beacon lookup here (the original message printed the staking lookup by mistake)
+		return nil, nil, fmt.Errorf("duplicated entries in beacon signers %v", beaconSignersLookup)
+	}
+
+	// encode Identifiers to `signerIndices`; and for each signer, encode the signature type in `sigTypes`
+	signerIndices = bitutils.MakeBitVector(len(canonicalIdentifiers))
+	sigTypes = bitutils.MakeBitVector(len(stakingSigners) + len(beaconSigners))
+	signerCounter := 0
+	for canonicalIdx, member := range canonicalIdentifiers {
+		if _, ok := stakingSignersLookup[member]; ok {
+			bitutils.SetBit(signerIndices, canonicalIdx)
+			// The default value for sigTypes is bit zero, which corresponds to a staking sig.
+			// Hence, we don't have to change anything here.
+			delete(stakingSignersLookup, member)
+			signerCounter++
+			continue
+		}
+		if _, ok := beaconSignersLookup[member]; ok {
+			bitutils.SetBit(signerIndices, canonicalIdx)
+			bitutils.SetBit(sigTypes, signerCounter)
+			delete(beaconSignersLookup, member)
+			signerCounter++
+			continue
+		}
+	}
+
+	if len(stakingSignersLookup) > 0 {
+		return nil, nil, fmt.Errorf("unknown staking signers %v", stakingSignersLookup)
+	}
+	if len(beaconSignersLookup) > 0 {
+		return nil, nil, fmt.Errorf("unknown or duplicated beacon signers %v", beaconSignersLookup)
+	}
+
+	prefixed := PrefixCheckSum(canonicalIdentifiers, signerIndices)
+
+	return prefixed, sigTypes, nil
+}
+
+// DecodeSigTypeToStakingAndBeaconSigners decodes the bit-vector `sigType` to the set of
+// staking signer identities (`stakingSigners`) and the set of beacon signer identities (`beaconSigners`).
+// Prerequisite:
+// * The input `signers` must be the set of signers in their canonical order.
+//
+// Expected Error returns during normal operations:
+// * ErrIncompatibleBitVectorLength indicates that `signerIndices` has the wrong length
+// * ErrIllegallyPaddedBitVector if the vector is padded with bits other than 0
+func DecodeSigTypeToStakingAndBeaconSigners(
+	signers flow.IdentityList,
+	sigType []byte,
+) (stakingSigners flow.IdentityList, beaconSigners flow.IdentityList, err error) {
+	numberSigners := len(signers)
+	if e := validPadding(sigType, numberSigners); e != nil {
+		return nil, nil, fmt.Errorf("sigType is invalid: %w", e)
+	}
+
+	// decode bits to Identities
+	stakingSigners = make(flow.IdentityList, 0, numberSigners)
+	beaconSigners = make(flow.IdentityList, 0, numberSigners)
+	for i, signer := range signers {
+		if bitutils.ReadBit(sigType, i) == 0 {
+			stakingSigners = append(stakingSigners, signer)
+		} else {
+			beaconSigners = append(beaconSigners, signer)
+		}
+	}
+	return stakingSigners, beaconSigners, nil
+}
+
+// EncodeSignersToIndices encodes the given signerIDs into compacted bit vector.
+// PREREQUISITES:
+// * The input `canonicalIdentifiers` must exhaustively list the set of authorized signers in their canonical order.
+// * The input `signerIDs` represents a set, i.e. it should not contain any duplicates.
+// * `signerIDs` must be a subset of `canonicalIdentifiers`
+//
+// RETURN VALUE:
+// * `signerIndices` is a bit vector. Let signerIndices[i] denote the ith bit of `signerIndices`.
+// ┌ 1 if and only if canonicalIdentifiers[i] is in `signerIDs`
+// signerIndices[i] = └ 0 otherwise
+// Let `n` be the length of `canonicalIdentifiers`. `signerIndices` contains at least `n` bits, though, we
+// right-pad it with trailing zeros to full bytes.
+//
+// Example:
+// As an example consider the case where we have a committee C of 10 nodes in canonical order
+// C = [A,B,C,D,E,F,G,H,I,J]
+// where nodes [B,F] are stakingSigners, and beaconSigners are [C,E,G,I,J].
+// * First return parameter: QC.signerIndices +// - We start with a bit vector v that has |C| number of bits +// - If a node contributed either as staking signer or beacon signer, +// we set the respective bit to 1: +// [A,B,C,D,E,F,G,H,I,J] +// ↓ ↓ ↓ ↓ ↓ ↓ ↓ +// 0,1,1,0,1,1,1,0,1,1 +// - Lastly, right-pad the resulting bit vector with 0 to full bytes. We have 10 committee members, +// so we pad to 2 bytes: +// 01101110 11000000 +// +// ERROR RETURNS +// During normal operations, no error returns are expected. This is because encoding signer sets is generally +// part of the node's internal work to generate messages. Hence, the inputs to this method come from other +// trusted components within the node. Therefore, any illegal input is treated as a symptom of an internal bug. +// canonicalIdentifiers represents all identities who are eligible to sign the given resource. It excludes +// identities who are ineligible to sign the given resource. For example, canonicalIdentifiers in the context +// of a cluster consensus quorum certificate would include authorized members of the cluster and +// exclude ejected members of the cluster, or unejected collection nodes from a different cluster. +// the signer indices is prefixed with a checksum of the canonicalIdentifiers, which can be used by the decoder +// to verify if the decoder is using the same canonicalIdentifiers as the encoder to decode the signer indices. 
+func EncodeSignersToIndices(
+	canonicalIdentifiers flow.IdentifierList,
+	signerIDs flow.IdentifierList,
+) (signerIndices []byte, err error) {
+	signersLookup := signerIDs.Lookup()
+	if len(signersLookup) != len(signerIDs) {
+		return nil, fmt.Errorf("duplicated entries in signerIDs %v", signerIDs)
+	}
+
+	// encode Identifiers to bits
+	signerIndices = bitutils.MakeBitVector(len(canonicalIdentifiers))
+	for canonicalIdx, member := range canonicalIdentifiers {
+		if _, ok := signersLookup[member]; ok {
+			bitutils.SetBit(signerIndices, canonicalIdx)
+			delete(signersLookup, member)
+		}
+	}
+	if len(signersLookup) > 0 {
+		return nil, fmt.Errorf("unknown signers IDs in the keys of %v", signersLookup)
+	}
+
+	prefixed := PrefixCheckSum(canonicalIdentifiers, signerIndices)
+
+	return prefixed, nil
+}
+
+// DecodeSignerIndicesToIdentifiers decodes the given compacted bit vector into signerIDs
+// Prerequisite:
+// * The input `canonicalIdentifiers` must exhaustively list the set of authorized signers in their canonical order.
+//
+// Expected Error returns during normal operations:
+// * ErrIncompatibleBitVectorLength indicates that `signerIndices` has the wrong length
+// * ErrIllegallyPaddedBitVector if the vector is padded with bits other than 0
+// * ErrInvalidChecksum if the input is shorter than the expected checksum contained therein
+func DecodeSignerIndicesToIdentifiers(
+	canonicalIdentifiers flow.IdentifierList,
+	prefixed []byte,
+) (flow.IdentifierList, error) {
+	// the prefixed contains the checksum of the canonicalIdentifiers that the signerIndices
+	// creator saw.
+	// extract the checksum and compare with the canonicalIdentifiers to see if both
+	// the signerIndices creator and validator see the same list.
+ signerIndices, err := CompareAndExtract(canonicalIdentifiers, prefixed) + if err != nil { + return nil, fmt.Errorf("could not extract signer indices from prefixed data: %w", err) + } + + numberCanonicalNodes := len(canonicalIdentifiers) + err = validPadding(signerIndices, numberCanonicalNodes) + if err != nil { + return nil, fmt.Errorf("signerIndices are invalid: %w", err) + } + + // decode bits to Identifiers + signerIDs := make(flow.IdentifierList, 0, numberCanonicalNodes) + for i := 0; i < numberCanonicalNodes; i++ { + if bitutils.ReadBit(signerIndices, i) == 1 { + signerIDs = append(signerIDs, canonicalIdentifiers[i]) + } + } + return signerIDs, nil +} + +// DecodeSignerIndicesToIdentities decodes the given compacted bit vector into node Identities. +// Prerequisite: +// * The input `canonicalIdentifiers` must exhaustively list the set of authorized signers in their canonical order. +// +// Expected Error returns during normal operations: +// * ErrIncompatibleBitVectorLength indicates that `signerIndices` has the wrong length +// * ErrIllegallyPaddedBitVector is the vector is padded with bits other than 0 +// * ErrInvalidChecksum if the input is shorter than the expected checksum contained therein +func DecodeSignerIndicesToIdentities( + canonicalIdentities flow.IdentityList, + prefixed []byte, +) (flow.IdentityList, error) { + // the prefixed contains the checksum of the canonicalIdentifiers that the signerIndices + // creator saw. + // extract the checksum and compare with the canonicalIdentifiers to see if both + // the signerIndices creator and validator see the same list. 
+ signerIndices, err := CompareAndExtract(canonicalIdentities.NodeIDs(), prefixed) + if err != nil { + return nil, fmt.Errorf("could not extract signer indices from prefixed data: %w", err) + } + + numberCanonicalNodes := len(canonicalIdentities) + if e := validPadding(signerIndices, numberCanonicalNodes); e != nil { + return nil, fmt.Errorf("signerIndices padding are invalid: %w", e) + } + + // decode bits to Identities + signerIdentities := make(flow.IdentityList, 0, numberCanonicalNodes) + for i := 0; i < numberCanonicalNodes; i++ { + if bitutils.ReadBit(signerIndices, i) == 1 { + signerIdentities = append(signerIdentities, canonicalIdentities[i]) + } + } + return signerIdentities, nil +} + +// validPadding verifies that `bitVector` satisfies the following criteria +// 1. The `bitVector`'s length [in bytes], must be the _minimal_ possible length such that it can hold +// `numUsedBits` number of bits. Otherwise, we return an `ErrIncompatibleBitVectorLength`. +// 2. If `numUsedBits` is _not_ an integer-multiple of 8, `bitVector` is padded with tailing bits. Per +// convention, these bits must be zero. Otherwise, we return an `ErrIllegallyPaddedBitVector`. +// All errors represent expected failure cases for byzantine inputs. There are _no unexpected_ error returns. +func validPadding(bitVector []byte, numUsedBits int) error { + // Verify condition 1: + l := len(bitVector) + if l != bitutils.MinimalByteSliceLength(numUsedBits) { + return fmt.Errorf("the bit vector contains a payload of %d used bits, so it should have %d bytes but has %d bytes: %w", + numUsedBits, bitutils.MinimalByteSliceLength(numUsedBits), l, ErrIncompatibleBitVectorLength) + } + // Condition 1 implies that the number of padded bits must be strictly smaller than 8. Otherwise, the vector + // could have fewer bytes and still have enough room to store `numUsedBits`. + + // Verify condition 2, i.e. 
that all padded bits are all 0: + // * As `bitVector` passed check 1, all padded bits are located in `bitVector`s _last byte_. + // * Let `lastByte` be the last byte of `bitVector`. The leading bits, specifically `numUsedBits & 7`, + // belong to the used payload, which could have non-zero values. We remove these using left-bit-shifts. + // The result contains exactly all padded bits (plus some auxiliary 0-bits included by the bit-shift + // operator). Hence, condition 2 is satisfied if and only if the result is identical to zero. + // Note that this implementation is much more efficient than individually checking the padded bits, as we check all + // padded bits at once; furthermore, we only use multiplication, subtraction, shift, which are fast. + if numUsedBits&7 == 0 { // if numUsedBits is multiple of 8, then there are no padding bits to check + return nil + } + // the above check has ensured that lastByte does exist (l==0 is excluded) + lastByte := bitVector[l-1] + if (lastByte << (numUsedBits & 7)) != 0 { // shift by numUsedBits % 8 + return fmt.Errorf("some padded bits are not zero with %d used bits (bitVector: %x): %w", numUsedBits, bitVector, ErrIllegallyPaddedBitVector) + } + + return nil +} diff --git a/module/signature/signer_indices_test.go b/module/signature/signer_indices_test.go new file mode 100644 index 00000000000..260ca2c712c --- /dev/null +++ b/module/signature/signer_indices_test.go @@ -0,0 +1,376 @@ +package signature_test + +import ( + "fmt" + "sort" + "testing" + + "pgregory.net/rapid" + + "github.com/onflow/flow-go/ledger/common/bitutils" + "github.com/onflow/flow-go/model/flow/filter" + "github.com/onflow/flow-go/model/flow/filter/id" + "github.com/onflow/flow-go/model/flow/order" + + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/signature" + "github.com/onflow/flow-go/utils/unittest" +) + +// TestEncodeDecodeIdentities verifies the two path of encoding -> decoding: 
+// 1. Identifiers --encode--> Indices --decode--> Identifiers +// 2. for the decoding step, we offer an optimized convenience function to directly +// decode to full identities: Indices --decode--> Identities +func TestEncodeDecodeIdentities(t *testing.T) { + canonicalIdentities := unittest.IdentityListFixture(20) + canonicalIdentifiers := canonicalIdentities.NodeIDs() + for s := 0; s < 20; s++ { + for e := s; e < 20; e++ { + var signers = canonicalIdentities[s:e] + + // encoding + indices, err := signature.EncodeSignersToIndices(canonicalIdentities.NodeIDs(), signers.NodeIDs()) + require.NoError(t, err) + + // decoding option 1: decode to Identifiers + decodedIDs, err := signature.DecodeSignerIndicesToIdentifiers(canonicalIdentifiers, indices) + require.NoError(t, err) + require.Equal(t, signers.NodeIDs(), decodedIDs) + + // decoding option 2: decode to Identities + decodedIdentities, err := signature.DecodeSignerIndicesToIdentities(canonicalIdentities, indices) + require.NoError(t, err) + require.Equal(t, signers, decodedIdentities) + } + } +} + +func TestEncodeIdentity(t *testing.T) { + only := unittest.IdentifierListFixture(1) + indices, err := signature.EncodeSignersToIndices(only, only) + require.NoError(t, err) + // byte(1,0,0,0,0,0,0,0) + require.Equal(t, []byte{byte(1 << 7)}, indices[signature.CheckSumLen:]) +} + +// TestEncodeFail verifies that an error is returned in case some signer is not part +// of the set of canonicalIdentifiers +func TestEncodeFail(t *testing.T) { + fullIdentities := unittest.IdentifierListFixture(20) + _, err := signature.EncodeSignersToIndices(fullIdentities[1:], fullIdentities[:10]) + require.Error(t, err) +} + +// Test_EncodeSignerToIndicesAndSigType uses fuzzy-testing framework Rapid to +// test the method EncodeSignerToIndicesAndSigType: +// * we generate a set of authorized signer: `committeeIdentities` +// * part of this set is sampled as staking singers: `stakingSigners` +// * another part of `committeeIdentities` is 
sampled as beacon singers: `beaconSigners` +// * we encode the set and check that the results conform to the protocol specification +func Test_EncodeSignerToIndicesAndSigType(t *testing.T) { + rapid.Check(t, func(t *rapid.T) { + // select total committee size, number of random beacon signers and number of staking signers + committeeSize := rapid.IntRange(1, 272).Draw(t, "committeeSize").(int) + numStakingSigners := rapid.IntRange(0, committeeSize).Draw(t, "numStakingSigners").(int) + numRandomBeaconSigners := rapid.IntRange(0, committeeSize-numStakingSigners).Draw(t, "numRandomBeaconSigners").(int) + + // create committee + committeeIdentities := unittest.IdentityListFixture(committeeSize, unittest.WithRole(flow.RoleConsensus)).Sort(order.Canonical) + committee := committeeIdentities.NodeIDs() + stakingSigners, beaconSigners := sampleSigners(committee, numStakingSigners, numRandomBeaconSigners) + + // encode + prefixed, sigTypes, err := signature.EncodeSignerToIndicesAndSigType(committee, stakingSigners, beaconSigners) + require.NoError(t, err) + + signerIndices, err := signature.CompareAndExtract(committeeIdentities.NodeIDs(), prefixed) + require.NoError(t, err) + + // check verify signer indices + unorderedSigners := stakingSigners.Union(beaconSigners) // caution, the Union operation potentially changes the ordering + correctEncoding(t, signerIndices, committee, unorderedSigners) + + // check sigTypes + canSigners := committeeIdentities.Filter(filter.HasNodeID(unorderedSigners...)).NodeIDs() // generates list of signer IDs in canonical order + correctEncoding(t, sigTypes, canSigners, beaconSigners) + }) +} + +// Test_DecodeSigTypeToStakingAndBeaconSigners uses fuzzy-testing framework Rapid to +// test the method DecodeSigTypeToStakingAndBeaconSigners: +// * we generate a set of authorized signer: `committeeIdentities` +// * part of this set is sampled as staking singers: `stakingSigners` +// * another part of `committeeIdentities` is sampled as beacon singers: 
`beaconSigners` +// * we encode the set and check that the results conform to the protocol specification +// * We encode the set using `EncodeSignerToIndicesAndSigType` (tested before) and then decode it. +// Thereby we should recover the original input. Caution, the order might be different, +// so we sort both sets. +func Test_DecodeSigTypeToStakingAndBeaconSigners(t *testing.T) { + rapid.Check(t, func(t *rapid.T) { + // select total committee size, number of random beacon signers and number of staking signers + committeeSize := rapid.IntRange(1, 272).Draw(t, "committeeSize").(int) + numStakingSigners := rapid.IntRange(0, committeeSize).Draw(t, "numStakingSigners").(int) + numRandomBeaconSigners := rapid.IntRange(0, committeeSize-numStakingSigners).Draw(t, "numRandomBeaconSigners").(int) + + // create committee + committeeIdentities := unittest.IdentityListFixture(committeeSize, unittest.WithRole(flow.RoleConsensus)).Sort(order.Canonical) + committee := committeeIdentities.NodeIDs() + stakingSigners, beaconSigners := sampleSigners(committee, numStakingSigners, numRandomBeaconSigners) + + // encode + signerIndices, sigTypes, err := signature.EncodeSignerToIndicesAndSigType(committee, stakingSigners, beaconSigners) + require.NoError(t, err) + + // decode + decSignerIdentites, err := signature.DecodeSignerIndicesToIdentities(committeeIdentities, signerIndices) + require.NoError(t, err) + decStakingSigners, decBeaconSigners, err := signature.DecodeSigTypeToStakingAndBeaconSigners(decSignerIdentites, sigTypes) + require.NoError(t, err) + + // verify; note that there is a slightly different convention between Filter and the decoding logic: + // Filter returns nil for an empty list, while the decoding logic returns an instance of an empty slice + sigIdentities := committeeIdentities.Filter(filter.Or(filter.HasNodeID(stakingSigners...), filter.HasNodeID(beaconSigners...))) // signer identities in canonical order + if len(stakingSigners)+len(decBeaconSigners) > 0 { + 
require.Equal(t, sigIdentities, decSignerIdentites) + } + if len(stakingSigners) == 0 { + require.Empty(t, decStakingSigners) + } else { + require.Equal(t, committeeIdentities.Filter(filter.HasNodeID(stakingSigners...)), decStakingSigners) + } + if len(decBeaconSigners) == 0 { + require.Empty(t, decBeaconSigners) + } else { + require.Equal(t, committeeIdentities.Filter(filter.HasNodeID(beaconSigners...)), decBeaconSigners) + } + }) +} + +func Test_ValidPaddingErrIncompatibleBitVectorLength(t *testing.T) { + var signers flow.IdentityList + var err error + // if bits is multiply of 8, then there is no padding needed, any sig type can be decoded. + signers = unittest.IdentityListFixture(16) + + // 16 bits needs 2 bytes, provided 2 bytes + _, _, err = signature.DecodeSigTypeToStakingAndBeaconSigners(signers, unittest.RandomBytes(2)) + require.NoError(t, err) + + // 1 byte less + _, _, err = signature.DecodeSigTypeToStakingAndBeaconSigners(signers, []byte{byte(255)}) + require.Error(t, err) + require.ErrorAs(t, err, &signature.ErrIncompatibleBitVectorLength) + + // 1 byte more + _, _, err = signature.DecodeSigTypeToStakingAndBeaconSigners(signers, []byte{}) + require.Error(t, err) + require.ErrorAs(t, err, &signature.ErrIncompatibleBitVectorLength) + + // if bits is not multiply of 8, then padding is needed + signers = unittest.IdentityListFixture(15) + _, _, err = signature.DecodeSigTypeToStakingAndBeaconSigners(signers, []byte{byte(255), byte(254)}) + require.NoError(t, err) + + // 1 byte more + _, _, err = signature.DecodeSigTypeToStakingAndBeaconSigners(signers, []byte{byte(255), byte(255), byte(254)}) + require.Error(t, err) + require.ErrorAs(t, err, &signature.ErrIncompatibleBitVectorLength) + + // 1 byte less + _, _, err = signature.DecodeSigTypeToStakingAndBeaconSigners(signers, []byte{byte(254)}) + require.Error(t, err) + require.ErrorAs(t, err, &signature.ErrIncompatibleBitVectorLength) + + // if bits is not multiply of 8, + // 1 byte more + signers = 
unittest.IdentityListFixture(0) + _, _, err = signature.DecodeSigTypeToStakingAndBeaconSigners(signers, []byte{byte(255)}) + require.Error(t, err) + require.ErrorAs(t, err, &signature.ErrIncompatibleBitVectorLength) + + // 1 byte more + signers = unittest.IdentityListFixture(1) + _, _, err = signature.DecodeSigTypeToStakingAndBeaconSigners(signers, []byte{byte(0), byte(0)}) + require.Error(t, err) + require.ErrorAs(t, err, &signature.ErrIncompatibleBitVectorLength) + + // 1 byte less + signers = unittest.IdentityListFixture(7) + _, _, err = signature.DecodeSigTypeToStakingAndBeaconSigners(signers, []byte{}) + require.Error(t, err) + require.ErrorAs(t, err, &signature.ErrIncompatibleBitVectorLength) +} + +func TestValidPaddingErrIllegallyPaddedBitVector(t *testing.T) { + var signers flow.IdentityList + var err error + // if bits is multiply of 8, then there is no padding needed, any sig type can be decoded. + for count := 1; count < 8; count++ { + signers = unittest.IdentityListFixture(count) + _, _, err = signature.DecodeSigTypeToStakingAndBeaconSigners(signers, []byte{byte(255)}) // last bit should be 0, but 1 + require.Error(t, err) + require.ErrorAs(t, err, &signature.ErrIllegallyPaddedBitVector) + + _, _, err = signature.DecodeSigTypeToStakingAndBeaconSigners(signers, []byte{byte(1)}) // last bit should be 0, but 1 + require.Error(t, err) + require.ErrorAs(t, err, &signature.ErrIllegallyPaddedBitVector) + } + + for count := 9; count < 16; count++ { + signers = unittest.IdentityListFixture(count) + _, _, err = signature.DecodeSigTypeToStakingAndBeaconSigners(signers, []byte{byte(255), byte(255)}) // last bit should be 0, but 1 + require.Error(t, err) + require.ErrorAs(t, err, &signature.ErrIllegallyPaddedBitVector) + + _, _, err = signature.DecodeSigTypeToStakingAndBeaconSigners(signers, []byte{byte(1), byte(1)}) // last bit should be 0, but 1 + require.Error(t, err) + require.ErrorAs(t, err, &signature.ErrIllegallyPaddedBitVector) + } +} + +// 
Test_EncodeSignersToIndices uses fuzzy-testing framework Rapid to test the method EncodeSignersToIndices: +// * we generate a set of authorized signer: `identities` +// * part of this set is sampled as singers: `signers` +// * we encode the set and check that the results conform to the protocol specification +func Test_EncodeSignersToIndices(t *testing.T) { + rapid.Check(t, func(t *rapid.T) { + // select total committee size, number of random beacon signers and number of staking signers + committeeSize := rapid.IntRange(1, 272).Draw(t, "committeeSize").(int) + numSigners := rapid.IntRange(0, committeeSize).Draw(t, "numSigners").(int) + + // create committee + identities := unittest.IdentityListFixture(committeeSize, unittest.WithRole(flow.RoleConsensus)).Sort(order.Canonical) + committee := identities.NodeIDs() + signers := committee.Sample(uint(numSigners)) + + // encode + prefixed, err := signature.EncodeSignersToIndices(committee, signers) + require.NoError(t, err) + + signerIndices, err := signature.CompareAndExtract(committee, prefixed) + require.NoError(t, err) + + // check verify signer indices + correctEncoding(t, signerIndices, committee, signers) + }) +} + +// Test_DecodeSignerIndicesToIdentifiers uses fuzzy-testing framework Rapid to test the method DecodeSignerIndicesToIdentifiers: +// * we generate a set of authorized signer: `identities` +// * part of this set is sampled as signers: `signers` +// * We encode the set using `EncodeSignersToIndices` (tested before) and then decode it. +// Thereby we should recover the original input. Caution, the order might be different, +// so we sort both sets. 
+func Test_DecodeSignerIndicesToIdentifiers(t *testing.T) { + rapid.Check(t, func(t *rapid.T) { + // select total committee size, number of random beacon signers and number of staking signers + committeeSize := rapid.IntRange(1, 272).Draw(t, "committeeSize").(int) + numSigners := rapid.IntRange(0, committeeSize).Draw(t, "numSigners").(int) + + // create committee + identities := unittest.IdentityListFixture(committeeSize, unittest.WithRole(flow.RoleConsensus)).Sort(order.Canonical) + committee := identities.NodeIDs() + signers := committee.Sample(uint(numSigners)) + sort.Sort(signers) + + // encode + signerIndices, err := signature.EncodeSignersToIndices(committee, signers) + require.NoError(t, err) + + // decode and verify + decodedSigners, err := signature.DecodeSignerIndicesToIdentifiers(committee, signerIndices) + require.NoError(t, err) + sort.Sort(decodedSigners) + require.Equal(t, signers, decodedSigners) + }) +} + +// Test_DecodeSignerIndicesToIdentities uses fuzzy-testing framework Rapid to test the method DecodeSignerIndicesToIdentities: +// * we generate a set of authorized signer: `identities` +// * part of this set is sampled as singers: `signers` +// * We encode the set using `EncodeSignersToIndices` (tested before) and then decode it. +// Thereby we should recover the original input. Caution, the order might be different, +// so we sort both sets. +// Note: this is _almost_ the same test as `Test_DecodeSignerIndicesToIdentifiers`. However, in the other +// test, we decode to node IDs; while in this test, we decode to full _Identities_. 
+ +const UpperBoundCommitteeSize = 272 + +func Test_DecodeSignerIndicesToIdentities(t *testing.T) { + + rapid.Check(t, func(t *rapid.T) { + // select total committee size, number of random beacon signers and number of staking signers + committeeSize := rapid.IntRange(1, UpperBoundCommitteeSize).Draw(t, "committeeSize").(int) + numSigners := rapid.IntRange(0, committeeSize).Draw(t, "numSigners").(int) + + // create committee + identities := unittest.IdentityListFixture(committeeSize, unittest.WithRole(flow.RoleConsensus)).Sort(order.Canonical) + signers := identities.Sample(uint(numSigners)) + + // encode + signerIndices, err := signature.EncodeSignersToIndices(identities.NodeIDs(), signers.NodeIDs()) + require.NoError(t, err) + + // decode and verify + decodedSigners, err := signature.DecodeSignerIndicesToIdentities(identities, signerIndices) + require.NoError(t, err) + require.Equal(t, signers.Sort(order.Canonical), decodedSigners.Sort(order.Canonical)) + }) +} + +// sampleSigners takes `committee` and samples to _disjoint_ subsets +// (`stakingSigners` and `randomBeaconSigners`) with the specified cardinality +func sampleSigners( + committee flow.IdentifierList, + numStakingSigners int, + numRandomBeaconSigners int, +) (stakingSigners flow.IdentifierList, randomBeaconSigners flow.IdentifierList) { + if numStakingSigners+numRandomBeaconSigners > len(committee) { + panic(fmt.Sprintf("Cannot sample %d nodes out of a committee is size %d", numStakingSigners+numRandomBeaconSigners, len(committee))) + } + + stakingSigners = committee.Sample(uint(numStakingSigners)) + remaining := committee.Filter(id.Not(id.In(stakingSigners...))) + randomBeaconSigners = remaining.Sample(uint(numRandomBeaconSigners)) + return +} + +// correctEncoding verifies that the given indices conform to the following specification: +// * indices is the _smallest_ possible byte slice that contains at least `len(canonicalIdentifiers)` number of _bits_ +// * Let indices[i] denote the ith bit of 
`indices`. We verify that: +// ┌ 1 if and only if canonicalIdentifiers[i] is in `subset` +// indices[i] = └ 0 otherwise +// This function can be used to verify signer indices as well as signature type encoding +func correctEncoding(t require.TestingT, indices []byte, canonicalIdentifiers flow.IdentifierList, subset flow.IdentifierList) { + // verify that indices has correct length + numberBits := 8 * len(indices) + require.True(t, numberBits >= len(canonicalIdentifiers), "signerIndices has too few bits") + require.True(t, numberBits-len(canonicalIdentifiers) < 8, fmt.Sprintf("signerIndices %v is padded with too many %v bits", + numberBits, len(canonicalIdentifiers))) + + // convert canonicalIdentifiers to map Identifier -> index + m := make(map[flow.Identifier]int) + for i, id := range canonicalIdentifiers { + m[id] = i + } + + // make sure that every member of the subset is represented by a 1 in `indices` + for _, id := range subset { + bitIndex := m[id] + require.True(t, bitutils.ReadBit(indices, bitIndex) == 1) + delete(m, id) + } + + // as we delete all IDs in subset from m, the remaining ID in `m` should be represented by a 0 in `indices` + for id := range m { + bitIndex := m[id] + require.True(t, bitutils.ReadBit(indices, bitIndex) == 0) + } + + // the padded bits should also all be 0: + for i := len(canonicalIdentifiers); i < 8*len(indices); i++ { + require.True(t, bitutils.ReadBit(indices, i) == 0) + } +} diff --git a/consensus/hotstuff/signature/type_encoder.go b/module/signature/type_encoder.go similarity index 79% rename from consensus/hotstuff/signature/type_encoder.go rename to module/signature/type_encoder.go index 24ef589f69f..5deae5b5929 100644 --- a/consensus/hotstuff/signature/type_encoder.go +++ b/module/signature/type_encoder.go @@ -3,15 +3,14 @@ package signature import ( "fmt" - "github.com/onflow/flow-go/consensus/hotstuff" - "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/crypto" + 
"github.com/onflow/flow-go/model/encoding" ) const SigLen = crypto.SignatureLenBLSBLS12381 // EncodeSingleSig encodes a single signature into signature data as required by the consensus design. -func EncodeSingleSig(sigType hotstuff.SigType, sig crypto.Signature) []byte { +func EncodeSingleSig(sigType encoding.SigType, sig crypto.Signature) []byte { t := byte(sigType) encoded := make([]byte, 0, len(sig)+1) encoded = append(encoded, t) @@ -22,16 +21,16 @@ func EncodeSingleSig(sigType hotstuff.SigType, sig crypto.Signature) []byte { // DecodeSingleSig decodes the signature data into a cryptographic signature and a type as required by // the consensus design. Cryptographic validity of signatures is _not_ checked. // It returns: -// - 0, nil, ErrInvalidFormat if the sig type is invalid (covers nil or empty sigData) +// - 0, nil, ErrInvalidSignatureFormat if the sig type is invalid (covers nil or empty sigData) // - sigType, signature, nil if the sig type is valid and the decoding is done successfully. 
-func DecodeSingleSig(sigData []byte) (hotstuff.SigType, crypto.Signature, error) { +func DecodeSingleSig(sigData []byte) (encoding.SigType, crypto.Signature, error) { if len(sigData) == 0 { - return 0, nil, fmt.Errorf("empty sig data: %w", model.ErrInvalidFormat) + return 0, nil, fmt.Errorf("empty sig data: %w", ErrInvalidSignatureFormat) } - sigType := hotstuff.SigType(sigData[0]) + sigType := encoding.SigType(sigData[0]) if !sigType.Valid() { - return 0, nil, fmt.Errorf("invalid sig type %v: %w", sigType, model.ErrInvalidFormat) + return 0, nil, fmt.Errorf("invalid sig type %v: %w", sigType, ErrInvalidSignatureFormat) } sig := crypto.Signature(sigData[1:]) @@ -59,7 +58,7 @@ func EncodeDoubleSig(stakingSig crypto.Signature, beaconSig crypto.Signature) [] // and the tailing half random beacon sig // - staking signature, nil, nil: // if sigData is the size of a BLS signature, we interpret sigData entirely as staking signature -// - nil, nil, ErrInvalidFormat if the sig type is invalid (covers nil or empty sigData) +// - nil, nil, ErrInvalidSignatureFormat if the sig type is invalid (covers nil or empty sigData) func DecodeDoubleSig(sigData []byte) (crypto.Signature, crypto.Signature, error) { sigLength := len(sigData) switch sigLength { @@ -69,5 +68,5 @@ func DecodeDoubleSig(sigData []byte) (crypto.Signature, crypto.Signature, error) return sigData[:SigLen], sigData[SigLen:], nil } - return nil, nil, fmt.Errorf("invalid sig data length %d: %w", sigLength, model.ErrInvalidFormat) + return nil, nil, fmt.Errorf("invalid sig data length %d: %w", sigLength, ErrInvalidSignatureFormat) } diff --git a/consensus/hotstuff/signature/consensus_test.go b/module/signature/type_encoder_test.go similarity index 51% rename from consensus/hotstuff/signature/consensus_test.go rename to module/signature/type_encoder_test.go index a62de6b8850..1291a6f3411 100644 --- a/consensus/hotstuff/signature/consensus_test.go +++ b/module/signature/type_encoder_test.go @@ -1,29 +1,30 @@ -package 
signature +package signature_test import ( "testing" "github.com/stretchr/testify/require" - "github.com/onflow/flow-go/consensus/hotstuff" + "github.com/onflow/flow-go/model/encoding" + "github.com/onflow/flow-go/module/signature" "github.com/onflow/flow-go/utils/unittest" ) func TestEncodeDecodeStakingSig(t *testing.T) { sig := unittest.SignatureFixture() - encoded := EncodeSingleSig(hotstuff.SigTypeStaking, sig) - decodedType, decodedSig, err := DecodeSingleSig(encoded) + encoded := signature.EncodeSingleSig(encoding.SigTypeStaking, sig) + decodedType, decodedSig, err := signature.DecodeSingleSig(encoded) require.NoError(t, err) - require.Equal(t, hotstuff.SigTypeStaking, decodedType) + require.Equal(t, encoding.SigTypeStaking, decodedType) require.Equal(t, sig, decodedSig) } func TestEncodeDecodeRandomBeaconSig(t *testing.T) { sig := unittest.SignatureFixture() - encoded := EncodeSingleSig(hotstuff.SigTypeRandomBeacon, sig) - decodedType, decodedSig, err := DecodeSingleSig(encoded) + encoded := signature.EncodeSingleSig(encoding.SigTypeRandomBeacon, sig) + decodedType, decodedSig, err := signature.DecodeSingleSig(encoded) require.NoError(t, err) - require.Equal(t, hotstuff.SigTypeRandomBeacon, decodedType) + require.Equal(t, encoding.SigTypeRandomBeacon, decodedType) require.Equal(t, sig, decodedSig) } @@ -31,16 +32,16 @@ func TestEncodeDecodeRandomBeaconSig(t *testing.T) { func TestEncodeDecodeInvalidSig(t *testing.T) { sig := unittest.SignatureFixture() - for i := int(hotstuff.SigTypeRandomBeacon) + 1; i < int(hotstuff.SigTypeRandomBeacon)+5; i++ { - sigType := hotstuff.SigType(i) - encoded := EncodeSingleSig(sigType, sig) - _, _, err := DecodeSingleSig(encoded) + for i := int(encoding.SigTypeRandomBeacon) + 1; i < int(encoding.SigTypeRandomBeacon)+5; i++ { + sigType := encoding.SigType(i) + encoded := signature.EncodeSingleSig(sigType, sig) + _, _, err := signature.DecodeSingleSig(encoded) require.Error(t, err) } } func TestDecodeEmptySig(t *testing.T) { - 
_, _, err := DecodeSingleSig([]byte{}) + _, _, err := signature.DecodeSingleSig([]byte{}) require.Error(t, err) } @@ -48,15 +49,15 @@ func TestDecodeEmptySig(t *testing.T) { func TestEncodeTwoSigsDifferent(t *testing.T) { sigs := unittest.SignaturesFixture(2) sig1, sig2 := sigs[0], sigs[1] - encodedSig1 := EncodeSingleSig(hotstuff.SigTypeStaking, sig1) - encodedSig2 := EncodeSingleSig(hotstuff.SigTypeStaking, sig2) + encodedSig1 := signature.EncodeSingleSig(encoding.SigTypeStaking, sig1) + encodedSig2 := signature.EncodeSingleSig(encoding.SigTypeStaking, sig2) require.NotEqual(t, encodedSig1, encodedSig2) } // encode the same sig with the different type, the encoded sig should be different func TestEncodeSameSigWithDifferentTypeShouldBeDifferen(t *testing.T) { sig := unittest.SignatureFixture() - encodedAsStaking := EncodeSingleSig(hotstuff.SigTypeStaking, sig) - encodedAsRandomBeacon := EncodeSingleSig(hotstuff.SigTypeRandomBeacon, sig) + encodedAsStaking := signature.EncodeSingleSig(encoding.SigTypeStaking, sig) + encodedAsRandomBeacon := signature.EncodeSingleSig(encoding.SigTypeRandomBeacon, sig) require.NotEqual(t, encodedAsStaking, encodedAsRandomBeacon) } diff --git a/network/test/testUtil.go b/network/test/testUtil.go index 83289bfc786..77aefb844b5 100644 --- a/network/test/testUtil.go +++ b/network/test/testUtil.go @@ -111,7 +111,13 @@ func GenerateIDs( opt(o) } - identities := unittest.IdentityListFixture(n, o.idOpts...) 
+ identities := unittest.IdentityListFixture(n, unittest.WithAllRoles()) + + for _, identity := range identities { + for _, idOpt := range o.idOpts { + idOpt(identity) + } + } idProvider := id.NewFixedIdentityProvider(identities) diff --git a/network/topology/helper.go b/network/topology/helper.go index 1b4b54cc778..66305a2a273 100644 --- a/network/topology/helper.go +++ b/network/topology/helper.go @@ -10,6 +10,7 @@ import ( "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/flow/factory" "github.com/onflow/flow-go/model/flow/filter" "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/mocknetwork" @@ -26,7 +27,7 @@ func MockStateForCollectionNodes(t *testing.T, collectorIds flow.IdentityList, c epochQuery := new(mockprotocol.EpochQuery) epoch := new(mockprotocol.Epoch) assignments := unittest.ClusterAssignment(clusterNum, collectorIds) - clusters, err := flow.NewClusterList(assignments, collectorIds) + clusters, err := factory.NewClusterList(assignments, collectorIds) require.NoError(t, err) epoch.On("Clustering").Return(clusters, nil) diff --git a/state/cluster/root_block.go b/state/cluster/root_block.go index 7dd6eca1bf2..6789d0d3d12 100644 --- a/state/cluster/root_block.go +++ b/state/cluster/root_block.go @@ -29,7 +29,7 @@ func CanonicalRootBlock(epoch uint64, participants flow.IdentityList) *cluster.B PayloadHash: rootBlockPayloadHash, Timestamp: flow.GenesisTime, View: 0, - ParentVoterIDs: nil, + ParentVoterIndices: nil, ParentVoterSigData: nil, ProposerID: flow.ZeroID, ProposerSigData: nil, diff --git a/state/protocol/badger/mutator_test.go b/state/protocol/badger/mutator_test.go index ad5e310d49a..a651ed8c51b 100644 --- a/state/protocol/badger/mutator_test.go +++ b/state/protocol/badger/mutator_test.go @@ -512,7 +512,7 @@ func TestExtendEpochTransitionValid(t *testing.T) { // add a participant for the next epoch epoch2NewParticipant := 
unittest.IdentityFixture(unittest.WithRole(flow.RoleVerification)) - epoch2Participants := append(participants, epoch2NewParticipant).Sort(order.ByNodeIDAsc) + epoch2Participants := append(participants, epoch2NewParticipant).Sort(order.Canonical) // create the epoch setup event for the second epoch epoch2Setup := unittest.EpochSetupFixture( @@ -594,6 +594,7 @@ func TestExtendEpochTransitionValid(t *testing.T) { epoch2Commit := unittest.EpochCommitFixture( unittest.CommitWithCounter(epoch2Setup.Counter), + unittest.WithClusterQCsFromAssignments(epoch2Setup.Assignments), unittest.WithDKGFromParticipants(epoch2Participants), ) @@ -957,7 +958,7 @@ func TestExtendEpochSetupInvalid(t *testing.T) { // add a participant for the next epoch epoch2NewParticipant := unittest.IdentityFixture(unittest.WithRole(flow.RoleVerification)) - epoch2Participants := append(participants, epoch2NewParticipant).Sort(order.ByNodeIDAsc) + epoch2Participants := append(participants, epoch2NewParticipant).Sort(order.Canonical) // this function will return a VALID setup event and seal, we will modify // in different ways in each test case @@ -1044,7 +1045,7 @@ func TestExtendEpochCommitInvalid(t *testing.T) { epoch2Participants := append( participants.Filter(filter.Not(filter.HasRole(flow.RoleConsensus))), epoch2NewParticipant, - ).Sort(order.ByNodeIDAsc) + ).Sort(order.Canonical) createSetup := func(block *flow.Block) (*flow.EpochSetup, *flow.ExecutionReceipt, *flow.Seal) { setup := unittest.EpochSetupFixture( @@ -1053,6 +1054,7 @@ func TestExtendEpochCommitInvalid(t *testing.T) { unittest.WithFinalView(epoch1Setup.FinalView+1000), unittest.WithFirstView(epoch1Setup.FinalView+1), ) + receipt, seal := unittest.ReceiptAndSealForBlock(block) receipt.ExecutionResult.ServiceEvents = []flow.ServiceEvent{setup.ServiceEvent()} seal.ResultID = receipt.ExecutionResult.ID() @@ -1110,7 +1112,7 @@ func TestExtendEpochCommitInvalid(t *testing.T) { // expect a commit event with wrong cluster QCs to trigger 
EECC without error t.Run("inconsistent cluster QCs (EECC)", func(t *testing.T) { _, receipt, seal := createCommit(block3, func(commit *flow.EpochCommit) { - commit.ClusterQCs = append(commit.ClusterQCs, flow.ClusterQCVoteDataFromQC(unittest.QuorumCertificateFixture())) + commit.ClusterQCs = append(commit.ClusterQCs, flow.ClusterQCVoteDataFromQC(unittest.QuorumCertificateWithSignerIDsFixture())) }) sealingBlock := unittest.SealBlock(t, state, block3, receipt, seal) @@ -1167,7 +1169,7 @@ func TestExtendEpochTransitionWithoutCommit(t *testing.T) { // add a participant for the next epoch epoch2NewParticipant := unittest.IdentityFixture(unittest.WithRole(flow.RoleVerification)) - epoch2Participants := append(participants, epoch2NewParticipant).Sort(order.ByNodeIDAsc) + epoch2Participants := append(participants, epoch2NewParticipant).Sort(order.Canonical) // create the epoch setup event for the second epoch epoch2Setup := unittest.EpochSetupFixture( @@ -1236,7 +1238,7 @@ func TestEmergencyEpochChainContinuation(t *testing.T) { // add a participant for the next epoch epoch2NewParticipant := unittest.IdentityFixture(unittest.WithRole(flow.RoleVerification)) - epoch2Participants := append(participants, epoch2NewParticipant).Sort(order.ByNodeIDAsc) + epoch2Participants := append(participants, epoch2NewParticipant).Sort(order.Canonical) // create the epoch setup event for the second epoch epoch2Setup := unittest.EpochSetupFixture( @@ -1376,7 +1378,7 @@ func TestEmergencyEpochChainContinuation(t *testing.T) { // add a participant for the next epoch epoch2NewParticipant := unittest.IdentityFixture(unittest.WithRole(flow.RoleVerification)) - epoch2Participants := append(participants, epoch2NewParticipant).Sort(order.ByNodeIDAsc) + epoch2Participants := append(participants, epoch2NewParticipant).Sort(order.Canonical) // create the epoch setup event for the second epoch // this event is invalid because it used a non-contiguous first view diff --git 
a/state/protocol/badger/snapshot.go b/state/protocol/badger/snapshot.go index 1b55a35461f..a2c7197e50d 100644 --- a/state/protocol/badger/snapshot.go +++ b/state/protocol/badger/snapshot.go @@ -86,10 +86,10 @@ func (s *Snapshot) QuorumCertificate() (*flow.QuorumCertificate, error) { } qc := &flow.QuorumCertificate{ - View: head.View, - BlockID: s.blockID, - SignerIDs: child.ParentVoterIDs, - SigData: child.ParentVoterSigData, + View: head.View, + BlockID: s.blockID, + SignerIndices: child.ParentVoterIndices, + SigData: child.ParentVoterSigData, } return qc, nil @@ -162,7 +162,7 @@ func (s *Snapshot) Identities(selector flow.IdentityFilter) (flow.IdentityList, } // sort the identities so the 'Exists' binary search works - identities := setup.Participants.Sort(order.ByNodeIDAsc) + identities := setup.Participants.Sort(order.Canonical) // get identities that are in either last/next epoch but NOT in the current epoch var otherEpochIdentities flow.IdentityList @@ -224,7 +224,7 @@ func (s *Snapshot) Identities(selector flow.IdentityFilter) (flow.IdentityList, identities = identities.Filter(selector) // apply a deterministic sort to the participants - identities = identities.Sort(order.ByNodeIDAsc) + identities = identities.Sort(order.Canonical) return identities, nil } diff --git a/state/protocol/badger/snapshot_test.go b/state/protocol/badger/snapshot_test.go index 3f34f2dbaed..bfefd285220 100644 --- a/state/protocol/badger/snapshot_test.go +++ b/state/protocol/badger/snapshot_test.go @@ -14,6 +14,7 @@ import ( "github.com/stretchr/testify/require" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/flow/factory" "github.com/onflow/flow-go/model/flow/filter" "github.com/onflow/flow-go/state/protocol" bprotocol "github.com/onflow/flow-go/state/protocol/badger" @@ -212,7 +213,7 @@ func TestClusters(t *testing.T) { setup := result.ServiceEvents[0].Event.(*flow.EpochSetup) commit := result.ServiceEvents[1].Event.(*flow.EpochCommit) setup.Assignments = 
unittest.ClusterAssignment(uint(nClusters), collectors) - clusterQCs := unittest.QuorumCertificatesFixtures(uint(nClusters)) + clusterQCs := unittest.QuorumCertificatesFromAssignments(setup.Assignments) commit.ClusterQCs = flow.ClusterQCVoteDatasFromQCs(clusterQCs) seal.ResultID = result.ID() @@ -220,7 +221,7 @@ func TestClusters(t *testing.T) { require.NoError(t, err) util.RunWithBootstrapState(t, rootSnapshot, func(db *badger.DB, state *bprotocol.State) { - expectedClusters, err := flow.NewClusterList(setup.Assignments, collectors) + expectedClusters, err := factory.NewClusterList(setup.Assignments, collectors) require.NoError(t, err) actualClusters, err := state.Final().Epochs().Current().Clustering() require.NoError(t, err) @@ -694,7 +695,7 @@ func TestQuorumCertificate(t *testing.T) { qc, err := state.AtBlockID(block1.ID()).QuorumCertificate() assert.Nil(t, err) // should have signatures from valid child (block 2) - assert.Equal(t, block2.Header.ParentVoterIDs, qc.SignerIDs) + assert.Equal(t, block2.Header.ParentVoterIndices, qc.SignerIndices) assert.Equal(t, block2.Header.ParentVoterSigData, qc.SigData) // should have view matching block1 view assert.Equal(t, block1.Header.View, qc.View) diff --git a/state/protocol/badger/state_test.go b/state/protocol/badger/state_test.go index fb275519ccc..e486b032b5e 100644 --- a/state/protocol/badger/state_test.go +++ b/state/protocol/badger/state_test.go @@ -266,7 +266,6 @@ func TestBootstrap_InvalidIdentities(t *testing.T) { t.Run("missing role", func(t *testing.T) { requiredRoles := []flow.Role{ flow.RoleConsensus, - flow.RoleCollection, flow.RoleExecution, flow.RoleVerification, } diff --git a/state/protocol/badger/validity.go b/state/protocol/badger/validity.go index 89d5ac759f2..5a0b5d5c86f 100644 --- a/state/protocol/badger/validity.go +++ b/state/protocol/badger/validity.go @@ -10,6 +10,7 @@ import ( "github.com/onflow/flow-go/consensus/hotstuff/validator" 
"github.com/onflow/flow-go/consensus/hotstuff/verification" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/flow/factory" "github.com/onflow/flow-go/model/flow/filter" "github.com/onflow/flow-go/model/flow/order" "github.com/onflow/flow-go/state" @@ -134,7 +135,7 @@ func verifyEpochSetup(setup *flow.EpochSetup, verifyNetworkAddress bool) error { } // the collection cluster assignments need to be valid - _, err := flow.NewClusterList(setup.Assignments, activeParticipants.Filter(filter.HasRole(flow.RoleCollection))) + _, err := factory.NewClusterList(setup.Assignments, activeParticipants.Filter(filter.HasRole(flow.RoleCollection))) if err != nil { return fmt.Errorf("invalid cluster assignments: %w", err) } diff --git a/state/protocol/badger/validity_test.go b/state/protocol/badger/validity_test.go index 6c58c475fa3..518e56e88e6 100644 --- a/state/protocol/badger/validity_test.go +++ b/state/protocol/badger/validity_test.go @@ -73,7 +73,7 @@ func TestBootstrapInvalidEpochCommit(t *testing.T) { setup := result.ServiceEvents[0].Event.(*flow.EpochSetup) commit := result.ServiceEvents[1].Event.(*flow.EpochCommit) // add an extra QC to commit - extraQC := unittest.QuorumCertificateFixture() + extraQC := unittest.QuorumCertificateWithSignerIDsFixture() commit.ClusterQCs = append(commit.ClusterQCs, flow.ClusterQCVoteDataFromQC(extraQC)) err := isValidEpochCommit(commit, setup) diff --git a/state/protocol/convert.go b/state/protocol/convert.go index f8ef5f6110a..b130b776d19 100644 --- a/state/protocol/convert.go +++ b/state/protocol/convert.go @@ -3,6 +3,8 @@ package protocol import ( "fmt" + "github.com/onflow/flow-go/module/signature" + "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" @@ -66,14 +68,26 @@ func ToEpochCommit(epoch Epoch) (*flow.EpochCommit, error) { if err != nil { return nil, fmt.Errorf("could not get epoch clustering: %w", err) } - qcs := 
make([]*flow.QuorumCertificate, 0, len(clustering)) + qcs := make([]*flow.QuorumCertificateWithSignerIDs, 0, len(clustering)) for i := range clustering { cluster, err := epoch.Cluster(uint(i)) if err != nil { return nil, fmt.Errorf("could not get epoch cluster (index=%d): %w", i, err) } - qcs = append(qcs, cluster.RootQC()) + qc := cluster.RootQC() + // TODO: double check cluster.Members returns canonical order + signerIDs, err := signature.DecodeSignerIndicesToIdentifiers(cluster.Members().NodeIDs(), qc.SignerIndices) + if err != nil { + return nil, fmt.Errorf("could not encode signer indices: %w", err) + } + qcs = append(qcs, &flow.QuorumCertificateWithSignerIDs{ + View: qc.View, + BlockID: qc.BlockID, + SignerIDs: signerIDs, + SigData: qc.SigData, + }) } + participants, err := epoch.InitialIdentities() if err != nil { return nil, fmt.Errorf("could not get epoch participants: %w", err) diff --git a/state/protocol/epoch.go b/state/protocol/epoch.go index 154d7a78cc8..55bfeb5fd00 100644 --- a/state/protocol/epoch.go +++ b/state/protocol/epoch.go @@ -77,6 +77,10 @@ type Epoch interface { // given index, in this epoch. Cluster(index uint) (Cluster, error) + // ClusterByChainID returns the detailed cluster information for the cluster with + // the given chain ID, in this epoch + ClusterByChainID(chainID flow.ChainID) (Cluster, error) + // DKG returns the result of the distributed key generation procedure. DKG() (DKG, error) } diff --git a/state/protocol/errors.go b/state/protocol/errors.go index c94ef83a914..e40404b4794 100644 --- a/state/protocol/errors.go +++ b/state/protocol/errors.go @@ -25,6 +25,9 @@ var ( // ErrSealingSegmentBelowRootBlock is a sentinel error returned for queries // for a sealing segment below the root block. 
ErrSealingSegmentBelowRootBlock = fmt.Errorf("cannot query sealing segment below root block") + + // ErrClusterNotFound is a sentinel error returns for queries for a cluster + ErrClusterNotFound = fmt.Errorf("could not find cluster") ) type IdentityNotFoundError struct { diff --git a/state/protocol/inmem/convert.go b/state/protocol/inmem/convert.go index ce3d0223cb8..734853bbb1b 100644 --- a/state/protocol/inmem/convert.go +++ b/state/protocol/inmem/convert.go @@ -7,6 +7,7 @@ import ( "github.com/onflow/flow-go/model/encodable" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" + "github.com/onflow/flow-go/module/signature" "github.com/onflow/flow-go/state/protocol" ) @@ -243,6 +244,27 @@ func SnapshotFromBootstrapStateWithProtocolVersion( return nil, fmt.Errorf("invalid commit event type (%T)", result.ServiceEvents[1].Event) } + clustering, err := ClusteringFromSetupEvent(setup) + if err != nil { + return nil, fmt.Errorf("setup event has invalid clustering: %w", err) + } + + // sanity check the commit event has the same number of cluster QC as the number clusters + if len(clustering) != len(commit.ClusterQCs) { + return nil, fmt.Errorf("mismatching number of ClusterQCs, expect %v but got %v", + len(clustering), len(commit.ClusterQCs)) + } + + // sanity check the QC in the commit event, which should be found in the identities in + // the setup event + for i, cluster := range clustering { + rootQCVoteData := commit.ClusterQCs[i] + _, err = signature.EncodeSignersToIndices(cluster.NodeIDs(), rootQCVoteData.VoterIDs) + if err != nil { + return nil, fmt.Errorf("mismatching cluster and qc: %w", err) + } + } + current, err := NewCommittedEpoch(setup, commit) if err != nil { return nil, fmt.Errorf("could not convert epoch: %w", err) diff --git a/state/protocol/inmem/encodable_test.go b/state/protocol/inmem/encodable_test.go index b91eae17657..22459e17b7a 100644 --- a/state/protocol/inmem/encodable_test.go +++ 
b/state/protocol/inmem/encodable_test.go @@ -17,7 +17,7 @@ import ( // in particular with differing public key implementations func TestEncodeDecode(t *testing.T) { - participants := unittest.IdentityListFixture(10) + participants := unittest.IdentityListFixture(10, unittest.WithAllRoles()) // add a partner, which has its key represented as an encodable wrapper // type rather than the direct crypto type partner := unittest.IdentityFixture(unittest.WithKeys, func(identity *flow.Identity) { @@ -41,7 +41,7 @@ func TestEncodeDecode(t *testing.T) { // TestStrippedEncodeDecode tests that the protocol state snapshot can be encoded to JSON skipping the network address // and decoded back successfully func TestStrippedEncodeDecode(t *testing.T) { - participants := unittest.IdentityListFixture(10) + participants := unittest.IdentityListFixture(10, unittest.WithAllRoles()) initialSnapshot := unittest.RootSnapshotFixture(participants) // encode the snapshot diff --git a/state/protocol/inmem/epoch.go b/state/protocol/inmem/epoch.go index 1656f1113d8..4e70d866b60 100644 --- a/state/protocol/inmem/epoch.go +++ b/state/protocol/inmem/epoch.go @@ -5,7 +5,9 @@ import ( "github.com/onflow/flow-go/model/encodable" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/flow/factory" "github.com/onflow/flow-go/model/flow/filter" + "github.com/onflow/flow-go/module/signature" "github.com/onflow/flow-go/state/cluster" "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/state/protocol/invalid" @@ -45,13 +47,31 @@ func (e Epoch) DKG() (protocol.DKG, error) { } func (e Epoch) Cluster(i uint) (protocol.Cluster, error) { - if e.enc.Clusters != nil { - if i >= uint(len(e.enc.Clusters)) { - return nil, fmt.Errorf("no cluster with index %d", i) + if e.enc.Clusters == nil { + return nil, protocol.ErrEpochNotCommitted + } + + if i >= uint(len(e.enc.Clusters)) { + return nil, fmt.Errorf("no cluster with index %d: %w", i, protocol.ErrClusterNotFound) + } + return 
Cluster{e.enc.Clusters[i]}, nil +} + +func (e Epoch) ClusterByChainID(chainID flow.ChainID) (protocol.Cluster, error) { + if e.enc.Clusters == nil { + return nil, protocol.ErrEpochNotCommitted + } + + for _, cluster := range e.enc.Clusters { + if cluster.RootBlock.Header.ChainID == chainID { + return Cluster{cluster}, nil } - return Cluster{e.enc.Clusters[i]}, nil } - return nil, protocol.ErrEpochNotCommitted + chainIDs := make([]string, 0, len(e.enc.Clusters)) + for _, cluster := range e.enc.Clusters { + chainIDs = append(chainIDs, string(cluster.RootBlock.Header.ChainID)) + } + return nil, fmt.Errorf("no cluster with the given chain ID %v, available chainIDs %v: %w", chainID, chainIDs, protocol.ErrClusterNotFound) } type Epochs struct { @@ -111,8 +131,12 @@ func (es *setupEpoch) InitialIdentities() (flow.IdentityList, error) { } func (es *setupEpoch) Clustering() (flow.ClusterList, error) { + return ClusteringFromSetupEvent(es.setupEvent) +} + +func ClusteringFromSetupEvent(setupEvent *flow.EpochSetup) (flow.ClusterList, error) { collectorFilter := filter.HasRole(flow.RoleCollection) - clustering, err := flow.NewClusterList(es.setupEvent.Assignments, es.setupEvent.Participants.Filter(collectorFilter)) + clustering, err := factory.NewClusterList(setupEvent.Assignments, setupEvent.Participants.Filter(collectorFilter)) if err != nil { return nil, fmt.Errorf("failed to generate ClusterList from collector identities: %w", err) } @@ -123,6 +147,10 @@ func (es *setupEpoch) Cluster(_ uint) (protocol.Cluster, error) { return nil, protocol.ErrEpochNotCommitted } +func (es *setupEpoch) ClusterByChainID(_ flow.ChainID) (protocol.Cluster, error) { + return nil, protocol.ErrEpochNotCommitted +} + func (es *setupEpoch) DKG() (protocol.DKG, error) { return nil, protocol.ErrEpochNotCommitted } @@ -148,6 +176,7 @@ func (es *committedEpoch) Cluster(index uint) (protocol.Cluster, error) { return nil, fmt.Errorf("failed to generate clustering: %w", err) } + // TODO: double check 
ByIndex returns canonical order members, ok := clustering.ByIndex(index) if !ok { return nil, fmt.Errorf("failed to get members of cluster %d: %w", index, err) @@ -159,12 +188,17 @@ func (es *committedEpoch) Cluster(index uint) (protocol.Cluster, error) { } rootQCVoteData := qcs[index] + signerIndices, err := signature.EncodeSignersToIndices(members.NodeIDs(), rootQCVoteData.VoterIDs) + if err != nil { + return nil, fmt.Errorf("could not encode signer indices for rootQCVoteData.VoterIDs: %w", err) + } + rootBlock := cluster.CanonicalRootBlock(epochCounter, members) rootQC := &flow.QuorumCertificate{ - View: rootBlock.Header.View, - BlockID: rootBlock.ID(), - SignerIDs: rootQCVoteData.VoterIDs, - SigData: rootQCVoteData.SigData, + View: rootBlock.Header.View, + BlockID: rootBlock.ID(), + SignerIndices: signerIndices, + SigData: rootQCVoteData.SigData, } cluster, err := ClusterFromEncodable(EncodableCluster{ diff --git a/state/protocol/invalid/epoch.go b/state/protocol/invalid/epoch.go index a3a2d9c1a6a..da7ff66f3c9 100644 --- a/state/protocol/invalid/epoch.go +++ b/state/protocol/invalid/epoch.go @@ -46,6 +46,10 @@ func (u *Epoch) Cluster(uint) (protocol.Cluster, error) { return nil, u.err } +func (u Epoch) ClusterByChainID(chainID flow.ChainID) (protocol.Cluster, error) { + return nil, u.err +} + func (u *Epoch) DKG() (protocol.DKG, error) { return nil, u.err } diff --git a/state/protocol/mock/epoch.go b/state/protocol/mock/epoch.go index 8c8c0ec25e6..ffe589d009f 100644 --- a/state/protocol/mock/epoch.go +++ b/state/protocol/mock/epoch.go @@ -39,6 +39,29 @@ func (_m *Epoch) Cluster(index uint) (protocol.Cluster, error) { return r0, r1 } +// ClusterByChainID provides a mock function with given fields: chainID +func (_m *Epoch) ClusterByChainID(chainID flow.ChainID) (protocol.Cluster, error) { + ret := _m.Called(chainID) + + var r0 protocol.Cluster + if rf, ok := ret.Get(0).(func(flow.ChainID) protocol.Cluster); ok { + r0 = rf(chainID) + } else { + if ret.Get(0) != 
nil { + r0 = ret.Get(0).(protocol.Cluster) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(flow.ChainID) error); ok { + r1 = rf(chainID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // Clustering provides a mock function with given fields: func (_m *Epoch) Clustering() (flow.ClusterList, error) { ret := _m.Called() diff --git a/state/protocol/seed/seed.go b/state/protocol/seed/seed.go index 271bc6c6c54..f8160e1c334 100644 --- a/state/protocol/seed/seed.go +++ b/state/protocol/seed/seed.go @@ -3,7 +3,7 @@ package seed import ( "fmt" - "github.com/onflow/flow-go/consensus/hotstuff/packer" + "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/crypto/hash" "github.com/onflow/flow-go/crypto/random" @@ -34,7 +34,7 @@ const RandomSourceLength = crypto.SignatureLenBLSBLS12381 // The sigData is an RLP encoded structure that is part of QuorumCertificate. func FromParentQCSignature(sigData []byte) ([]byte, error) { // unpack sig data to extract random beacon sig - randomBeaconSig, err := packer.UnpackRandomBeaconSig(sigData) + randomBeaconSig, err := model.UnpackRandomBeaconSig(sigData) if err != nil { return nil, fmt.Errorf("could not unpack block signature: %w", err) } diff --git a/state/protocol/util.go b/state/protocol/util.go index 1d63b83a763..8b5ec92094c 100644 --- a/state/protocol/util.go +++ b/state/protocol/util.go @@ -5,6 +5,7 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" + "github.com/onflow/flow-go/module/signature" ) // IsNodeAuthorizedAt returns whether the node with the given ID is a valid @@ -69,3 +70,30 @@ func IsSporkRootSnapshot(snapshot Snapshot) (bool, error) { } return true, nil } + +// FindGuarantors decodes the signer indices from the guarantee, and finds the guarantor identifiers from protocol state +// Expected Error returns during normal operations: +// * signature.ErrIncompatibleBitVectorLength indicates that 
`signerIndices` has the wrong length +// * signature.ErrIllegallyPaddedBitVector is the vector is padded with bits other than 0 +// * signature.ErrInvalidChecksum if the input is shorter than the expected checksum contained in the guarantee.SignerIndices +func FindGuarantors(state State, guarantee *flow.CollectionGuarantee) ([]flow.Identifier, error) { + snapshot := state.AtBlockID(guarantee.ReferenceBlockID) + epochs := snapshot.Epochs() + epoch := epochs.Current() + cluster, err := epoch.ClusterByChainID(guarantee.ChainID) + + if err != nil { + // protocol state must have validated the block that contains the guarantee, so the cluster + // must be found, otherwise, it's an internal error + return nil, fmt.Errorf( + "internal error retrieving collector clusters for guarantee (ReferenceBlockID: %v, ChainID: %v): %w", + guarantee.ReferenceBlockID, guarantee.ChainID, err) + } + + guarantorIDs, err := signature.DecodeSignerIndicesToIdentifiers(cluster.Members().NodeIDs(), guarantee.SignerIndices) + if err != nil { + return nil, fmt.Errorf("could not decode signer indices for guarantee %v: %w", guarantee.ID(), err) + } + + return guarantorIDs, nil +} diff --git a/state/protocol/util_test.go b/state/protocol/util_test.go index 57b6c707826..c0c13c8f077 100644 --- a/state/protocol/util_test.go +++ b/state/protocol/util_test.go @@ -12,14 +12,14 @@ import ( func TestIsSporkRootSnapshot(t *testing.T) { t.Run("spork root", func(t *testing.T) { - snapshot := unittest.RootSnapshotFixture(unittest.IdentityListFixture(10)) + snapshot := unittest.RootSnapshotFixture(unittest.IdentityListFixture(10, unittest.WithAllRoles())) isSporkRoot, err := protocol.IsSporkRootSnapshot(snapshot) require.NoError(t, err) assert.True(t, isSporkRoot) }) t.Run("other snapshot", func(t *testing.T) { - snapshot := unittest.RootSnapshotFixture(unittest.IdentityListFixture(10)) + snapshot := unittest.RootSnapshotFixture(unittest.IdentityListFixture(10, unittest.WithAllRoles())) 
snapshot.Encodable().SealingSegment.Blocks = unittest.BlockFixtures(5) isSporkRoot, err := protocol.IsSporkRootSnapshot(snapshot) require.NoError(t, err) diff --git a/storage/badger/operation/headers_test.go b/storage/badger/operation/headers_test.go index b42f56c8ea6..fc7e09b8cb0 100644 --- a/storage/badger/operation/headers_test.go +++ b/storage/badger/operation/headers_test.go @@ -22,7 +22,7 @@ func TestHeaderInsertCheckRetrieve(t *testing.T) { Timestamp: time.Now().UTC(), ParentID: flow.Identifier{0x11}, PayloadHash: flow.Identifier{0x22}, - ParentVoterIDs: []flow.Identifier{{0x44}}, + ParentVoterIndices: []byte{0x44}, ParentVoterSigData: []byte{0x88}, ProposerID: flow.Identifier{0x33}, ProposerSigData: crypto.Signature{0x77}, diff --git a/utils/unittest/cluster.go b/utils/unittest/cluster.go index 0bd9e65d352..01f8d526f0f 100644 --- a/utils/unittest/cluster.go +++ b/utils/unittest/cluster.go @@ -5,6 +5,7 @@ import ( "sort" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/flow/factory" "github.com/onflow/flow-go/model/flow/filter" "github.com/onflow/flow-go/model/flow/order" ) @@ -51,7 +52,7 @@ func ClusterAssignment(n uint, nodes flow.IdentityList) flow.AssignmentList { // order, so the same list results in the same sort.Slice(collectors, func(i, j int) bool { - return order.ByNodeIDAsc(collectors[i], collectors[j]) + return order.Canonical(collectors[i], collectors[j]) }) assignments := make(flow.AssignmentList, n) @@ -65,7 +66,7 @@ func ClusterAssignment(n uint, nodes flow.IdentityList) flow.AssignmentList { func ClusterList(n uint, nodes flow.IdentityList) flow.ClusterList { assignments := ClusterAssignment(n, nodes) - clusters, err := flow.NewClusterList(assignments, nodes.Filter(filter.HasRole(flow.RoleCollection))) + clusters, err := factory.NewClusterList(assignments, nodes.Filter(filter.HasRole(flow.RoleCollection))) if err != nil { panic(err) } diff --git a/utils/unittest/fixtures.go b/utils/unittest/fixtures.go index 
d41ef67fb09..e4150d7a0b8 100644 --- a/utils/unittest/fixtures.go +++ b/utils/unittest/fixtures.go @@ -13,15 +13,15 @@ import ( "github.com/stretchr/testify/require" sdk "github.com/onflow/flow-go-sdk" - hotstuffroot "github.com/onflow/flow-go/consensus/hotstuff" hotstuff "github.com/onflow/flow-go/consensus/hotstuff/model" - hotstuffPacker "github.com/onflow/flow-go/consensus/hotstuff/packer" "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/crypto/hash" "github.com/onflow/flow-go/engine/execution/state/delta" + "github.com/onflow/flow-go/ledger/common/bitutils" "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/chunks" "github.com/onflow/flow-go/model/cluster" + "github.com/onflow/flow-go/model/encoding" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" "github.com/onflow/flow-go/model/flow/order" @@ -320,11 +320,13 @@ func StateInteractionsFixture() *delta.Snapshot { return &delta.NewView(nil).Interactions().Snapshot } -func BlockWithParentAndProposerFixture(parent *flow.Header, proposer flow.Identifier) flow.Block { +func BlockWithParentAndProposerFixture(parent *flow.Header, proposer flow.Identifier, participantCount int) flow.Block { block := BlockWithParentFixture(parent) block.Header.ProposerID = proposer - block.Header.ParentVoterIDs = []flow.Identifier{proposer} + indices := bitutils.MakeBitVector(10) + bitutils.SetBit(indices, 1) + block.Header.ParentVoterIndices = indices return *block } @@ -435,7 +437,7 @@ func BlockHeaderWithParentFixture(parent *flow.Header) flow.Header { PayloadHash: IdentifierFixture(), Timestamp: time.Now().UTC(), View: view, - ParentVoterIDs: IdentifierListFixture(4), + ParentVoterIndices: SignerIndicesFixture(4), ParentVoterSigData: QCSigDataFixture(), ProposerID: IdentifierFixture(), ProposerSigData: SignatureFixture(), @@ -500,9 +502,9 @@ func WithCollection(collection *flow.Collection) func(guarantee *flow.Collection func 
CollectionGuaranteeFixture(options ...func(*flow.CollectionGuarantee)) *flow.CollectionGuarantee { guarantee := &flow.CollectionGuarantee{ - CollectionID: IdentifierFixture(), - SignerIDs: IdentifierListFixture(16), - Signature: SignatureFixture(), + CollectionID: IdentifierFixture(), + SignerIndices: RandomBytes(16), + Signature: SignatureFixture(), } for _, option := range options { option(guarantee) @@ -558,12 +560,20 @@ func CollectionFixture(n int) flow.Collection { return flow.Collection{Transactions: transactions} } +func FixedReferenceBlockID() flow.Identifier { + blockID := flow.Identifier{} + blockID[0] = byte(1) + return blockID +} + func CompleteCollectionFixture() *entity.CompleteCollection { txBody := TransactionBodyFixture() return &entity.CompleteCollection{ Guarantee: &flow.CollectionGuarantee{ - CollectionID: flow.Collection{Transactions: []*flow.TransactionBody{&txBody}}.ID(), - Signature: SignatureFixture(), + CollectionID: flow.Collection{Transactions: []*flow.TransactionBody{&txBody}}.ID(), + Signature: SignatureFixture(), + ReferenceBlockID: FixedReferenceBlockID(), + SignerIndices: SignerIndicesFixture(1), }, Transactions: []*flow.TransactionBody{&txBody}, } @@ -572,8 +582,10 @@ func CompleteCollectionFixture() *entity.CompleteCollection { func CompleteCollectionFromTransactions(txs []*flow.TransactionBody) *entity.CompleteCollection { return &entity.CompleteCollection{ Guarantee: &flow.CollectionGuarantee{ - CollectionID: flow.Collection{Transactions: txs}.ID(), - Signature: SignatureFixture(), + CollectionID: flow.Collection{Transactions: txs}.ID(), + Signature: SignatureFixture(), + ReferenceBlockID: IdentifierFixture(), + SignerIndices: SignerIndicesFixture(3), }, Transactions: txs, } @@ -591,9 +603,8 @@ func ExecutableBlockFixtureWithParent(collectionsSignerIDs [][]flow.Identifier, block := BlockWithParentFixture(parent) block.Payload.Guarantees = nil - for _, signerIDs := range collectionsSignerIDs { + for range collectionsSignerIDs { 
completeCollection := CompleteCollectionFixture() - completeCollection.Guarantee.SignerIDs = signerIDs block.Payload.Guarantees = append(block.Payload.Guarantees, completeCollection.Guarantee) completeCollections[completeCollection.Guarantee.CollectionID] = completeCollection } @@ -604,8 +615,6 @@ func ExecutableBlockFixtureWithParent(collectionsSignerIDs [][]flow.Identifier, Block: block, CompleteCollections: completeCollections, } - // Preload the id - executableBlock.ID() return executableBlock } @@ -855,6 +864,22 @@ func IdentifierFixture() flow.Identifier { return id } +func SignerIndicesFixture(n int) []byte { + indices := bitutils.MakeBitVector(10) + for i := 0; i < n; i++ { + bitutils.SetBit(indices, 1) + } + return indices +} + +func SignerIndicesByIndices(n int, indices []int) []byte { + signers := bitutils.MakeBitVector(n) + for _, i := range indices { + bitutils.SetBit(signers, i) + } + return signers +} + // WithRole adds a role to an identity fixture. func WithRole(role flow.Role) func(*flow.Identity) { return func(identity *flow.Identity) { @@ -941,7 +966,7 @@ func IdentityFixture(opts ...func(*flow.Identity)) *flow.Identity { stakingKey := StakingPrivKeyByIdentifier(nodeID) identity := flow.Identity{ NodeID: nodeID, - Address: fmt.Sprintf("address-%v", nodeID[0:7]), + Address: fmt.Sprintf("address-%x", nodeID[0:7]), Role: flow.RoleConsensus, Weight: 1000, StakingPubKey: stakingKey.PublicKey(), @@ -1132,12 +1157,12 @@ func ChunkStatusListFixture(t *testing.T, blockHeight uint64, result *flow.Execu } func QCSigDataFixture() []byte { - packer := hotstuffPacker.SigDataPacker{} + packer := hotstuff.SigDataPacker{} sigType := RandomBytes(5) for i := range sigType { sigType[i] = sigType[i] % 2 } - sigData := hotstuffPacker.SignatureData{ + sigData := hotstuff.SignatureData{ SigType: sigType, AggregatedStakingSig: SignatureFixture(), AggregatedRandomBeaconSig: SignatureFixture(), @@ -1557,8 +1582,8 @@ func PublicKeysFixture(n int, algo 
crypto.SigningAlgorithm) []crypto.PublicKey { return pks } -func QuorumCertificateFixture(opts ...func(*flow.QuorumCertificate)) *flow.QuorumCertificate { - qc := flow.QuorumCertificate{ +func QuorumCertificateWithSignerIDsFixture(opts ...func(*flow.QuorumCertificateWithSignerIDs)) *flow.QuorumCertificateWithSignerIDs { + qc := flow.QuorumCertificateWithSignerIDs{ View: uint64(rand.Uint32()), BlockID: IdentifierFixture(), SignerIDs: IdentifierListFixture(3), @@ -1570,6 +1595,37 @@ func QuorumCertificateFixture(opts ...func(*flow.QuorumCertificate)) *flow.Quoru return &qc } +func QuorumCertificatesWithSignerIDsFixtures(n uint, opts ...func(*flow.QuorumCertificateWithSignerIDs)) []*flow.QuorumCertificateWithSignerIDs { + qcs := make([]*flow.QuorumCertificateWithSignerIDs, 0, n) + for i := 0; i < int(n); i++ { + qcs = append(qcs, QuorumCertificateWithSignerIDsFixture(opts...)) + } + return qcs +} + +func QuorumCertificatesFromAssignments(assignment flow.AssignmentList) []*flow.QuorumCertificateWithSignerIDs { + qcs := make([]*flow.QuorumCertificateWithSignerIDs, 0, len(assignment)) + for _, nodes := range assignment { + qc := QuorumCertificateWithSignerIDsFixture() + qc.SignerIDs = nodes + qcs = append(qcs, qc) + } + return qcs +} + +func QuorumCertificateFixture(opts ...func(*flow.QuorumCertificate)) *flow.QuorumCertificate { + qc := flow.QuorumCertificate{ + View: uint64(rand.Uint32()), + BlockID: IdentifierFixture(), + SignerIndices: SignerIndicesFixture(3), + SigData: QCSigDataFixture(), + } + for _, apply := range opts { + apply(&qc) + } + return &qc +} + func QuorumCertificatesFixtures(n uint, opts ...func(*flow.QuorumCertificate)) []*flow.QuorumCertificate { qcs := make([]*flow.QuorumCertificate, 0, n) for i := 0; i < int(n); i++ { @@ -1584,9 +1640,9 @@ func QCWithBlockID(blockID flow.Identifier) func(*flow.QuorumCertificate) { } } -func QCWithSignerIDs(signerIDs []flow.Identifier) func(*flow.QuorumCertificate) { +func QCWithSignerIndices(signerIndices []byte) 
func(*flow.QuorumCertificate) { return func(qc *flow.QuorumCertificate) { - qc.SignerIDs = signerIDs + qc.SignerIndices = signerIndices } } @@ -1636,19 +1692,19 @@ func VoteForBlockFixture(block *hotstuff.Block, opts ...func(vote *hotstuff.Vote func VoteWithStakingSig() func(*hotstuff.Vote) { return func(vote *hotstuff.Vote) { - vote.SigData = append([]byte{byte(hotstuffroot.SigTypeStaking)}, vote.SigData...) + vote.SigData = append([]byte{byte(encoding.SigTypeStaking)}, vote.SigData...) } } func VoteWithBeaconSig() func(*hotstuff.Vote) { return func(vote *hotstuff.Vote) { - vote.SigData = append([]byte{byte(hotstuffroot.SigTypeRandomBeacon)}, vote.SigData...) + vote.SigData = append([]byte{byte(encoding.SigTypeRandomBeacon)}, vote.SigData...) } } func WithParticipants(participants flow.IdentityList) func(*flow.EpochSetup) { return func(setup *flow.EpochSetup) { - setup.Participants = participants.Sort(order.ByNodeIDAsc) + setup.Participants = participants.Sort(order.Canonical) setup.Assignments = ClusterAssignment(1, participants) } } @@ -1728,9 +1784,11 @@ func WithDKGFromParticipants(participants flow.IdentityList) func(*flow.EpochCom } func WithClusterQCsFromAssignments(assignments flow.AssignmentList) func(*flow.EpochCommit) { - qcs := make([]*flow.QuorumCertificate, 0, len(assignments)) - for _, cluster := range assignments { - qcs = append(qcs, QuorumCertificateFixture(QCWithSignerIDs(cluster))) + qcs := make([]*flow.QuorumCertificateWithSignerIDs, 0, len(assignments)) + for _, assignment := range assignments { + qcWithSignerIndex := QuorumCertificateWithSignerIDsFixture() + qcWithSignerIndex.SignerIDs = assignment + qcs = append(qcs, qcWithSignerIndex) } return func(commit *flow.EpochCommit) { commit.ClusterQCs = flow.ClusterQCVoteDatasFromQCs(qcs) @@ -1757,7 +1815,7 @@ func CommitWithCounter(counter uint64) func(*flow.EpochCommit) { func EpochCommitFixture(opts ...func(*flow.EpochCommit)) *flow.EpochCommit { commit := &flow.EpochCommit{ Counter: 
uint64(rand.Uint32()), - ClusterQCs: flow.ClusterQCVoteDatasFromQCs(QuorumCertificatesFixtures(1)), + ClusterQCs: flow.ClusterQCVoteDatasFromQCs(QuorumCertificatesWithSignerIDsFixtures(1)), DKGGroupKey: KeyFixture(crypto.BLSBLS12381).PublicKey(), DKGParticipantKeys: PublicKeysFixture(2, crypto.BLSBLS12381), }