Fixed more compilation issues regarding generic identity
durkmurder committed Oct 3, 2023
1 parent a9d7587 commit 23cec18
Showing 67 changed files with 433 additions and 308 deletions.
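
Most of the hunks below follow one of two patterns: (1) identity helpers such as filter.HasRole, filter.HasNodeID, filter.In and order.Canonical are now generic over the identity type, so call sites have to spell out the instantiation, e.g. filter.HasRole[flow.Identity](flow.RoleCollection); and (2) APIs that only need the static part of an identity now receive identities.ToSkeleton() (see the sketch after the committee test diffs). The explicit type argument in (1) is required because the type parameter appears only in the helper's return type, or the helper is passed around as a bare function value, so Go cannot infer it from the call arguments. Below is a minimal sketch of pattern (1); all names are illustrative stand-ins, not the actual flow-go API.

// Illustrative sketch only: mimics the shape of the generic identity
// helpers touched by this commit, not the real flow-go definitions.
package main

import "fmt"

type Role string

// GenericIdentity is an assumed constraint: any identity flavour that can
// report its role.
type GenericIdentity interface {
	GetRole() Role
}

// IdentityFilter is a predicate over a concrete identity type T.
type IdentityFilter[T GenericIdentity] func(T) bool

// HasRole keeps identities holding any of the given roles. T appears only
// in the return type, so callers must instantiate it explicitly, e.g.
// HasRole[Identity]("collection"), mirroring
// filter.HasRole[flow.Identity](flow.RoleCollection) in the diff.
func HasRole[T GenericIdentity](roles ...Role) IdentityFilter[T] {
	allowed := make(map[Role]struct{}, len(roles))
	for _, r := range roles {
		allowed[r] = struct{}{}
	}
	return func(id T) bool {
		_, ok := allowed[id.GetRole()]
		return ok
	}
}

// Filter applies the predicate to a slice of identities.
func Filter[T GenericIdentity](list []T, keep IdentityFilter[T]) []T {
	out := make([]T, 0, len(list))
	for _, id := range list {
		if keep(id) {
			out = append(out, id)
		}
	}
	return out
}

// Identity is a stand-in for flow.Identity.
type Identity struct {
	NodeID string
	Role   Role
}

func (i Identity) GetRole() Role { return i.Role }

func main() {
	nodes := []Identity{
		{NodeID: "a", Role: "collection"},
		{NodeID: "b", Role: "consensus"},
	}
	collectors := Filter(nodes, HasRole[Identity]("collection"))
	fmt.Println(collectors) // prints [{a collection}]
}
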
2 changes: 1 addition & 1 deletion Makefile
@@ -151,7 +151,7 @@ generate-fvm-env-wrappers:
generate-mocks: install-mock-generators
mockery --name '(Connector|PingInfoProvider)' --dir=network/p2p --case=underscore --output="./network/mocknetwork" --outpkg="mocknetwork"
mockgen -destination=storage/mocks/storage.go -package=mocks github.com/onflow/flow-go/storage Blocks,Headers,Payloads,Collections,Commits,Events,ServiceEvents,TransactionResults
- mockgen -destination=module/mocks/network.go -package=mocks github.com/onflow/flow-go/module Local,Requester
+ #mockgen -destination=module/mocks/network.go -package=mocks github.com/onflow/flow-go/module Local,Requester
mockgen -destination=network/mocknetwork/mock_network.go -package=mocknetwork github.com/onflow/flow-go/network EngineRegistry
mockery --name='.*' --dir=integration/benchmark/mocksiface --case=underscore --output="integration/benchmark/mock" --outpkg="mock"
mockery --name=ExecutionDataStore --dir=module/executiondatasync/execution_data --case=underscore --output="./module/executiondatasync/execution_data/mock" --outpkg="mock"
4 changes: 2 additions & 2 deletions cmd/bootstrap/cmd/clusters.go
@@ -26,8 +26,8 @@ import (
// of succeeding the assignment by re-running the function without increasing the internal nodes ratio.
func constructClusterAssignment(partnerNodes, internalNodes []model.NodeInfo) (flow.AssignmentList, flow.ClusterList, error) {

- partners := model.ToIdentityList(partnerNodes).Filter(filter.HasRole(flow.RoleCollection))
- internals := model.ToIdentityList(internalNodes).Filter(filter.HasRole(flow.RoleCollection))
+ partners := model.ToIdentityList(partnerNodes).Filter(filter.HasRole[flow.Identity](flow.RoleCollection))
+ internals := model.ToIdentityList(internalNodes).Filter(filter.HasRole[flow.Identity](flow.RoleCollection))
nClusters := int(flagCollectionClusters)
nCollectors := len(partners) + len(internals)

2 changes: 1 addition & 1 deletion cmd/bootstrap/cmd/constraints.go
@@ -12,7 +12,7 @@ import (
func ensureUniformNodeWeightsPerRole(allNodes flow.IdentityList) {
// ensure all nodes of the same role have equal weight
for _, role := range flow.Roles() {
- withRole := allNodes.Filter(filter.HasRole(role))
+ withRole := allNodes.Filter(filter.HasRole[flow.Identity](role))
// each role has at least one node so it's safe to access withRole[0]
expectedWeight := withRole[0].Weight
for _, node := range withRole {
2 changes: 1 addition & 1 deletion cmd/bootstrap/cmd/finalize.go
@@ -156,7 +156,7 @@ func finalize(cmd *cobra.Command, args []string) {
log.Info().Msg("")

// create flow.IdentityList representation of participant set
- participants := model.ToIdentityList(stakingNodes).Sort(order.Canonical)
+ participants := model.ToIdentityList(stakingNodes).Sort(order.Canonical[flow.Identity])

log.Info().Msg("reading root block data")
block := readRootBlock()
2 changes: 1 addition & 1 deletion cmd/bootstrap/cmd/seal.go
@@ -39,7 +39,7 @@ func constructRootResultAndSeal(
DKGPhase1FinalView: firstView + flagNumViewsInStakingAuction + flagNumViewsInDKGPhase - 1,
DKGPhase2FinalView: firstView + flagNumViewsInStakingAuction + flagNumViewsInDKGPhase*2 - 1,
DKGPhase3FinalView: firstView + flagNumViewsInStakingAuction + flagNumViewsInDKGPhase*3 - 1,
- Participants: participants.Sort(order.Canonical),
+ Participants: participants.Sort(order.Canonical[flow.Identity]),
Assignments: assignments,
RandomSource: GenerateRandomSeed(flow.EpochSetupRandomSourceLength),
}
2 changes: 1 addition & 1 deletion cmd/bootstrap/run/cluster_qc.go
@@ -20,7 +20,7 @@ import (

// GenerateClusterRootQC creates votes and generates a QC based on participant data
func GenerateClusterRootQC(signers []bootstrap.NodeInfo, allCommitteeMembers flow.IdentityList, clusterBlock *cluster.Block) (*flow.QuorumCertificate, error) {
- if !allCommitteeMembers.Sorted(order.Canonical) {
+ if !allCommitteeMembers.Sorted(order.Canonical[flow.Identity]) {
return nil, fmt.Errorf("can't create root cluster QC: committee members are not sorted in canonical order")
}
clusterRootBlock := model.GenesisBlockFromFlow(clusterBlock.Header)
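
The Sorted/Sort call sites above show the other face of pattern (1): order.Canonical is now generic, and because it is handed to Sort/Sorted as a function value rather than being called directly, there are no call arguments from which Go could infer the type parameter, hence order.Canonical[flow.Identity]. A minimal sketch of that shape, again with stand-in types and assuming canonical order simply means ascending node ID:

// Stand-in sketch; not the actual flow-go order package.
package main

import (
	"bytes"
	"fmt"
	"sort"
)

// GenericIdentity is an assumed constraint exposing the node ID.
type GenericIdentity interface {
	GetNodeID() []byte
}

// Identity is a stand-in for flow.Identity.
type Identity struct {
	NodeID []byte
}

func (i Identity) GetNodeID() []byte { return i.NodeID }

// Canonical orders identities by ascending node ID. Callers pass it around
// as a value (list.Sort(Canonical[Identity])), so the type parameter has to
// be written out explicitly.
func Canonical[T GenericIdentity](a, b T) bool {
	return bytes.Compare(a.GetNodeID(), b.GetNodeID()) < 0
}

type IdentityList []Identity

// Sort orders the list in place by the given less function and returns it.
func (l IdentityList) Sort(less func(a, b Identity) bool) IdentityList {
	sort.Slice(l, func(i, j int) bool { return less(l[i], l[j]) })
	return l
}

// Sorted reports whether the list is already ordered by less.
func (l IdentityList) Sorted(less func(a, b Identity) bool) bool {
	return sort.SliceIsSorted(l, func(i, j int) bool { return less(l[i], l[j]) })
}

func main() {
	list := IdentityList{{NodeID: []byte{0x02}}, {NodeID: []byte{0x01}}}
	fmt.Println(list.Sorted(Canonical[Identity])) // false
	list = list.Sort(Canonical[Identity])
	fmt.Println(list.Sorted(Canonical[Identity])) // true
}
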
2 changes: 1 addition & 1 deletion cmd/bootstrap/run/cluster_qc_test.go
@@ -33,7 +33,7 @@ func TestGenerateClusterRootQC(t *testing.T) {
payload := cluster.EmptyPayload(flow.ZeroID)
clusterBlock.SetPayload(payload)

- orderedParticipants := model.ToIdentityList(participants).Sort(order.Canonical)
+ orderedParticipants := model.ToIdentityList(participants).Sort(order.Canonical[flow.Identity])
_, err := GenerateClusterRootQC(participants, orderedParticipants, &clusterBlock)
require.NoError(t, err)
}
2 changes: 1 addition & 1 deletion cmd/bootstrap/run/qc_test.go
@@ -45,7 +45,7 @@ func TestGenerateRootQCWithSomeInvalidVotes(t *testing.T) {
}

func createSignerData(t *testing.T, n int) *ParticipantData {
- identities := unittest.IdentityListFixture(n).Sort(order.Canonical)
+ identities := unittest.IdentityListFixture(n).Sort(order.Canonical[flow.Identity])

networkingKeys := unittest.NetworkingKeys(n)
stakingKeys := unittest.StakingKeys(n)
2 changes: 1 addition & 1 deletion cmd/util/cmd/epochs/cmd/move_machine_acct.go
@@ -72,7 +72,7 @@ func moveMachineAcctRun(cmd *cobra.Command, args []string) {
}

// identities with machine accounts
- machineAcctIdentities := identities.Filter(filter.HasRole(flow.RoleCollection, flow.RoleConsensus))
+ machineAcctIdentities := identities.Filter(filter.HasRole[flow.Identity](flow.RoleCollection, flow.RoleConsensus))

machineAcctFiles, err := os.ReadDir(flagMachineAccountsSrcDir)
if err != nil {
4 changes: 2 additions & 2 deletions consensus/hotstuff/committees/cluster_committee_test.go
@@ -49,11 +49,11 @@ func (suite *ClusterSuite) SetupTest() {
suite.members = unittest.IdentityListFixture(5, unittest.WithRole(flow.RoleCollection))
suite.me = suite.members[0]
counter := uint64(1)
- suite.root = clusterstate.CanonicalRootBlock(counter, suite.members)
+ suite.root = clusterstate.CanonicalRootBlock(counter, suite.members.ToSkeleton())

suite.cluster.On("EpochCounter").Return(counter)
suite.cluster.On("Index").Return(uint(1))
- suite.cluster.On("Members").Return(suite.members)
+ suite.cluster.On("Members").Return(suite.members.ToSkeleton())
suite.cluster.On("RootBlock").Return(suite.root)
suite.epoch.On("Counter").Return(counter, nil)
suite.epoch.On("RandomSource").Return(unittest.SeedFixture(prg.RandomSourceLength), nil)
2 changes: 1 addition & 1 deletion consensus/hotstuff/committees/consensus_committee_test.go
@@ -679,7 +679,7 @@ func newMockEpoch(counter uint64, identities flow.IdentityList, firstView uint64

epoch := new(protocolmock.Epoch)
epoch.On("Counter").Return(counter, nil)
- epoch.On("InitialIdentities").Return(identities, nil)
+ epoch.On("InitialIdentities").Return(identities.ToSkeleton(), nil)
epoch.On("FirstView").Return(firstView, nil)
epoch.On("FinalView").Return(finalView, nil)
if committed {
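
The committee and epoch mocks above now return identities.ToSkeleton() wherever only the static portion of an identity is needed (cluster root blocks, Members, InitialIdentities). A rough sketch of that split, with stand-in types rather than the real flow.Identity / flow.IdentitySkeleton, assuming the skeleton keeps the immutable fields and the full identity layers dynamic state such as weight on top:

// Stand-in types; the real flow-go identity types carry more fields.
package main

import "fmt"

type Role string

// IdentitySkeleton holds the static, protocol-defined part of an identity.
type IdentitySkeleton struct {
	NodeID string
	Role   Role
}

// Identity wraps the skeleton with dynamic state (here just a weight).
type Identity struct {
	IdentitySkeleton
	Weight uint64
}

type IdentityList []Identity
type IdentitySkeletonList []IdentitySkeleton

// ToSkeleton projects the list down to its static part, which is what the
// committee and epoch APIs in these diffs now expect.
func (l IdentityList) ToSkeleton() IdentitySkeletonList {
	out := make(IdentitySkeletonList, 0, len(l))
	for _, id := range l {
		out = append(out, id.IdentitySkeleton)
	}
	return out
}

func main() {
	members := IdentityList{
		{IdentitySkeleton: IdentitySkeleton{NodeID: "a", Role: "collection"}, Weight: 100},
		{IdentitySkeleton: IdentitySkeleton{NodeID: "b", Role: "collection"}, Weight: 100},
	}
	fmt.Println(members.ToSkeleton()) // [{a collection} {b collection}]
}
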
20 changes: 10 additions & 10 deletions consensus/hotstuff/mocks/dynamic_committee.go

(generated file; diff not rendered by default)

8 changes: 4 additions & 4 deletions consensus/hotstuff/mocks/packer.go

(generated file; diff not rendered by default)

10 changes: 5 additions & 5 deletions consensus/hotstuff/mocks/replicas.go

(generated file; diff not rendered by default)

8 changes: 4 additions & 4 deletions consensus/hotstuff/mocks/verifier.go

(generated file; diff not rendered by default)

4 changes: 2 additions & 2 deletions consensus/hotstuff/timeoutcollector/timeout_processor_test.go
@@ -55,7 +55,7 @@ func (s *TimeoutProcessorTestSuite) SetupTest() {
s.validator = mocks.NewValidator(s.T())
s.sigAggregator = mocks.NewTimeoutSignatureAggregator(s.T())
s.notifier = mocks.NewTimeoutCollectorConsumer(s.T())
- s.participants = unittest.IdentityListFixture(11, unittest.WithWeight(s.sigWeight)).Sort(order.Canonical).ToSkeleton()
+ s.participants = unittest.IdentityListFixture(11, unittest.WithWeight(s.sigWeight)).Sort(order.Canonical[flow.Identity]).ToSkeleton()
s.signer = s.participants[0]
s.view = (uint64)(rand.Uint32() + 100)
s.totalWeight = *atomic.NewUint64(0)
@@ -471,7 +471,7 @@ func TestTimeoutProcessor_BuildVerifyTC(t *testing.T) {
require.NoError(t, err)

signers[identity.NodeID] = verification.NewStakingSigner(me)
- }).Sort(order.Canonical)
+ }).Sort(order.Canonical[flow.Identity])

// utility function which generates a valid timeout for every signer
createTimeouts := func(participants flow.IdentitySkeletonList, view uint64, newestQC *flow.QuorumCertificate, lastViewTC *flow.TimeoutCertificate) []*model.TimeoutObject {
2 changes: 1 addition & 1 deletion consensus/hotstuff/validator/validator_test.go
@@ -69,7 +69,7 @@ func (ps *ProposalSuite) SetupTest() {
voterIDs, err := signature.DecodeSignerIndicesToIdentifiers(ps.participants.NodeIDs(), ps.block.QC.SignerIndices)
require.NoError(ps.T(), err)

- ps.voters = ps.participants.Filter(filter.HasNodeID(voterIDs...)).ToSkeleton()
+ ps.voters = ps.participants.Filter(filter.HasNodeID[flow.Identity](voterIDs...)).ToSkeleton()
ps.proposal = &model.Proposal{Block: ps.block}
ps.vote = ps.proposal.ProposerVote()
ps.voter = ps.leader
@@ -265,7 +265,7 @@ func TestStakingVoteProcessorV2_BuildVerifyQC(t *testing.T) {
require.NoError(t, err)

signers[identity.NodeID] = verification.NewStakingSigner(me)
- }).Sort(order.Canonical)
+ }).Sort(order.Canonical[flow.Identity])

leader := stakingSigners[0]
block := helper.MakeBlock(helper.WithBlockView(view), helper.WithBlockProposer(leader.NodeID))
20 changes: 10 additions & 10 deletions consensus/integration/epoch_test.go
@@ -33,7 +33,7 @@ func TestUnweightedNode(t *testing.T) {
// * same collection node from epoch 1, so cluster QCs are consistent
// * 1 new consensus node, joining at epoch 2
// * random nodes with other roles
- currentEpochCollectionNodes, err := rootSnapshot.Identities(filter.HasRole(flow.RoleCollection))
+ currentEpochCollectionNodes, err := rootSnapshot.Identities(filter.HasRole[flow.Identity](flow.RoleCollection))
require.NoError(t, err)
nextEpochIdentities := unittest.CompleteIdentitySet(
append(
@@ -121,7 +121,7 @@ func TestEpochTransition_IdentitiesOverlap(t *testing.T) {
removedIdentity := privateNodeInfos[0].Identity()
newIdentity := privateNodeInfos[3].Identity()
nextEpochIdentities := append(
- firstEpochIdentities.Filter(filter.Not(filter.HasNodeID(removedIdentity.NodeID))),
+ firstEpochIdentities.Filter(filter.Not(filter.HasNodeID[flow.Identity](removedIdentity.NodeID))),
newIdentity,
)

@@ -172,8 +172,8 @@ func TestEpochTransition_IdentitiesDisjoint(t *testing.T) {

nextEpochParticipantData := createConsensusIdentities(t, 3)
nextEpochIdentities := append(
- firstEpochIdentities.Filter(filter.Not(filter.HasRole(flow.RoleConsensus))), // remove all consensus nodes
- nextEpochParticipantData.Identities()..., // add new consensus nodes
+ firstEpochIdentities.Filter(filter.Not(filter.HasRole[flow.Identity](flow.RoleConsensus))), // remove all consensus nodes
+ nextEpochParticipantData.Identities()...,                                                   // add new consensus nodes
)

rootSnapshot = withNextEpoch(
@@ -221,18 +221,18 @@ func withNextEpoch(

currEpoch := &encodableSnapshot.Epochs.Current // take pointer so assignments apply
currentEpochIdentities := currEpoch.InitialIdentities
- nextEpochIdentities = nextEpochIdentities.Sort(order.Canonical)
+ nextEpochIdentities = nextEpochIdentities.Sort(order.Canonical[flow.Identity])

currEpoch.FinalView = currEpoch.FirstView + curEpochViews - 1 // first epoch lasts curEpochViews
encodableSnapshot.Epochs.Next = &inmem.EncodableEpoch{
Counter: currEpoch.Counter + 1,
FirstView: currEpoch.FinalView + 1,
FinalView: currEpoch.FinalView + 1 + 10000,
RandomSource: unittest.SeedFixture(flow.EpochSetupRandomSourceLength),
- InitialIdentities: nextEpochIdentities,
+ InitialIdentities: nextEpochIdentities.ToSkeleton(),
// must include info corresponding to EpochCommit event, since we are
// starting in committed phase
- Clustering: unittest.ClusterList(1, nextEpochIdentities),
+ Clustering: unittest.ClusterList(1, nextEpochIdentities.ToSkeleton()),
Clusters: currEpoch.Clusters,
DKG: &inmem.EncodableDKG{
GroupKey: encodable.RandomBeaconPubKey{
@@ -257,9 +257,9 @@
currentEpochIdentities,
// and all the NEW identities in next epoch, with 0 weight
nextEpochIdentities.
- Filter(filter.Not(filter.In(currentEpochIdentities))).
+ Filter(filter.Not(filter.In[flow.Identity](currentEpochIdentities))).
Map(mapfunc.WithWeight(0))...,
- ).Sort(order.Canonical))
+ ).Sort(order.Canonical[flow.Identity]))

nextEpochIdentities = append(
// all the next epoch identities
@@ -268,7 +268,7 @@ func withNextEpoch(
currentEpochIdentities.
Filter(filter.Not(filter.In(nextEpochIdentities))).
Map(mapfunc.WithWeight(0))...,
- ).Sort(order.Canonical)
+ ).Sort(order.Canonical[flow.Identity])

// setup ID has changed, need to update it
convertedEpochSetup, _ := protocol.ToEpochSetup(inmem.NewEpoch(*currEpoch))
(diffs for the remaining changed files were not loaded)
