From e3ab00d85b311b6e6ba887a518cb302d79b20560 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Thu, 24 Mar 2022 19:43:07 -0500 Subject: [PATCH 001/223] Refactor root checkpoint extraction program - Add MaxVersion to reject unsupported version early - Extract code reading the checkpoint file from CLI program into checkpointer.go - Close checkpoint file --- cmd/util/cmd/execution-state-extract/cmd.go | 45 +++---------------- ledger/complete/wal/checkpointer.go | 48 +++++++++++++++++++++ 2 files changed, 55 insertions(+), 38 deletions(-) diff --git a/cmd/util/cmd/execution-state-extract/cmd.go b/cmd/util/cmd/execution-state-extract/cmd.go index c7e102f8ad9..58586910708 100644 --- a/cmd/util/cmd/execution-state-extract/cmd.go +++ b/cmd/util/cmd/execution-state-extract/cmd.go @@ -1,7 +1,6 @@ package extract import ( - "encoding/binary" "encoding/hex" "fmt" "os" @@ -11,7 +10,6 @@ import ( "github.com/spf13/cobra" "github.com/onflow/flow-go/cmd/util/cmd/common" - "github.com/onflow/flow-go/ledger/common/hash" "github.com/onflow/flow-go/ledger/complete/wal" "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/flow" @@ -119,45 +117,16 @@ func run(*cobra.Command, []string) { if err != nil { log.Fatal().Err(err).Msg("invalid root checkpoint") } + defer f.Close() - const ( - encMagicSize = 2 - encVersionSize = 2 - crcLength = 4 - encNodeCountSize = 8 - encTrieCountSize = 2 - headerSize = encMagicSize + encVersionSize - ) - - // read checkpoint version - header := make([]byte, headerSize) - n, err := f.Read(header) - if err != nil || n != headerSize { - log.Fatal().Err(err).Msg("failed to read version from root checkpoint") - } - - magic := binary.BigEndian.Uint16(header) - version := binary.BigEndian.Uint16(header[encMagicSize:]) - - if magic != wal.MagicBytes { - log.Fatal().Err(err).Msg("invalid magic bytes in root checkpoint") - } - - if version <= 3 { - _, err = f.Seek(-(hash.HashLen + crcLength), 2 /* 
relative from end */) - if err != nil { - log.Fatal().Err(err).Msg("invalid root checkpoint") - } - } else { - _, err = f.Seek(-(hash.HashLen + encNodeCountSize + encTrieCountSize + crcLength), 2 /* relative from end */) - if err != nil { - log.Fatal().Err(err).Msg("invalid root checkpoint") - } + rootHash, err := wal.ReadLastTrieRootHashFromCheckpoint(f) + if err != nil { + log.Fatal().Err(err).Msgf("failed to read last root hash in root checkpoint: %s", err.Error()) } - n, err = f.Read(stateCommitment[:]) - if err != nil || n != hash.HashLen { - log.Fatal().Err(err).Msg("failed to read state commitment from root checkpoint") + stateCommitment, err = flow.ToStateCommitment(rootHash[:]) + if err != nil { + log.Fatal().Err(err).Msg("failed to convert state commitment from last root hash in root checkpoint") } } diff --git a/ledger/complete/wal/checkpointer.go b/ledger/complete/wal/checkpointer.go index ad225e984fd..c562dd350a1 100644 --- a/ledger/complete/wal/checkpointer.go +++ b/ledger/complete/wal/checkpointer.go @@ -4,6 +4,7 @@ import ( "bufio" "encoding/binary" "encoding/hex" + "errors" "fmt" "io" "os" @@ -13,6 +14,7 @@ import ( "strings" "github.com/onflow/flow-go/ledger" + "github.com/onflow/flow-go/ledger/common/hash" "github.com/onflow/flow-go/ledger/complete/mtrie" "github.com/onflow/flow-go/ledger/complete/mtrie/flattener" "github.com/onflow/flow-go/ledger/complete/mtrie/node" @@ -42,6 +44,10 @@ const VersionV4 uint16 = 0x04 // See EncodeNode() and EncodeTrie() for more details. const VersionV5 uint16 = 0x05 +// MaxVersion is the latest checkpoint version we support. +// Need to update MaxVersion when creating a newer version. +const MaxVersion = VersionV5 + const ( encMagicSize = 2 encVersionSize = 2 @@ -769,3 +775,45 @@ func readCheckpointV5(f *os.File) ([]*trie.MTrie, error) { return tries, nil } + +// ReadLastTrieRootHashFromCheckpoint returns last trie's root hash from checkpoint file f. 
+func ReadLastTrieRootHashFromCheckpoint(f *os.File) (hash.Hash, error) { + + // read checkpoint version + header := make([]byte, headerSize) + n, err := f.Read(header) + if err != nil || n != headerSize { + return hash.DummyHash, errors.New("failed to read checkpoint header") + } + + magic := binary.BigEndian.Uint16(header) + version := binary.BigEndian.Uint16(header[encMagicSize:]) + + if magic != MagicBytes { + return hash.DummyHash, errors.New("invalid magic bytes in checkpoint") + } + + if version > MaxVersion { + return hash.DummyHash, fmt.Errorf("unsupported version %d in checkpoint", version) + } + + if version <= 3 { + _, err = f.Seek(-(hash.HashLen + crc32SumSize), 2 /* relative from end */) + if err != nil { + return hash.DummyHash, errors.New("invalid checkpoint") + } + } else { + _, err = f.Seek(-(hash.HashLen + encNodeCountSize + encTrieCountSize + crc32SumSize), 2 /* relative from end */) + if err != nil { + return hash.DummyHash, errors.New("invalid checkpoint") + } + } + + var lastTrieRootHash hash.Hash + n, err = f.Read(lastTrieRootHash[:]) + if err != nil || n != hash.HashLen { + return hash.DummyHash, errors.New("failed to read last trie root hash from checkpoint") + } + + return lastTrieRootHash, nil +} From 30e03e4c9d124fad3f90e2c033442fa0fb806c23 Mon Sep 17 00:00:00 2001 From: Janez Podhostnik Date: Wed, 1 Jun 2022 20:32:57 +0200 Subject: [PATCH 002/223] Fix error handling on contract function invocations --- fvm/scriptEnv.go | 15 +++------------ fvm/transactionEnv.go | 15 +++------------ go.mod | 4 ++-- go.sum | 8 ++++---- integration/go.mod | 4 ++-- integration/go.sum | 8 ++++---- 6 files changed, 18 insertions(+), 36 deletions(-) diff --git a/fvm/scriptEnv.go b/fvm/scriptEnv.go index 105f89437d5..d89a9f4d7d4 100644 --- a/fvm/scriptEnv.go +++ b/fvm/scriptEnv.go @@ -290,13 +290,8 @@ func (e *ScriptEnv) GetStorageCapacity(address common.Address) (value uint64, er accountStorageCapacity := AccountStorageCapacityInvocation(e, e.traceSpan) 
result, invokeErr := accountStorageCapacity(address) - // TODO: Figure out how to handle this error. Currently if a runtime error occurs, storage capacity will be 0. - // 1. An error will occur if user has removed their FlowToken.Vault -- should this be allowed? - // 2. There will also be an error in case the accounts balance times megabytesPerFlow constant overflows, - // which shouldn't happen unless the the price of storage is reduced at least 100 fold - // 3. Any other error indicates a bug in our implementation. How can we reliably check the Cadence error? if invokeErr != nil { - return 0, nil + return 0, errors.HandleRuntimeError(invokeErr) } // Return type is actually a UFix64 with the unit of megabytes so some conversion is necessary @@ -318,9 +313,8 @@ func (e *ScriptEnv) GetAccountBalance(address common.Address) (value uint64, err accountBalance := AccountBalanceInvocation(e, e.traceSpan) result, invokeErr := accountBalance(address) - // TODO: Figure out how to handle this error. Currently if a runtime error occurs, balance will be 0. if invokeErr != nil { - return 0, nil + return 0, errors.HandleRuntimeError(invokeErr) } return result.ToGoValue().(uint64), nil } @@ -339,11 +333,8 @@ func (e *ScriptEnv) GetAccountAvailableBalance(address common.Address) (value ui accountAvailableBalance := AccountAvailableBalanceInvocation(e, e.traceSpan) result, invokeErr := accountAvailableBalance(address) - // TODO: Figure out how to handle this error. Currently if a runtime error occurs, available balance will be 0. - // 1. An error will occur if user has removed their FlowToken.Vault -- should this be allowed? - // 2. Any other error indicates a bug in our implementation. How can we reliably check the Cadence error? 
if invokeErr != nil { - return 0, nil + return 0, errors.HandleRuntimeError(invokeErr) } return result.ToGoValue().(uint64), nil } diff --git a/fvm/transactionEnv.go b/fvm/transactionEnv.go index e2515686143..8aba7edc04d 100644 --- a/fvm/transactionEnv.go +++ b/fvm/transactionEnv.go @@ -438,13 +438,8 @@ func (e *TransactionEnv) GetStorageCapacity(address common.Address) (value uint6 accountStorageCapacity := AccountStorageCapacityInvocation(e, e.traceSpan) result, invokeErr := accountStorageCapacity(address) - // TODO: Figure out how to handle this error. Currently if a runtime error occurs, storage capacity will be 0. - // 1. An error will occur if user has removed their FlowToken.Vault -- should this be allowed? - // 2. There will also be an error in case the accounts balance times megabytesPerFlow constant overflows, - // which shouldn't happen unless the the price of storage is reduced at least 100 fold - // 3. Any other error indicates a bug in our implementation. How can we reliably check the Cadence error? if invokeErr != nil { - return 0, nil + return 0, errors.HandleRuntimeError(invokeErr) } return storageMBUFixToBytesUInt(result), nil @@ -471,9 +466,8 @@ func (e *TransactionEnv) GetAccountBalance(address common.Address) (value uint64 accountBalance := AccountBalanceInvocation(e, e.traceSpan) result, invokeErr := accountBalance(address) - // TODO: Figure out how to handle this error. Currently if a runtime error occurs, balance will be 0. if invokeErr != nil { - return 0, nil + return 0, errors.HandleRuntimeError(invokeErr) } return result.ToGoValue().(uint64), nil } @@ -492,11 +486,8 @@ func (e *TransactionEnv) GetAccountAvailableBalance(address common.Address) (val accountAvailableBalance := AccountAvailableBalanceInvocation(e, e.traceSpan) result, invokeErr := accountAvailableBalance(address) - // TODO: Figure out how to handle this error. Currently if a runtime error occurs, available balance will be 0. - // 1. 
An error will occur if user has removed their FlowToken.Vault -- should this be allowed? - // 2. Any other error indicates a bug in our implementation. How can we reliably check the Cadence error? if invokeErr != nil { - return 0, nil + return 0, errors.HandleRuntimeError(invokeErr) } return result.ToGoValue().(uint64), nil } diff --git a/go.mod b/go.mod index 57efd635469..31acd641fc8 100644 --- a/go.mod +++ b/go.mod @@ -56,8 +56,8 @@ require ( github.com/onflow/atree v0.3.1-0.20220531231935-525fbc26f40a github.com/onflow/cadence v0.21.3-0.20220601002855-8b113c539a2c github.com/onflow/flow v0.3.0 - github.com/onflow/flow-core-contracts/lib/go/contracts v0.11.2-0.20220513155751-c4c1f8d59f83 - github.com/onflow/flow-core-contracts/lib/go/templates v0.11.2-0.20220513155751-c4c1f8d59f83 + github.com/onflow/flow-core-contracts/lib/go/contracts v0.11.2-0.20220601181637-450d5563fa7d + github.com/onflow/flow-core-contracts/lib/go/templates v0.11.2-0.20220601181637-450d5563fa7d github.com/onflow/flow-emulator v0.31.2-0.20220513151845-ef7513cb1cd0 github.com/onflow/flow-go-sdk v0.24.1-0.20220513205729-d1f58d47c4e3 github.com/onflow/flow-go/crypto v0.24.3 diff --git a/go.sum b/go.sum index 3dfbb9edacb..5fe8756ca90 100644 --- a/go.sum +++ b/go.sum @@ -1413,12 +1413,12 @@ github.com/onflow/flow v0.3.0/go.mod h1:lzyAYmbu1HfkZ9cfnL5/sjrrsnJiUU8fRL26CqLP github.com/onflow/flow-core-contracts/lib/go/contracts v0.7.3-0.20210527134022-58c25247091a/go.mod h1:IZ2e7UyLCYmpQ8Kd7k0A32uXqdqfiV1r2sKs5/riblo= github.com/onflow/flow-core-contracts/lib/go/contracts v0.11.2-0.20220413172500-d89ca96e6db3/go.mod h1:T6yhM+kWrFxiP6F3hh8lh9DcocHfmv48P4ITnjLhKSk= github.com/onflow/flow-core-contracts/lib/go/contracts v0.11.2-0.20220422202806-92ad02a996cc/go.mod h1:T6yhM+kWrFxiP6F3hh8lh9DcocHfmv48P4ITnjLhKSk= -github.com/onflow/flow-core-contracts/lib/go/contracts v0.11.2-0.20220513155751-c4c1f8d59f83 h1:mpJirFu/JWMLV0IhKDZleVrVdN5B8QERV4gSXDef5bA= 
-github.com/onflow/flow-core-contracts/lib/go/contracts v0.11.2-0.20220513155751-c4c1f8d59f83/go.mod h1:T6yhM+kWrFxiP6F3hh8lh9DcocHfmv48P4ITnjLhKSk= +github.com/onflow/flow-core-contracts/lib/go/contracts v0.11.2-0.20220601181637-450d5563fa7d h1:XxR0IyxTKIMg01pUG4jMFxx5Yvr8dXxL3lwzRvmv+UY= +github.com/onflow/flow-core-contracts/lib/go/contracts v0.11.2-0.20220601181637-450d5563fa7d/go.mod h1:T6yhM+kWrFxiP6F3hh8lh9DcocHfmv48P4ITnjLhKSk= github.com/onflow/flow-core-contracts/lib/go/templates v0.11.2-0.20220413172500-d89ca96e6db3/go.mod h1:JB2hWVxUjhMshUDNVQKfn4dzhhawOO+i3XjlhLMV5MM= github.com/onflow/flow-core-contracts/lib/go/templates v0.11.2-0.20220422202806-92ad02a996cc/go.mod h1:JB2hWVxUjhMshUDNVQKfn4dzhhawOO+i3XjlhLMV5MM= -github.com/onflow/flow-core-contracts/lib/go/templates v0.11.2-0.20220513155751-c4c1f8d59f83 h1:w4uXFTvjQmLtA/X50H4YXVlzbdsoL3vDI3Y86jtJOMM= -github.com/onflow/flow-core-contracts/lib/go/templates v0.11.2-0.20220513155751-c4c1f8d59f83/go.mod h1:JB2hWVxUjhMshUDNVQKfn4dzhhawOO+i3XjlhLMV5MM= +github.com/onflow/flow-core-contracts/lib/go/templates v0.11.2-0.20220601181637-450d5563fa7d h1:9W7/3ummJ3ESEjIPfcexm9GDl8hUSaujzfKHsBvjILQ= +github.com/onflow/flow-core-contracts/lib/go/templates v0.11.2-0.20220601181637-450d5563fa7d/go.mod h1:JB2hWVxUjhMshUDNVQKfn4dzhhawOO+i3XjlhLMV5MM= github.com/onflow/flow-emulator v0.20.3/go.mod h1:xNdVsrMJiAaYJ59Dwo+xvj0ENdvk/bI14zkGN4V0ozs= github.com/onflow/flow-emulator v0.30.1-0.20220421153717-0a0abc4d580b/go.mod h1:5IsytpArI/wN2ZZXCRAAcIp/223PmVDnmPxbRZO6IbU= github.com/onflow/flow-emulator v0.31.2-0.20220421202209-eb83f9bfda53/go.mod h1:4jyaXs+wHHI0JlBi3/+K9DciPzIve3+MFrNXRAnBEl4= diff --git a/integration/go.mod b/integration/go.mod index 15c7f246307..16bf7ee3019 100644 --- a/integration/go.mod +++ b/integration/go.mod @@ -10,8 +10,8 @@ require ( github.com/go-yaml/yaml v2.1.0+incompatible github.com/jedib0t/go-pretty v4.3.0+incompatible github.com/onflow/cadence v0.21.3-0.20220601002855-8b113c539a2c - 
github.com/onflow/flow-core-contracts/lib/go/contracts v0.11.2-0.20220513155751-c4c1f8d59f83 - github.com/onflow/flow-core-contracts/lib/go/templates v0.11.2-0.20220513155751-c4c1f8d59f83 + github.com/onflow/flow-core-contracts/lib/go/contracts v0.11.2-0.20220601181637-450d5563fa7d + github.com/onflow/flow-core-contracts/lib/go/templates v0.11.2-0.20220601181637-450d5563fa7d github.com/onflow/flow-emulator v0.31.2-0.20220513151845-ef7513cb1cd0 github.com/onflow/flow-ft/lib/go/templates v0.2.0 github.com/onflow/flow-go v0.25.13-0.20220513151142-7858f76e703b // replaced by version on-disk diff --git a/integration/go.sum b/integration/go.sum index b4e7240a2ba..62b6635e5e0 100644 --- a/integration/go.sum +++ b/integration/go.sum @@ -1492,10 +1492,10 @@ github.com/onflow/cadence v0.21.3-0.20220513161637-08b93d4bb7b9/go.mod h1:vNIxF1 github.com/onflow/cadence v0.21.3-0.20220601002855-8b113c539a2c h1:l2Oc/aZ1FbzJBU5bbsBm6jjYe7aHLOp++ymLL2dTHRQ= github.com/onflow/cadence v0.21.3-0.20220601002855-8b113c539a2c/go.mod h1:FliGP1FZLEuSemnSf8pKItDzW7E2cvPukx/SsE1+oCo= github.com/onflow/flow v0.3.0/go.mod h1:lzyAYmbu1HfkZ9cfnL5/sjrrsnJiUU8fRL26CqLP7+c= -github.com/onflow/flow-core-contracts/lib/go/contracts v0.11.2-0.20220513155751-c4c1f8d59f83 h1:mpJirFu/JWMLV0IhKDZleVrVdN5B8QERV4gSXDef5bA= -github.com/onflow/flow-core-contracts/lib/go/contracts v0.11.2-0.20220513155751-c4c1f8d59f83/go.mod h1:T6yhM+kWrFxiP6F3hh8lh9DcocHfmv48P4ITnjLhKSk= -github.com/onflow/flow-core-contracts/lib/go/templates v0.11.2-0.20220513155751-c4c1f8d59f83 h1:w4uXFTvjQmLtA/X50H4YXVlzbdsoL3vDI3Y86jtJOMM= -github.com/onflow/flow-core-contracts/lib/go/templates v0.11.2-0.20220513155751-c4c1f8d59f83/go.mod h1:JB2hWVxUjhMshUDNVQKfn4dzhhawOO+i3XjlhLMV5MM= +github.com/onflow/flow-core-contracts/lib/go/contracts v0.11.2-0.20220601181637-450d5563fa7d h1:XxR0IyxTKIMg01pUG4jMFxx5Yvr8dXxL3lwzRvmv+UY= +github.com/onflow/flow-core-contracts/lib/go/contracts v0.11.2-0.20220601181637-450d5563fa7d/go.mod 
h1:T6yhM+kWrFxiP6F3hh8lh9DcocHfmv48P4ITnjLhKSk= +github.com/onflow/flow-core-contracts/lib/go/templates v0.11.2-0.20220601181637-450d5563fa7d h1:9W7/3ummJ3ESEjIPfcexm9GDl8hUSaujzfKHsBvjILQ= +github.com/onflow/flow-core-contracts/lib/go/templates v0.11.2-0.20220601181637-450d5563fa7d/go.mod h1:JB2hWVxUjhMshUDNVQKfn4dzhhawOO+i3XjlhLMV5MM= github.com/onflow/flow-emulator v0.31.2-0.20220513151845-ef7513cb1cd0 h1:QfLN/O4VkLhrUoTVmU5+/e5gIxI3gRpOjhz06UKsSEQ= github.com/onflow/flow-emulator v0.31.2-0.20220513151845-ef7513cb1cd0/go.mod h1:XLZfTEaYX2151TcTvZPIj7taBN0qpsOoyvEBRZzlXtQ= github.com/onflow/flow-ft/lib/go/contracts v0.5.0 h1:Cg4gHGVblxcejfNNG5Mfj98Wf4zbY76O0Y28QB0766A= From 1236aff95c8175e7d1a8637cb31ba741fcbe93a2 Mon Sep 17 00:00:00 2001 From: Janez Podhostnik Date: Wed, 1 Jun 2022 21:04:11 +0200 Subject: [PATCH 003/223] fix unit tests --- engine/execution/state/bootstrap/bootstrap_test.go | 2 +- utils/unittest/execution_state.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/engine/execution/state/bootstrap/bootstrap_test.go b/engine/execution/state/bootstrap/bootstrap_test.go index 33613c6909b..f603ad22e09 100644 --- a/engine/execution/state/bootstrap/bootstrap_test.go +++ b/engine/execution/state/bootstrap/bootstrap_test.go @@ -47,7 +47,7 @@ func TestBootstrapLedger(t *testing.T) { } func TestBootstrapLedger_ZeroTokenSupply(t *testing.T) { - expectedStateCommitmentBytes, _ := hex.DecodeString("cae2f2d6c53582503dad30dba8a8bba098ff8654d19b820a49e1961c1f459a41") + expectedStateCommitmentBytes, _ := hex.DecodeString("349715decb402a5f756cbc37372554cb93191458c6e9293a9655464b450754ca") expectedStateCommitment, err := flow.ToStateCommitment(expectedStateCommitmentBytes) require.NoError(t, err) diff --git a/utils/unittest/execution_state.go b/utils/unittest/execution_state.go index 3a10537863b..5fab9cf821b 100644 --- a/utils/unittest/execution_state.go +++ b/utils/unittest/execution_state.go @@ -24,7 +24,7 @@ const 
ServiceAccountPrivateKeySignAlgo = crypto.ECDSAP256 const ServiceAccountPrivateKeyHashAlgo = hash.SHA2_256 // Pre-calculated state commitment with root account with the above private key -const GenesisStateCommitmentHex = "5b8f8283d5e719672cb53c0e20a822bf0782f4345d09df076c14fba4d9e21da0" +const GenesisStateCommitmentHex = "cfd13767357a5ab910a499b1dcf10b919be205c1bb63593db648c367eee7a71d" var GenesisStateCommitment flow.StateCommitment From e28a36bd4a19a97eae1f3c0fd4cf4dc7b07f26f1 Mon Sep 17 00:00:00 2001 From: Janez Podhostnik Date: Wed, 1 Jun 2022 23:12:59 +0200 Subject: [PATCH 004/223] add tests --- fvm/accounts_test.go | 139 +++++++++++++++++++++++++++++++++++++++---- 1 file changed, 128 insertions(+), 11 deletions(-) diff --git a/fvm/accounts_test.go b/fvm/accounts_test.go index 7ae1c81b60a..892a1373381 100644 --- a/fvm/accounts_test.go +++ b/fvm/accounts_test.go @@ -1228,7 +1228,7 @@ func TestAccountBalanceFields(t *testing.T) { account := createAccount(t, vm, chain, ctx, view, programs) txBody := transferTokensTx(chain). - AddArgument(jsoncdc.MustEncode(cadence.UFix64(1_0000_0000))). + AddArgument(jsoncdc.MustEncode(cadence.UFix64(100_000_000))). AddArgument(jsoncdc.MustEncode(cadence.Address(account))). AddAuthorizer(chain.ServiceAddress()) @@ -1248,7 +1248,30 @@ func TestAccountBalanceFields(t *testing.T) { assert.NoError(t, err) - assert.Equal(t, cadence.UFix64(1_0000_0000), script.Value) + assert.Equal(t, cadence.UFix64(100_000_000), script.Value) + }), + ) + + t.Run("Get balance fails for accounts that dont exist", + newVMTest().withContextOptions( + fvm.WithTransactionProcessors(fvm.NewTransactionInvoker(zerolog.Nop())), + fvm.WithCadenceLogging(true), + ). 
+ run(func(t *testing.T, vm *fvm.VirtualMachine, chain flow.Chain, ctx fvm.Context, view state.View, programs *programs.Programs) { + nonExistentAddress, err := chain.AddressAtIndex(100) + require.NoError(t, err) + + script := fvm.Script([]byte(fmt.Sprintf(` + pub fun main(): UFix64 { + let acc = getAccount(0x%s) + return acc.balance + } + `, nonExistentAddress))) + + err = vm.Run(ctx, script, view, programs) + + require.NoError(t, err) + require.Error(t, script.Err) }), ) @@ -1258,13 +1281,13 @@ func TestAccountBalanceFields(t *testing.T) { fvm.WithCadenceLogging(true), fvm.WithAccountStorageLimit(false), ).withBootstrapProcedureOptions( - fvm.WithStorageMBPerFLOW(10_0000_0000), + fvm.WithStorageMBPerFLOW(1000_000_000), ). run(func(t *testing.T, vm *fvm.VirtualMachine, chain flow.Chain, ctx fvm.Context, view state.View, programs *programs.Programs) { account := createAccount(t, vm, chain, ctx, view, programs) txBody := transferTokensTx(chain). - AddArgument(jsoncdc.MustEncode(cadence.UFix64(1_0000_0000))). + AddArgument(jsoncdc.MustEncode(cadence.UFix64(100_000_000))). AddArgument(jsoncdc.MustEncode(cadence.Address(account))). AddAuthorizer(chain.ServiceAddress()) @@ -1284,7 +1307,33 @@ func TestAccountBalanceFields(t *testing.T) { assert.NoError(t, err) assert.NoError(t, script.Err) - assert.Equal(t, cadence.UFix64(9999_2520), script.Value) + assert.Equal(t, cadence.UFix64(99_992_520), script.Value) + }), + ) + + t.Run("Get available balance fails for accounts that don't exist", + newVMTest().withContextOptions( + fvm.WithTransactionProcessors(fvm.NewTransactionInvoker(zerolog.Nop())), + fvm.WithCadenceLogging(true), + fvm.WithAccountStorageLimit(false), + ).withBootstrapProcedureOptions( + fvm.WithStorageMBPerFLOW(1_000_000_000), + ). 
+ run(func(t *testing.T, vm *fvm.VirtualMachine, chain flow.Chain, ctx fvm.Context, view state.View, programs *programs.Programs) { + nonExistentAddress, err := chain.AddressAtIndex(100) + require.NoError(t, err) + + script := fvm.Script([]byte(fmt.Sprintf(` + pub fun main(): UFix64 { + let acc = getAccount(0x%s) + return acc.availableBalance + } + `, nonExistentAddress))) + + err = vm.Run(ctx, script, view, programs) + + require.NoError(t, err) + require.Error(t, script.Err) }), ) @@ -1294,15 +1343,15 @@ func TestAccountBalanceFields(t *testing.T) { fvm.WithCadenceLogging(true), fvm.WithAccountStorageLimit(false), ).withBootstrapProcedureOptions( - fvm.WithStorageMBPerFLOW(10_0000_0000), - fvm.WithAccountCreationFee(10_0000), - fvm.WithMinimumStorageReservation(10_0000), + fvm.WithStorageMBPerFLOW(1000_000_000), + fvm.WithAccountCreationFee(100_000), + fvm.WithMinimumStorageReservation(100_000), ). run(func(t *testing.T, vm *fvm.VirtualMachine, chain flow.Chain, ctx fvm.Context, view state.View, programs *programs.Programs) { account := createAccount(t, vm, chain, ctx, view, programs) txBody := transferTokensTx(chain). - AddArgument(jsoncdc.MustEncode(cadence.UFix64(1_0000_0000))). + AddArgument(jsoncdc.MustEncode(cadence.UFix64(100_000_000))). AddArgument(jsoncdc.MustEncode(cadence.Address(account))). 
AddAuthorizer(chain.ServiceAddress()) @@ -1323,8 +1372,76 @@ func TestAccountBalanceFields(t *testing.T) { assert.NoError(t, err) assert.NoError(t, script.Err) - // Should be 1_0000_0000 because 10_0000 was given to it during account creation and is now locked up - assert.Equal(t, cadence.UFix64(1_0000_0000), script.Value) + // Should be 100_000_000 because 100_000 was given to it during account creation and is now locked up + assert.Equal(t, cadence.UFix64(100_000_000), script.Value) + }), + ) +} + +func TestGetStorageCapacity(t *testing.T) { + t.Run("Get storage capacity", + newVMTest().withContextOptions( + fvm.WithTransactionProcessors(fvm.NewTransactionInvoker(zerolog.Nop())), + fvm.WithCadenceLogging(true), + fvm.WithAccountStorageLimit(false), + ).withBootstrapProcedureOptions( + fvm.WithStorageMBPerFLOW(1_000_000_000), + fvm.WithAccountCreationFee(100_000), + fvm.WithMinimumStorageReservation(100_000), + ). + run(func(t *testing.T, vm *fvm.VirtualMachine, chain flow.Chain, ctx fvm.Context, view state.View, programs *programs.Programs) { + account := createAccount(t, vm, chain, ctx, view, programs) + + txBody := transferTokensTx(chain). + AddArgument(jsoncdc.MustEncode(cadence.UFix64(100_000_000))). + AddArgument(jsoncdc.MustEncode(cadence.Address(account))). 
+ AddAuthorizer(chain.ServiceAddress()) + + tx := fvm.Transaction(txBody, 0) + + err := vm.Run(ctx, tx, view, programs) + require.NoError(t, err) + + script := fvm.Script([]byte(fmt.Sprintf(` + pub fun main(): UInt64 { + let acc = getAccount(0x%s) + return acc.storageCapacity + } + `, account))) + + err = vm.Run(ctx, script, view, programs) + + require.NoError(t, err) + require.NoError(t, script.Err) + + require.Equal(t, cadence.UInt64(10_010_000), script.Value) + }), + ) + t.Run("Get storage capacity fails for accounts that don't exist", + newVMTest().withContextOptions( + fvm.WithTransactionProcessors(fvm.NewTransactionInvoker(zerolog.Nop())), + fvm.WithCadenceLogging(true), + fvm.WithAccountStorageLimit(false), + ).withBootstrapProcedureOptions( + fvm.WithStorageMBPerFLOW(1_000_000_000), + fvm.WithAccountCreationFee(100_000), + fvm.WithMinimumStorageReservation(100_000), + ). + run(func(t *testing.T, vm *fvm.VirtualMachine, chain flow.Chain, ctx fvm.Context, view state.View, programs *programs.Programs) { + nonExistentAddress, err := chain.AddressAtIndex(100) + require.NoError(t, err) + + script := fvm.Script([]byte(fmt.Sprintf(` + pub fun main(): UInt64 { + let acc = getAccount(0x%s) + return acc.storageCapacity + } + `, nonExistentAddress))) + + err = vm.Run(ctx, script, view, programs) + + require.NoError(t, err) + require.Error(t, script.Err) }), ) } From f738f8453b8bd5cc5bbb1553f1040ede99e3fa32 Mon Sep 17 00:00:00 2001 From: Janez Podhostnik Date: Wed, 1 Jun 2022 23:31:03 +0200 Subject: [PATCH 005/223] change core contracts version --- go.mod | 4 ++-- go.sum | 8 ++++---- integration/go.mod | 4 ++-- integration/go.sum | 8 ++++---- 4 files changed, 12 insertions(+), 12 deletions(-) diff --git a/go.mod b/go.mod index 31acd641fc8..3463893c86b 100644 --- a/go.mod +++ b/go.mod @@ -56,8 +56,8 @@ require ( github.com/onflow/atree v0.3.1-0.20220531231935-525fbc26f40a github.com/onflow/cadence v0.21.3-0.20220601002855-8b113c539a2c github.com/onflow/flow v0.3.0 - 
github.com/onflow/flow-core-contracts/lib/go/contracts v0.11.2-0.20220601181637-450d5563fa7d - github.com/onflow/flow-core-contracts/lib/go/templates v0.11.2-0.20220601181637-450d5563fa7d + github.com/onflow/flow-core-contracts/lib/go/contracts v0.11.2-0.20220601212727-0bb6b1f83a9c + github.com/onflow/flow-core-contracts/lib/go/templates v0.11.2-0.20220601212727-0bb6b1f83a9c github.com/onflow/flow-emulator v0.31.2-0.20220513151845-ef7513cb1cd0 github.com/onflow/flow-go-sdk v0.24.1-0.20220513205729-d1f58d47c4e3 github.com/onflow/flow-go/crypto v0.24.3 diff --git a/go.sum b/go.sum index 5fe8756ca90..7cd1088847a 100644 --- a/go.sum +++ b/go.sum @@ -1413,12 +1413,12 @@ github.com/onflow/flow v0.3.0/go.mod h1:lzyAYmbu1HfkZ9cfnL5/sjrrsnJiUU8fRL26CqLP github.com/onflow/flow-core-contracts/lib/go/contracts v0.7.3-0.20210527134022-58c25247091a/go.mod h1:IZ2e7UyLCYmpQ8Kd7k0A32uXqdqfiV1r2sKs5/riblo= github.com/onflow/flow-core-contracts/lib/go/contracts v0.11.2-0.20220413172500-d89ca96e6db3/go.mod h1:T6yhM+kWrFxiP6F3hh8lh9DcocHfmv48P4ITnjLhKSk= github.com/onflow/flow-core-contracts/lib/go/contracts v0.11.2-0.20220422202806-92ad02a996cc/go.mod h1:T6yhM+kWrFxiP6F3hh8lh9DcocHfmv48P4ITnjLhKSk= -github.com/onflow/flow-core-contracts/lib/go/contracts v0.11.2-0.20220601181637-450d5563fa7d h1:XxR0IyxTKIMg01pUG4jMFxx5Yvr8dXxL3lwzRvmv+UY= -github.com/onflow/flow-core-contracts/lib/go/contracts v0.11.2-0.20220601181637-450d5563fa7d/go.mod h1:T6yhM+kWrFxiP6F3hh8lh9DcocHfmv48P4ITnjLhKSk= +github.com/onflow/flow-core-contracts/lib/go/contracts v0.11.2-0.20220601212727-0bb6b1f83a9c h1:c1FOD8maR58JE0lohZk5fxUi7kLLMWNAxW48Jw4aopY= +github.com/onflow/flow-core-contracts/lib/go/contracts v0.11.2-0.20220601212727-0bb6b1f83a9c/go.mod h1:T6yhM+kWrFxiP6F3hh8lh9DcocHfmv48P4ITnjLhKSk= github.com/onflow/flow-core-contracts/lib/go/templates v0.11.2-0.20220413172500-d89ca96e6db3/go.mod h1:JB2hWVxUjhMshUDNVQKfn4dzhhawOO+i3XjlhLMV5MM= github.com/onflow/flow-core-contracts/lib/go/templates 
v0.11.2-0.20220422202806-92ad02a996cc/go.mod h1:JB2hWVxUjhMshUDNVQKfn4dzhhawOO+i3XjlhLMV5MM= -github.com/onflow/flow-core-contracts/lib/go/templates v0.11.2-0.20220601181637-450d5563fa7d h1:9W7/3ummJ3ESEjIPfcexm9GDl8hUSaujzfKHsBvjILQ= -github.com/onflow/flow-core-contracts/lib/go/templates v0.11.2-0.20220601181637-450d5563fa7d/go.mod h1:JB2hWVxUjhMshUDNVQKfn4dzhhawOO+i3XjlhLMV5MM= +github.com/onflow/flow-core-contracts/lib/go/templates v0.11.2-0.20220601212727-0bb6b1f83a9c h1:TCJ+dn+Q2g80WxiuGc84Jiu++rxuxUnGI1VRYLbg5Ns= +github.com/onflow/flow-core-contracts/lib/go/templates v0.11.2-0.20220601212727-0bb6b1f83a9c/go.mod h1:JB2hWVxUjhMshUDNVQKfn4dzhhawOO+i3XjlhLMV5MM= github.com/onflow/flow-emulator v0.20.3/go.mod h1:xNdVsrMJiAaYJ59Dwo+xvj0ENdvk/bI14zkGN4V0ozs= github.com/onflow/flow-emulator v0.30.1-0.20220421153717-0a0abc4d580b/go.mod h1:5IsytpArI/wN2ZZXCRAAcIp/223PmVDnmPxbRZO6IbU= github.com/onflow/flow-emulator v0.31.2-0.20220421202209-eb83f9bfda53/go.mod h1:4jyaXs+wHHI0JlBi3/+K9DciPzIve3+MFrNXRAnBEl4= diff --git a/integration/go.mod b/integration/go.mod index 16bf7ee3019..091497e5bc9 100644 --- a/integration/go.mod +++ b/integration/go.mod @@ -10,8 +10,8 @@ require ( github.com/go-yaml/yaml v2.1.0+incompatible github.com/jedib0t/go-pretty v4.3.0+incompatible github.com/onflow/cadence v0.21.3-0.20220601002855-8b113c539a2c - github.com/onflow/flow-core-contracts/lib/go/contracts v0.11.2-0.20220601181637-450d5563fa7d - github.com/onflow/flow-core-contracts/lib/go/templates v0.11.2-0.20220601181637-450d5563fa7d + github.com/onflow/flow-core-contracts/lib/go/contracts v0.11.2-0.20220601212727-0bb6b1f83a9c + github.com/onflow/flow-core-contracts/lib/go/templates v0.11.2-0.20220601212727-0bb6b1f83a9c github.com/onflow/flow-emulator v0.31.2-0.20220513151845-ef7513cb1cd0 github.com/onflow/flow-ft/lib/go/templates v0.2.0 github.com/onflow/flow-go v0.25.13-0.20220513151142-7858f76e703b // replaced by version on-disk diff --git a/integration/go.sum b/integration/go.sum 
index 62b6635e5e0..2aa51b9a772 100644 --- a/integration/go.sum +++ b/integration/go.sum @@ -1492,10 +1492,10 @@ github.com/onflow/cadence v0.21.3-0.20220513161637-08b93d4bb7b9/go.mod h1:vNIxF1 github.com/onflow/cadence v0.21.3-0.20220601002855-8b113c539a2c h1:l2Oc/aZ1FbzJBU5bbsBm6jjYe7aHLOp++ymLL2dTHRQ= github.com/onflow/cadence v0.21.3-0.20220601002855-8b113c539a2c/go.mod h1:FliGP1FZLEuSemnSf8pKItDzW7E2cvPukx/SsE1+oCo= github.com/onflow/flow v0.3.0/go.mod h1:lzyAYmbu1HfkZ9cfnL5/sjrrsnJiUU8fRL26CqLP7+c= -github.com/onflow/flow-core-contracts/lib/go/contracts v0.11.2-0.20220601181637-450d5563fa7d h1:XxR0IyxTKIMg01pUG4jMFxx5Yvr8dXxL3lwzRvmv+UY= -github.com/onflow/flow-core-contracts/lib/go/contracts v0.11.2-0.20220601181637-450d5563fa7d/go.mod h1:T6yhM+kWrFxiP6F3hh8lh9DcocHfmv48P4ITnjLhKSk= -github.com/onflow/flow-core-contracts/lib/go/templates v0.11.2-0.20220601181637-450d5563fa7d h1:9W7/3ummJ3ESEjIPfcexm9GDl8hUSaujzfKHsBvjILQ= -github.com/onflow/flow-core-contracts/lib/go/templates v0.11.2-0.20220601181637-450d5563fa7d/go.mod h1:JB2hWVxUjhMshUDNVQKfn4dzhhawOO+i3XjlhLMV5MM= +github.com/onflow/flow-core-contracts/lib/go/contracts v0.11.2-0.20220601212727-0bb6b1f83a9c h1:c1FOD8maR58JE0lohZk5fxUi7kLLMWNAxW48Jw4aopY= +github.com/onflow/flow-core-contracts/lib/go/contracts v0.11.2-0.20220601212727-0bb6b1f83a9c/go.mod h1:T6yhM+kWrFxiP6F3hh8lh9DcocHfmv48P4ITnjLhKSk= +github.com/onflow/flow-core-contracts/lib/go/templates v0.11.2-0.20220601212727-0bb6b1f83a9c h1:TCJ+dn+Q2g80WxiuGc84Jiu++rxuxUnGI1VRYLbg5Ns= +github.com/onflow/flow-core-contracts/lib/go/templates v0.11.2-0.20220601212727-0bb6b1f83a9c/go.mod h1:JB2hWVxUjhMshUDNVQKfn4dzhhawOO+i3XjlhLMV5MM= github.com/onflow/flow-emulator v0.31.2-0.20220513151845-ef7513cb1cd0 h1:QfLN/O4VkLhrUoTVmU5+/e5gIxI3gRpOjhz06UKsSEQ= github.com/onflow/flow-emulator v0.31.2-0.20220513151845-ef7513cb1cd0/go.mod h1:XLZfTEaYX2151TcTvZPIj7taBN0qpsOoyvEBRZzlXtQ= github.com/onflow/flow-ft/lib/go/contracts v0.5.0 
h1:Cg4gHGVblxcejfNNG5Mfj98Wf4zbY76O0Y28QB0766A= From d567dd86d98306fb13a2ef3cfcc888087112eaf8 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Fri, 17 Jun 2022 10:55:13 -0400 Subject: [PATCH 006/223] add placeholder SlashingViolationsConsumer for the network package - return sentinel errors from IsAuthorizedSender func - use slashing violations consumer to log warnings for potential slashable behavior in AuthorizedSenderValidator MessageValidator - update godoc comments --- network/slashing_violations_consumer.go | 50 +++++++++++++ .../pubsub/authorized_sender_validator.go | 72 +++++++++++-------- 2 files changed, 91 insertions(+), 31 deletions(-) create mode 100644 network/slashing_violations_consumer.go diff --git a/network/slashing_violations_consumer.go b/network/slashing_violations_consumer.go new file mode 100644 index 00000000000..42a8450d6c7 --- /dev/null +++ b/network/slashing_violations_consumer.go @@ -0,0 +1,50 @@ +package network + +import ( + "github.com/onflow/flow-go/model/flow" + "github.com/rs/zerolog" +) + +// SlashingViolationsConsumer is a struct that logs a message for any slashable offences. +// This struct will be updated in the future when slashing is implemented. +type SlashingViolationsConsumer struct { + log zerolog.Logger +} + +// NewSlashingViolationsConsumer returns a new SlashingViolationsConsumer +func NewSlashingViolationsConsumer(log zerolog.Logger) *SlashingViolationsConsumer { + return &SlashingViolationsConsumer{log} +} + +// OnUnAuthorizedSenderError logs a warning for unauthorized sender error +func (c *SlashingViolationsConsumer) OnUnAuthorizedSenderError(identity *flow.Identity, peerID, msgType string, err error) { + c.log.Warn(). + Err(err). + Str("peer_id", peerID). + Str("role", identity.Role.String()). + Str("node_id", identity.NodeID.String()). + Str("message_type", msgType). 
+ Msg("potential slashable offense") +} + +// OnUnknownMsgTypeError logs a warning for unknown message type error +func (c *SlashingViolationsConsumer) OnUnknownMsgTypeError(identity *flow.Identity, peerID, msgType string, err error) { + c.log.Warn(). + Err(err). + Str("peer_id", peerID). + Str("role", identity.Role.String()). + Str("node_id", identity.NodeID.String()). + Str("message_type", msgType). + Msg("potential slashable offense") +} + +// OnSenderEjectedError logs a warning for sender ejected error +func (c *SlashingViolationsConsumer) OnSenderEjectedError(identity *flow.Identity, peerID, msgType string, err error) { + c.log.Warn(). + Err(err). + Str("peer_id", peerID). + Str("role", identity.Role.String()). + Str("node_id", identity.NodeID.String()). + Str("message_type", msgType). + Msg("potential slashable offense") +} diff --git a/network/validator/pubsub/authorized_sender_validator.go b/network/validator/pubsub/authorized_sender_validator.go index 93b7c409d49..a942a21e778 100644 --- a/network/validator/pubsub/authorized_sender_validator.go +++ b/network/validator/pubsub/authorized_sender_validator.go @@ -4,6 +4,7 @@ package validator import ( "context" + "errors" "fmt" "github.com/libp2p/go-libp2p-core/peer" @@ -15,48 +16,48 @@ import ( "github.com/onflow/flow-go/model/flow" ) -// AuthorizedSenderValidator using the getIdentity func will check if the role of the sender -// is part of the authorized roles list for the channel being communicated on. A node is considered -// to be authorized to send a message if the following are true. -// 1. The node is staked. -// 2. The node is not ejected. -// 3. The message type is a known message type (can be decoded with network codec). -// 4. The message is authorized to be sent on channel. -// 4. The sender role is authorized to send message channel. -// 5. The sender role is authorized to participate on channel. 
+var ( + ErrUnauthorizedSender = errors.New("validation failed: sender is not authorized to send this message type") + ErrSenderEjected = errors.New("validation failed: sender is an ejected node") + ErrUnknownMessageType = errors.New("validation failed: failed to get message auth config") + ErrIdentityUnverified = errors.New("validation failed: could not verify identity of sender") +) + +// AuthorizedSenderValidator returns a MessageValidator that will check if the sender of a message is authorized to send the message. +// The MessageValidator returned will use the getIdentity to get the flow identity for the sender, asserting that the sender is a staked node. +// If the sender is an unstaked node the message is rejected. IsAuthorizedSender is used to perform further message validation. If validation +// fails the message is rejected, if the validation error is an expected error slashing data is collected before the message is rejected. func AuthorizedSenderValidator(log zerolog.Logger, channel network.Channel, getIdentity func(peer.ID) (*flow.Identity, bool)) MessageValidator { - log = log.With(). + slashingViolationsConsumer := network.NewSlashingViolationsConsumer(log.With(). Str("component", "authorized_sender_validator"). Str("network_channel", channel.String()). - Logger() + Logger()) return func(ctx context.Context, from peer.ID, msg interface{}) pubsub.ValidationResult { identity, ok := getIdentity(from) if !ok { - log.Warn().Str("peer_id", from.String()).Msg("could not verify identity of sender") + log.Warn().Err(ErrIdentityUnverified).Str("peer_id", from.String()).Msg("rejecting message") return pubsub.ValidationReject } - // redundant check if the node is ejected so that we can fail fast before decoding - if identity.Ejected { - log.Warn(). - Err(fmt.Errorf("sender %s is an ejected node", identity.NodeID)). - Str("peer_id", from.String()). - Str("role", identity.Role.String()). - Str("node_id", identity.NodeID.String()). 
- Msg("rejecting message") + msgType, err := IsAuthorizedSender(identity, channel, msg) + if errors.Is(err, ErrUnauthorizedSender) { + slashingViolationsConsumer.OnUnAuthorizedSenderError(identity, from.String(), msgType, err) return pubsub.ValidationReject - } - - if what, err := IsAuthorizedSender(identity, channel, msg); err != nil { + } else if errors.Is(err, ErrUnknownMessageType) { + slashingViolationsConsumer.OnUnknownMsgTypeError(identity, from.String(), msgType, err) + return pubsub.ValidationReject + } else if errors.Is(err, ErrSenderEjected) { + slashingViolationsConsumer.OnSenderEjectedError(identity, from.String(), msgType, err) + return pubsub.ValidationReject + } else if err != nil { log.Warn(). Err(err). Str("peer_id", from.String()). Str("role", identity.Role.String()). Str("node_id", identity.NodeID.String()). - Str("message_type", what). - Msg("sender is not authorized, rejecting message") - + Str("message_type", msgType). + Msg("unexpected error during message validation") return pubsub.ValidationReject } @@ -64,16 +65,25 @@ func AuthorizedSenderValidator(log zerolog.Logger, channel network.Channel, getI } } -// IsAuthorizedSender checks if node is an authorized role. +// IsAuthorizedSender performs network authorization validation. This func will assert the following; +// 1. The node is not ejected. +// 2. Using the message auth config +// A. The message is authorized to be sent on channel. +// B. The sender role is authorized to send message channel. +// C. The sender role is authorized to participate on channel. 
+// Expected error returns during normal operations: +// * ErrSenderEjected: if identity of sender is ejected +// * ErrUnknownMessageType: if retrieving the message auth config for msg fails +// * ErrUnauthorizedSender: if the message auth config validation for msg fails func IsAuthorizedSender(identity *flow.Identity, channel network.Channel, msg interface{}) (string, error) { if identity.Ejected { - return "", fmt.Errorf("sender %s is an ejected node", identity.NodeID) + return "", ErrSenderEjected } - // get message code configuration + // get message auth config conf, err := network.GetMessageAuthConfig(msg) if err != nil { - return "", fmt.Errorf("failed to get message auth config: %w", err) + return "", fmt.Errorf("%s: %w", err, ErrUnknownMessageType) } // handle special case for cluster prefixed channels @@ -82,7 +92,7 @@ func IsAuthorizedSender(identity *flow.Identity, channel network.Channel, msg in } if err := conf.IsAuthorized(identity.Role, channel); err != nil { - return conf.String, err + return conf.String, fmt.Errorf("%s: %w", err, ErrUnauthorizedSender) } return conf.String, nil From d9fdf5a919fd06083d9ae89e580dae4e011c0fe9 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Fri, 17 Jun 2022 15:57:37 -0400 Subject: [PATCH 007/223] use nodeFixture instead of createNode to more closely resemble a real libP2P node --- network/p2p/fixture_test.go | 26 ++++++++------ network/p2p/topic_validator_test.go | 54 ++++++++++------------------- 2 files changed, 35 insertions(+), 45 deletions(-) diff --git a/network/p2p/fixture_test.go b/network/p2p/fixture_test.go index 5b2a8e0c27c..e7021500cd6 100644 --- a/network/p2p/fixture_test.go +++ b/network/p2p/fixture_test.go @@ -2,7 +2,6 @@ package p2p_test import ( "context" - "fmt" "net" "testing" "time" @@ -43,6 +42,7 @@ type nodeFixtureParameters struct { address string dhtOptions []dht.Option peerFilter p2p.PeerFilter + role flow.Role } type nodeFixtureParameterOption func(*nodeFixtureParameters) @@ -83,12 +83,18 
@@ func withPeerFilter(filter p2p.PeerFilter) nodeFixtureParameterOption { } } +func withRole(role flow.Role) nodeFixtureParameterOption { + return func(p *nodeFixtureParameters) { + p.role = role + } +} + // nodeFixture is a test fixture that creates a single libp2p node with the given key, spork id, and options. // It returns the node and its identity. func nodeFixture( t *testing.T, ctx context.Context, - sporkId flow.Identifier, + sporkID flow.Identifier, dhtPrefix string, opts ...nodeFixtureParameterOption, ) (*p2p.Node, flow.Identity) { @@ -108,16 +114,17 @@ func nodeFixture( identity := unittest.IdentityFixture( unittest.WithNetworkingKey(parameters.key.PublicKey()), - unittest.WithAddress(parameters.address)) + unittest.WithAddress(parameters.address), + unittest.WithRole(parameters.role)) noopMetrics := metrics.NewNoopCollector() connManager := p2p.NewConnManager(logger, noopMetrics) - builder := p2p.NewNodeBuilder(logger, parameters.address, parameters.key, sporkId). + builder := p2p.NewNodeBuilder(logger, parameters.address, parameters.key, sporkID). SetConnectionManager(connManager). SetPubSub(pubsub.NewGossipSub). SetRoutingSystem(func(c context.Context, h host.Host) (routing.Routing, error) { - return p2p.NewDHT(c, h, protocol.ID(unicast.FlowDHTProtocolIDPrefix+sporkId.String()+"/"+dhtPrefix), parameters.dhtOptions...) + return p2p.NewDHT(c, h, protocol.ID(unicast.FlowDHTProtocolIDPrefix+sporkID.String()+"/"+dhtPrefix), parameters.dhtOptions...) 
}) if parameters.peerFilter != nil { @@ -131,16 +138,15 @@ func nodeFixture( err = n.WithDefaultUnicastProtocol(parameters.handlerFunc, parameters.unicasts) require.NoError(t, err) - require.Eventuallyf(t, func() bool { - ip, p, err := n.GetIPPort() - return err == nil && ip != "" && p != "" - }, 3*time.Second, ticksForAssertEventually, fmt.Sprintf("could not start node %s", identity.NodeID.String())) + //require.Eventuallyf(t, func() bool { + // ip, p, err := n.GetIPPort() + // return err == nil && ip != "" && p != "" + //}, 3*time.Second, ticksForAssertEventually, fmt.Sprintf("could not start node %s", identity.NodeID.String())) // get the actual IP and port that have been assigned by the subsystem ip, port, err := n.GetIPPort() require.NoError(t, err) identity.Address = ip + ":" + port - return n, *identity } diff --git a/network/p2p/topic_validator_test.go b/network/p2p/topic_validator_test.go index 2ddba078e5d..672fe1340af 100644 --- a/network/p2p/topic_validator_test.go +++ b/network/p2p/topic_validator_test.go @@ -26,19 +26,15 @@ import ( // TestAuthorizedSenderValidator_Unauthorized tests that the authorized sender validator rejects messages from nodes that are not authorized to send the message func TestAuthorizedSenderValidator_Unauthorized(t *testing.T) { sporkId := unittest.IdentifierFixture() - identity1, privateKey1 := unittest.IdentityWithNetworkingKeyFixture(unittest.WithRole(flow.RoleConsensus)) - sn1 := createNode(t, identity1.NodeID, privateKey1, sporkId) - identity2, privateKey2 := unittest.IdentityWithNetworkingKeyFixture(unittest.WithRole(flow.RoleConsensus)) - sn2 := createNode(t, identity2.NodeID, privateKey2, sporkId) - - identity3, privateKey3 := unittest.IdentityWithNetworkingKeyFixture(unittest.WithRole(flow.RoleAccess)) - an1 := createNode(t, identity3.NodeID, privateKey3, sporkId) + sn1, identity1 := nodeFixture(t, context.Background(), sporkId, "TestAuthorizedSenderValidator_Unauthorized", withRole(flow.RoleConsensus)) + sn2, 
identity2 := nodeFixture(t, context.Background(), sporkId, "TestAuthorizedSenderValidator_Unauthorized", withRole(flow.RoleConsensus)) + an1, identity3 := nodeFixture(t, context.Background(), sporkId, "TestAuthorizedSenderValidator_Unauthorized", withRole(flow.RoleAccess)) channel := network.ConsensusCommittee topic := network.TopicFromChannel(channel, sporkId) - ids := flow.IdentityList{identity1, identity2, identity3} + ids := flow.IdentityList{&identity1, &identity2, &identity3} translator, err := p2p.NewFixedTableIdentityTranslator(ids) require.NoError(t, err) @@ -134,17 +130,15 @@ func TestAuthorizedSenderValidator_Unauthorized(t *testing.T) { // TestAuthorizedSenderValidator_Authorized tests that the authorized sender validator rejects messages being sent on the wrong channel func TestAuthorizedSenderValidator_InvalidMsg(t *testing.T) { sporkId := unittest.IdentifierFixture() - identity1, privateKey1 := unittest.IdentityWithNetworkingKeyFixture(unittest.WithRole(flow.RoleConsensus)) - sn1 := createNode(t, identity1.NodeID, privateKey1, sporkId) - identity2, privateKey2 := unittest.IdentityWithNetworkingKeyFixture(unittest.WithRole(flow.RoleConsensus)) - sn2 := createNode(t, identity2.NodeID, privateKey2, sporkId) + sn1, identity1 := nodeFixture(t, context.Background(), sporkId, "TestAuthorizedSenderValidator_InvalidMsg", withRole(flow.RoleConsensus)) + sn2, identity2 := nodeFixture(t, context.Background(), sporkId, "TestAuthorizedSenderValidator_InvalidMsg", withRole(flow.RoleConsensus)) // try to publish BlockProposal on invalid SyncCommittee channel channel := network.SyncCommittee topic := network.TopicFromChannel(channel, sporkId) - ids := flow.IdentityList{identity1, identity2} + ids := flow.IdentityList{&identity1, &identity2} translator, err := p2p.NewFixedTableIdentityTranslator(ids) require.NoError(t, err) @@ -181,7 +175,7 @@ func TestAuthorizedSenderValidator_InvalidMsg(t *testing.T) { len(sn2.ListPeers(topic.String())) > 0 }, 3*time.Second, 
100*time.Millisecond) - timedCtx, cancel5s := context.WithTimeout(context.Background(), 5*time.Second) + timedCtx, cancel5s := context.WithTimeout(context.Background(), 60*time.Second) defer cancel5s() // create a dummy block proposal to publish from our SN node header := unittest.BlockHeaderFixture() @@ -207,17 +201,15 @@ func TestAuthorizedSenderValidator_InvalidMsg(t *testing.T) { // TestAuthorizedSenderValidator_Authorized tests that the authorized sender validator rejects messages from unstaked nodes func TestAuthorizedSenderValidator_Unstaked(t *testing.T) { sporkId := unittest.IdentifierFixture() - identity1, privateKey1 := unittest.IdentityWithNetworkingKeyFixture(unittest.WithRole(flow.RoleConsensus)) - sn1 := createNode(t, identity1.NodeID, privateKey1, sporkId) - identity2, privateKey2 := unittest.IdentityWithNetworkingKeyFixture(unittest.WithRole(flow.RoleConsensus)) - sn2 := createNode(t, identity2.NodeID, privateKey2, sporkId) + sn1, identity1 := nodeFixture(t, context.Background(), sporkId, "consensus_1", withRole(flow.RoleConsensus)) + sn2, _ := nodeFixture(t, context.Background(), sporkId, "consensus_2", withRole(flow.RoleConsensus)) channel := network.ConsensusCommittee topic := network.TopicFromChannel(channel, sporkId) //NOTE: identity2 is not in the ids list simulating an un-staked node - ids := flow.IdentityList{identity1} + ids := flow.IdentityList{&identity1} translator, err := p2p.NewFixedTableIdentityTranslator(ids) require.NoError(t, err) @@ -280,19 +272,15 @@ func TestAuthorizedSenderValidator_Unstaked(t *testing.T) { // TestAuthorizedSenderValidator_Ejected tests that the authorized sender validator rejects messages from nodes that are ejected func TestAuthorizedSenderValidator_Ejected(t *testing.T) { sporkId := unittest.IdentifierFixture() - identity1, privateKey1 := unittest.IdentityWithNetworkingKeyFixture(unittest.WithRole(flow.RoleConsensus)) - sn1 := createNode(t, identity1.NodeID, privateKey1, sporkId) - identity2, privateKey2 := 
unittest.IdentityWithNetworkingKeyFixture(unittest.WithRole(flow.RoleConsensus)) - sn2 := createNode(t, identity2.NodeID, privateKey2, sporkId) - - identity3, privateKey3 := unittest.IdentityWithNetworkingKeyFixture(unittest.WithRole(flow.RoleAccess)) - an1 := createNode(t, identity3.NodeID, privateKey3, sporkId) + sn1, identity1 := nodeFixture(t, context.Background(), sporkId, "consensus_1", withRole(flow.RoleConsensus)) + sn2, identity2 := nodeFixture(t, context.Background(), sporkId, "consensus_2", withRole(flow.RoleConsensus)) + an1, identity3 := nodeFixture(t, context.Background(), sporkId, "access_1", withRole(flow.RoleAccess)) channel := network.ConsensusCommittee topic := network.TopicFromChannel(channel, sporkId) - ids := flow.IdentityList{identity1, identity2, identity3} + ids := flow.IdentityList{&identity1, &identity2, &identity3} translator, err := p2p.NewFixedTableIdentityTranslator(ids) require.NoError(t, err) @@ -377,19 +365,15 @@ func TestAuthorizedSenderValidator_Ejected(t *testing.T) { // TestAuthorizedSenderValidator_ClusterChannel tests that the authorized sender validator correctly validates messages sent on cluster channels func TestAuthorizedSenderValidator_ClusterChannel(t *testing.T) { sporkId := unittest.IdentifierFixture() - identity1, privateKey1 := unittest.IdentityWithNetworkingKeyFixture(unittest.WithRole(flow.RoleCollection)) - ln1 := createNode(t, identity1.NodeID, privateKey1, sporkId) - - identity2, privateKey2 := unittest.IdentityWithNetworkingKeyFixture(unittest.WithRole(flow.RoleCollection)) - ln2 := createNode(t, identity2.NodeID, privateKey2, sporkId) - identity3, privateKey3 := unittest.IdentityWithNetworkingKeyFixture(unittest.WithRole(flow.RoleCollection)) - ln3 := createNode(t, identity3.NodeID, privateKey3, sporkId) + ln1, identity1 := nodeFixture(t, context.Background(), sporkId, "collection_1", withRole(flow.RoleCollection)) + ln2, identity2 := nodeFixture(t, context.Background(), sporkId, "collection_2", 
withRole(flow.RoleCollection)) + ln3, identity3 := nodeFixture(t, context.Background(), sporkId, "collection_3", withRole(flow.RoleCollection)) channel := network.ChannelSyncCluster(flow.Testnet) topic := network.TopicFromChannel(channel, sporkId) - ids := flow.IdentityList{identity1, identity2} + ids := flow.IdentityList{&identity1, &identity2, &identity3} translator, err := p2p.NewFixedTableIdentityTranslator(ids) require.NoError(t, err) From 3488033cdc9bc7eb4f1784d935755e42e25cacc6 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Fri, 17 Jun 2022 17:12:18 -0400 Subject: [PATCH 008/223] add unit test coverage for all happy & sad paths for IsAuthorizedSender - add Interface field to MsgAuthConfig --- network/message_authorization_config.go | 145 +++++++++++------- .../authorized_sender_validator_test.go | 102 ++++++++++++ 2 files changed, 195 insertions(+), 52 deletions(-) create mode 100644 network/validator/pubsub/authorized_sender_validator_test.go diff --git a/network/message_authorization_config.go b/network/message_authorization_config.go index a0b1575d441..2362fca464a 100644 --- a/network/message_authorization_config.go +++ b/network/message_authorization_config.go @@ -1,6 +1,7 @@ package network import ( + "errors" "fmt" "github.com/onflow/flow-go/model/flow" @@ -12,14 +13,15 @@ import ( // is represented as a map from network channel -> list of all roles allowed to send the message on // the channel. type MsgAuthConfig struct { - String string - config map[Channel]flow.RoleList + String string + Interface interface{} + Config map[Channel]flow.RoleList } // IsAuthorized checks if the specified role is authorized to send the message on channel and // asserts that the message is authorized to be sent on channel. 
func (m MsgAuthConfig) IsAuthorized(role flow.Role, channel Channel) error { - authorizedRoles, ok := m.config[channel] + authorizedRoles, ok := m.Config[channel] if !ok { return fmt.Errorf("message (%s) is not authorized to be sent on channel (%s)", m.String, channel) } @@ -32,53 +34,62 @@ func (m MsgAuthConfig) IsAuthorized(role flow.Role, channel Channel) error { } var ( + ErrUnknownMsgType = errors.New("could not get authorization Config for unknown message type") + // consensus blockProposal = MsgAuthConfig{ - String: "BlockProposal", - config: map[Channel]flow.RoleList{ + String: "BlockProposal", + Interface: &messages.BlockProposal{}, + Config: map[Channel]flow.RoleList{ ConsensusCommittee: {flow.RoleConsensus}, PushBlocks: {flow.RoleConsensus}, // channel alias ReceiveBlocks = PushBlocks }, } blockVote = MsgAuthConfig{ - String: "BlockVote", - config: map[Channel]flow.RoleList{ + String: "BlockVote", + Interface: &messages.BlockVote{}, + Config: map[Channel]flow.RoleList{ ConsensusCommittee: {flow.RoleConsensus}, }, } // protocol state sync syncRequest = MsgAuthConfig{ - String: "SyncRequest", - config: map[Channel]flow.RoleList{ + String: "SyncRequest", + Interface: &messages.SyncRequest{}, + Config: map[Channel]flow.RoleList{ SyncCommittee: flow.Roles(), SyncClusterPrefix: flow.Roles(), }, } syncResponse = MsgAuthConfig{ - String: "SyncResponse", - config: map[Channel]flow.RoleList{ + String: "SyncResponse", + Interface: &messages.SyncResponse{}, + Config: map[Channel]flow.RoleList{ SyncCommittee: flow.Roles(), SyncClusterPrefix: flow.Roles(), }, } rangeRequest = MsgAuthConfig{ - String: "RangeRequest", - config: map[Channel]flow.RoleList{ + String: "RangeRequest", + Interface: &messages.RangeRequest{}, + Config: map[Channel]flow.RoleList{ SyncCommittee: flow.Roles(), SyncClusterPrefix: flow.Roles(), }, } batchRequest = MsgAuthConfig{ - String: "BatchRequest", - config: map[Channel]flow.RoleList{ + String: "BatchRequest", + Interface: 
&messages.BatchRequest{}, + Config: map[Channel]flow.RoleList{ SyncCommittee: flow.Roles(), SyncClusterPrefix: flow.Roles(), }, } blockResponse = MsgAuthConfig{ - String: "BlockResponse", - config: map[Channel]flow.RoleList{ + String: "BlockResponse", + Interface: &messages.BlockResponse{}, + Config: map[Channel]flow.RoleList{ SyncCommittee: flow.Roles(), SyncClusterPrefix: flow.Roles(), }, @@ -86,54 +97,62 @@ var ( // cluster consensus clusterBlockProposal = MsgAuthConfig{ - String: "ClusterBlockProposal", - config: map[Channel]flow.RoleList{ + String: "ClusterBlockProposal", + Interface: &messages.ClusterBlockProposal{}, + Config: map[Channel]flow.RoleList{ ConsensusClusterPrefix: {flow.RoleCollection}, }, } clusterBlockVote = MsgAuthConfig{ - String: "ClusterBlockVote", - config: map[Channel]flow.RoleList{ + String: "ClusterBlockVote", + Interface: &messages.ClusterBlockVote{}, + Config: map[Channel]flow.RoleList{ ConsensusClusterPrefix: {flow.RoleCollection}, }, } clusterBlockResponse = MsgAuthConfig{ - String: "ClusterBlockResponse", - config: map[Channel]flow.RoleList{ + String: "ClusterBlockResponse", + Interface: &messages.ClusterBlockResponse{}, + Config: map[Channel]flow.RoleList{ ConsensusClusterPrefix: {flow.RoleCollection}, }, } // collections, guarantees & transactions collectionGuarantee = MsgAuthConfig{ - String: "CollectionGuarantee", - config: map[Channel]flow.RoleList{ + String: "CollectionGuarantee", + Interface: &flow.CollectionGuarantee{}, + Config: map[Channel]flow.RoleList{ PushGuarantees: {flow.RoleCollection}, // channel alias ReceiveGuarantees = PushGuarantees }, } transaction = MsgAuthConfig{ - String: "Transaction", - config: map[Channel]flow.RoleList{ + String: "Transaction", + Interface: &flow.Transaction{}, + Config: map[Channel]flow.RoleList{ PushTransactions: {flow.RoleCollection}, // channel alias ReceiveTransactions = PushTransactions }, } transactionBody = MsgAuthConfig{ - String: "TransactionBody", - config: 
map[Channel]flow.RoleList{ + String: "TransactionBody", + Interface: &flow.TransactionBody{}, + Config: map[Channel]flow.RoleList{ PushTransactions: {flow.RoleCollection}, // channel alias ReceiveTransactions = PushTransactions }, } // core messages for execution & verification executionReceipt = MsgAuthConfig{ - String: "ExecutionReceipt", - config: map[Channel]flow.RoleList{ + String: "ExecutionReceipt", + Interface: &flow.ExecutionReceipt{}, + Config: map[Channel]flow.RoleList{ PushReceipts: {flow.RoleExecution}, // channel alias ReceiveReceipts = PushReceipts }, } resultApproval = MsgAuthConfig{ - String: "ResultApproval", - config: map[Channel]flow.RoleList{ + String: "ResultApproval", + Interface: &flow.ResultApproval{}, + Config: map[Channel]flow.RoleList{ PushApprovals: {flow.RoleVerification}, // channel alias ReceiveApprovals = PushApprovals }, } @@ -141,17 +160,18 @@ var ( // [deprecated] execution state synchronization executionStateSyncRequest = MsgAuthConfig{ String: "ExecutionStateSyncRequest", - config: nil, + Config: nil, } executionStateDelta = MsgAuthConfig{ String: "ExecutionStateDelta", - config: nil, + Config: nil, } // data exchange for execution of blocks chunkDataRequest = MsgAuthConfig{ - String: "ChunkDataRequest", - config: map[Channel]flow.RoleList{ + String: "ChunkDataRequest", + Interface: &messages.ChunkDataRequest{}, + Config: map[Channel]flow.RoleList{ ProvideChunks: {flow.RoleVerification}, // channel alias RequestChunks = ProvideChunks RequestCollections: {flow.RoleVerification}, RequestApprovalsByChunk: {flow.RoleVerification}, @@ -159,8 +179,9 @@ var ( }, } chunkDataResponse = MsgAuthConfig{ - String: "ChunkDataResponse", - config: map[Channel]flow.RoleList{ + String: "ChunkDataResponse", + Interface: &messages.ChunkDataResponse{}, + Config: map[Channel]flow.RoleList{ ProvideChunks: {flow.RoleExecution}, // channel alias RequestChunks = ProvideChunks RequestCollections: {flow.RoleExecution}, RequestApprovalsByChunk: 
{flow.RoleExecution}, @@ -170,22 +191,25 @@ var ( // result approvals approvalRequest = MsgAuthConfig{ - String: "ApprovalRequest", - config: map[Channel]flow.RoleList{ + String: "ApprovalRequest", + Interface: &messages.ApprovalRequest{}, + Config: map[Channel]flow.RoleList{ ProvideApprovalsByChunk: {flow.RoleConsensus}, }, } approvalResponse = MsgAuthConfig{ - String: "ApprovalResponse", - config: map[Channel]flow.RoleList{ + String: "ApprovalResponse", + Interface: &messages.ApprovalResponse{}, + Config: map[Channel]flow.RoleList{ ProvideApprovalsByChunk: {flow.RoleVerification}, }, } // generic entity exchange engines entityRequest = MsgAuthConfig{ - String: "EntityRequest", - config: map[Channel]flow.RoleList{ + String: "EntityRequest", + Interface: &messages.EntityRequest{}, + Config: map[Channel]flow.RoleList{ RequestChunks: {flow.RoleAccess, flow.RoleConsensus, flow.RoleCollection}, RequestCollections: {flow.RoleAccess, flow.RoleConsensus, flow.RoleCollection}, RequestApprovalsByChunk: {flow.RoleAccess, flow.RoleConsensus, flow.RoleCollection}, @@ -193,8 +217,9 @@ var ( }, } entityResponse = MsgAuthConfig{ - String: "EntityResponse", - config: map[Channel]flow.RoleList{ + String: "EntityResponse", + Interface: &messages.EntityResponse{}, + Config: map[Channel]flow.RoleList{ RequestChunks: {flow.RoleCollection, flow.RoleExecution}, RequestCollections: {flow.RoleCollection, flow.RoleExecution}, RequestApprovalsByChunk: {flow.RoleCollection, flow.RoleExecution}, @@ -204,8 +229,9 @@ var ( // testing echo = MsgAuthConfig{ - String: "echo", - config: map[Channel]flow.RoleList{ + String: "echo", + Interface: &message.TestMessage{}, + Config: map[Channel]flow.RoleList{ TestNetworkChannel: flow.Roles(), TestMetricsChannel: flow.Roles(), }, @@ -213,13 +239,18 @@ var ( // DKG dkgMessage = MsgAuthConfig{ - String: "DKGMessage", - config: map[Channel]flow.RoleList{ + String: "DKGMessage", + Interface: &messages.DKGMessage{}, + Config: map[Channel]flow.RoleList{ 
DKGCommittee: {flow.RoleConsensus}, }, } ) +// GetMessageAuthConfig checks the underlying type and returns the correct +// message auth Config. +// Expected error returns during normal operations: +// * ErrUnknownMsgType : if underlying type of v does not match any of the known message types func GetMessageAuthConfig(v interface{}) (MsgAuthConfig, error) { switch v.(type) { // consensus @@ -295,6 +326,16 @@ func GetMessageAuthConfig(v interface{}) (MsgAuthConfig, error) { return dkgMessage, nil default: - return MsgAuthConfig{}, fmt.Errorf("could not get authorization config for message with type (%T)", v) + return MsgAuthConfig{}, fmt.Errorf("%w (%T)", ErrUnknownMsgType, v) + } +} + +// GetAllMessageAuthConfigs returns a list with all message auth configurations +func GetAllMessageAuthConfigs() []MsgAuthConfig { + return []MsgAuthConfig{ + blockProposal, blockVote, syncRequest, syncResponse, rangeRequest, batchRequest, + blockResponse, clusterBlockProposal, clusterBlockVote, clusterBlockResponse, collectionGuarantee, + transaction, transactionBody, executionReceipt, resultApproval, + chunkDataRequest, chunkDataResponse, approvalRequest, approvalResponse, entityRequest, entityResponse, echo, dkgMessage, } } diff --git a/network/validator/pubsub/authorized_sender_validator_test.go b/network/validator/pubsub/authorized_sender_validator_test.go new file mode 100644 index 00000000000..5362dccc0e9 --- /dev/null +++ b/network/validator/pubsub/authorized_sender_validator_test.go @@ -0,0 +1,102 @@ +package validator + +import ( + "fmt" + "testing" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/utils/unittest" + "github.com/stretchr/testify/require" +) + +type TestCase struct { + Identity *flow.Identity + Channel network.Channel + Message interface{} + MessageStr string +} + +var ( + happyPathTestCases = make([]TestCase, 0) + sadPathTestCases = make([]TestCase, 0) +) + +// TestIsAuthorizedSender_AuthorizedSender 
checks that IsAuthorizedSender does not return false positive +// validation errors for all possible valid combinations (authorized sender role, message type). +func TestIsAuthorizedSender_AuthorizedSender(t *testing.T) { + for _, c := range happyPathTestCases { + str := fmt.Sprintf("role (%s) should be authorized to send message type (%s) on channel (%s)", c.Identity.Role, c.MessageStr, c.Channel) + t.Run(str, func(t *testing.T) { + msgType, err := IsAuthorizedSender(c.Identity, c.Channel, c.Message) + require.NoError(t, err) + + require.Equal(t, c.MessageStr, msgType) + }) + } +} + +// TestIsAuthorizedSender_UnAuthorizedSender checks that IsAuthorizedSender return's ErrUnauthorizedSender +// validation error for all possible invalid combinations (unauthorized sender role, message type). +func TestIsAuthorizedSender_UnAuthorizedSender(t *testing.T) { + for _, c := range sadPathTestCases { + str := fmt.Sprintf("role (%s) should not be authorized to send message type (%s) on channel (%s)", c.Identity.Role, c.MessageStr, c.Channel) + t.Run(str, func(t *testing.T) { + msgType, err := IsAuthorizedSender(c.Identity, c.Channel, c.Message) + require.ErrorIs(t, err, ErrUnauthorizedSender) + require.Equal(t, c.MessageStr, msgType) + }) + } +} + +// TestIsAuthorizedSender_ValidationFailure checks that IsAuthorizedSender returns the expected validation error. 
+func TestIsAuthorizedSender_ValidationFailure(t *testing.T) { + t.Run("sender is ejected", func(t *testing.T) { + identity := unittest.IdentityFixture() + identity.Ejected = true + msgType, err := IsAuthorizedSender(identity, network.Channel(""), nil) + require.ErrorIs(t, err, ErrSenderEjected) + require.Equal(t, "", msgType) + }) + + t.Run("unknown message type", func(t *testing.T) { + identity := unittest.IdentityFixture() + msgType, err := IsAuthorizedSender(identity, network.Channel(""), nil) + require.ErrorIs(t, err, ErrUnknownMessageType) + require.Equal(t, "", msgType) + }) +} + +// initializeTestCases initializes happy and sad path test cases for checking authorized and unauthorized role message combinations. +func initializeTestCases() { + for _, c := range network.GetAllMessageAuthConfigs() { + for channel, authorizedRoles := range c.Config { + for _, role := range flow.Roles() { + identity := unittest.IdentityFixture(unittest.WithRole(role)) + if authorizedRoles.Contains(role) { + // test cases for validation success happy path + tc := TestCase{ + Identity: identity, + Channel: channel, + Message: c.Interface, + MessageStr: c.String, + } + happyPathTestCases = append(happyPathTestCases, tc) + } else { + // test cases for validation unsuccessful sad path + tc := TestCase{ + Identity: identity, + Channel: channel, + Message: c.Interface, + MessageStr: c.String, + } + sadPathTestCases = append(sadPathTestCases, tc) + } + } + } + } +} + +func init() { + initializeTestCases() +} From 36f894ae57c81d7a8d57c701742cd2506e69ae9f Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Fri, 17 Jun 2022 17:14:33 -0400 Subject: [PATCH 009/223] fix imports --- network/slashing_violations_consumer.go | 3 ++- network/validator/pubsub/authorized_sender_validator_test.go | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/network/slashing_violations_consumer.go b/network/slashing_violations_consumer.go index 42a8450d6c7..3f3a3d32063 100644 --- 
a/network/slashing_violations_consumer.go +++ b/network/slashing_violations_consumer.go @@ -1,8 +1,9 @@ package network import ( - "github.com/onflow/flow-go/model/flow" "github.com/rs/zerolog" + + "github.com/onflow/flow-go/model/flow" ) // SlashingViolationsConsumer is a struct that logs a message for any slashable offences. diff --git a/network/validator/pubsub/authorized_sender_validator_test.go b/network/validator/pubsub/authorized_sender_validator_test.go index 5362dccc0e9..52f0490ae67 100644 --- a/network/validator/pubsub/authorized_sender_validator_test.go +++ b/network/validator/pubsub/authorized_sender_validator_test.go @@ -4,10 +4,11 @@ import ( "fmt" "testing" + "github.com/stretchr/testify/require" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/utils/unittest" - "github.com/stretchr/testify/require" ) type TestCase struct { From 9d4bd88e2b61b9ae41f844fab4f8e338beceffcb Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Fri, 17 Jun 2022 17:46:18 -0400 Subject: [PATCH 010/223] Update message_authorization_config.go --- network/message_authorization_config.go | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/network/message_authorization_config.go b/network/message_authorization_config.go index 2362fca464a..ea30a0448fb 100644 --- a/network/message_authorization_config.go +++ b/network/message_authorization_config.go @@ -20,21 +20,26 @@ type MsgAuthConfig struct { // IsAuthorized checks if the specified role is authorized to send the message on channel and // asserts that the message is authorized to be sent on channel. 
+// Expected error returns during normal operations: +// * ErrUnauthorizedMessageOnChannel: if channel does not exist in message config +// * ErrUnauthorizedRole: if list of authorized roles for message config does not include role func (m MsgAuthConfig) IsAuthorized(role flow.Role, channel Channel) error { authorizedRoles, ok := m.Config[channel] if !ok { - return fmt.Errorf("message (%s) is not authorized to be sent on channel (%s)", m.String, channel) + return ErrUnauthorizedMessageOnChannel } if !authorizedRoles.Contains(role) { - return fmt.Errorf("sender with role (%s) is not authorized to send message (%s) on channel (%s)", role, m.String, channel) + return ErrUnauthorizedRole } return nil } var ( - ErrUnknownMsgType = errors.New("could not get authorization Config for unknown message type") + ErrUnknownMsgType = errors.New("could not get authorization Config for unknown message type") + ErrUnauthorizedMessageOnChannel = errors.New("message is not authorized to be sent on channel") + ErrUnauthorizedRole = errors.New("sender with role (%s) is not authorized to send message (%s) on channel (%s)") // consensus blockProposal = MsgAuthConfig{ From 512dc291332dbc8e8ea09a119bc0286a443cdb68 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Fri, 17 Jun 2022 17:48:55 -0400 Subject: [PATCH 011/223] Update fixture_test.go --- network/p2p/fixture_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/network/p2p/fixture_test.go b/network/p2p/fixture_test.go index 1f0d0f4c860..36f221f2c14 100644 --- a/network/p2p/fixture_test.go +++ b/network/p2p/fixture_test.go @@ -217,7 +217,7 @@ func acceptAndHang(t *testing.T, l net.Listener) { // nodesFixture is a test fixture that creates a number of libp2p nodes with the given callback function for stream handling. // It returns the nodes and their identities. 
-func nodesFixture(t *testing.T, ctx context.Context, sporkId flow.Identifier, dhtPrefix string, count int, opts ...nodeFixtureParameterOption) ([]*p2p.Node, +func nodesFixture(t *testing.T, ctx context.Context, sporkID flow.Identifier, dhtPrefix string, count int, opts ...nodeFixtureParameterOption) ([]*p2p.Node, flow.IdentityList) { // keeps track of errors on creating a node var err error @@ -235,7 +235,7 @@ func nodesFixture(t *testing.T, ctx context.Context, sporkId flow.Identifier, dh var identities flow.IdentityList for i := 0; i < count; i++ { // create a node on localhost with a random port assigned by the OS - node, identity := nodeFixture(t, ctx, sporkId, dhtPrefix, opts...) + node, identity := nodeFixture(t, ctx, sporkID, dhtPrefix, opts...) nodes = append(nodes, node) identities = append(identities, &identity) } From ed07a261042d9ef2464e4e9a4865371745ed1cfd Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Fri, 17 Jun 2022 18:07:56 -0400 Subject: [PATCH 012/223] Update fixture_test.go --- network/p2p/fixture_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/network/p2p/fixture_test.go b/network/p2p/fixture_test.go index 36f221f2c14..b66171c04a0 100644 --- a/network/p2p/fixture_test.go +++ b/network/p2p/fixture_test.go @@ -125,7 +125,7 @@ func nodeFixture( SetPubSub(pubsub.NewGossipSub). 
SetRoutingSystem(func(c context.Context, h host.Host) (routing.Routing, error) { return p2p.NewDHT(c, h, - protocol.ID(unicast.FlowDHTProtocolIDPrefix+sporkId.String()+"/"+dhtPrefix), + protocol.ID(unicast.FlowDHTProtocolIDPrefix+sporkID.String()+"/"+dhtPrefix), logger, noopMetrics, parameters.dhtOptions..., From 851ab30f02c62a9ade7d4d47a375a62a1ae73eb3 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Wed, 22 Jun 2022 15:37:29 -0400 Subject: [PATCH 013/223] store message auth configs in a map - initialize message auth config map instead of returning list from func - remove fmt characters from err - change Interface struct field to a func that returns new pointer insteader of storing a reference --- network/message_authorization_config.go | 265 +++++++++++------- .../authorized_sender_validator_test.go | 21 +- 2 files changed, 164 insertions(+), 122 deletions(-) diff --git a/network/message_authorization_config.go b/network/message_authorization_config.go index ea30a0448fb..888798db58d 100644 --- a/network/message_authorization_config.go +++ b/network/message_authorization_config.go @@ -14,7 +14,7 @@ import ( // the channel. type MsgAuthConfig struct { String string - Interface interface{} + Interface func() interface{} Config map[Channel]flow.RoleList } @@ -39,61 +39,86 @@ func (m MsgAuthConfig) IsAuthorized(role flow.Role, channel Channel) error { var ( ErrUnknownMsgType = errors.New("could not get authorization Config for unknown message type") ErrUnauthorizedMessageOnChannel = errors.New("message is not authorized to be sent on channel") - ErrUnauthorizedRole = errors.New("sender with role (%s) is not authorized to send message (%s) on channel (%s)") + ErrUnauthorizedRole = errors.New("sender role not authorized to send message on channel") + MessageAuthConfigs map[string]MsgAuthConfig +) + +// init is called first time this package is imported. +// It creates and initializes channelRoleMap and clusterChannelPrefixRoleMap. 
+func init() { + initializeMessageAuthConfigsMap() +} +func initializeMessageAuthConfigsMap() { + MessageAuthConfigs = make(map[string]MsgAuthConfig) + // consensus - blockProposal = MsgAuthConfig{ - String: "BlockProposal", - Interface: &messages.BlockProposal{}, + MessageAuthConfigs["BlockProposal"] = MsgAuthConfig{ + String: "BlockProposal", + Interface: func() interface{} { + return new(messages.BlockProposal) + }, Config: map[Channel]flow.RoleList{ ConsensusCommittee: {flow.RoleConsensus}, PushBlocks: {flow.RoleConsensus}, // channel alias ReceiveBlocks = PushBlocks }, } - blockVote = MsgAuthConfig{ - String: "BlockVote", - Interface: &messages.BlockVote{}, + MessageAuthConfigs["BlockVote"] = MsgAuthConfig{ + String: "BlockVote", + Interface: func() interface{} { + return new(messages.BlockVote) + }, Config: map[Channel]flow.RoleList{ ConsensusCommittee: {flow.RoleConsensus}, }, } // protocol state sync - syncRequest = MsgAuthConfig{ - String: "SyncRequest", - Interface: &messages.SyncRequest{}, + MessageAuthConfigs["SyncRequest"] = MsgAuthConfig{ + String: "SyncRequest", + Interface: func() interface{} { + return new(messages.SyncRequest) + }, Config: map[Channel]flow.RoleList{ SyncCommittee: flow.Roles(), SyncClusterPrefix: flow.Roles(), }, } - syncResponse = MsgAuthConfig{ - String: "SyncResponse", - Interface: &messages.SyncResponse{}, + MessageAuthConfigs["SyncResponse"] = MsgAuthConfig{ + String: "SyncResponse", + Interface: func() interface{} { + return new(messages.SyncResponse) + }, Config: map[Channel]flow.RoleList{ SyncCommittee: flow.Roles(), SyncClusterPrefix: flow.Roles(), }, } - rangeRequest = MsgAuthConfig{ - String: "RangeRequest", - Interface: &messages.RangeRequest{}, + MessageAuthConfigs["RangeRequest"] = MsgAuthConfig{ + String: "RangeRequest", + Interface: func() interface{} { + return new(messages.RangeRequest) + }, Config: map[Channel]flow.RoleList{ SyncCommittee: flow.Roles(), SyncClusterPrefix: flow.Roles(), }, } - batchRequest = 
MsgAuthConfig{ - String: "BatchRequest", - Interface: &messages.BatchRequest{}, + MessageAuthConfigs["BatchRequest"] = MsgAuthConfig{ + String: "BatchRequest", + Interface: func() interface{} { + return new(messages.BatchRequest) + }, Config: map[Channel]flow.RoleList{ SyncCommittee: flow.Roles(), SyncClusterPrefix: flow.Roles(), }, } - blockResponse = MsgAuthConfig{ - String: "BlockResponse", - Interface: &messages.BlockResponse{}, + MessageAuthConfigs["BlockResponse"] = MsgAuthConfig{ + String: "BlockResponse", + Interface: func() interface{} { + return new(messages.BlockResponse) + }, Config: map[Channel]flow.RoleList{ SyncCommittee: flow.Roles(), SyncClusterPrefix: flow.Roles(), @@ -101,81 +126,99 @@ var ( } // cluster consensus - clusterBlockProposal = MsgAuthConfig{ - String: "ClusterBlockProposal", - Interface: &messages.ClusterBlockProposal{}, + MessageAuthConfigs["ClusterBlockProposal"] = MsgAuthConfig{ + String: "ClusterBlockProposal", + Interface: func() interface{} { + return new(messages.ClusterBlockProposal) + }, Config: map[Channel]flow.RoleList{ ConsensusClusterPrefix: {flow.RoleCollection}, }, } - clusterBlockVote = MsgAuthConfig{ - String: "ClusterBlockVote", - Interface: &messages.ClusterBlockVote{}, + MessageAuthConfigs["ClusterBlockVote"] = MsgAuthConfig{ + String: "ClusterBlockVote", + Interface: func() interface{} { + return new(messages.ClusterBlockVote) + }, Config: map[Channel]flow.RoleList{ ConsensusClusterPrefix: {flow.RoleCollection}, }, } - clusterBlockResponse = MsgAuthConfig{ - String: "ClusterBlockResponse", - Interface: &messages.ClusterBlockResponse{}, + MessageAuthConfigs["ClusterBlockResponse"] = MsgAuthConfig{ + String: "ClusterBlockResponse", + Interface: func() interface{} { + return new(messages.ClusterBlockResponse) + }, Config: map[Channel]flow.RoleList{ ConsensusClusterPrefix: {flow.RoleCollection}, }, } // collections, guarantees & transactions - collectionGuarantee = MsgAuthConfig{ - String: "CollectionGuarantee", - 
Interface: &flow.CollectionGuarantee{}, + MessageAuthConfigs["CollectionGuarantee"] = MsgAuthConfig{ + String: "CollectionGuarantee", + Interface: func() interface{} { + return new(flow.CollectionGuarantee) + }, Config: map[Channel]flow.RoleList{ PushGuarantees: {flow.RoleCollection}, // channel alias ReceiveGuarantees = PushGuarantees }, } - transaction = MsgAuthConfig{ - String: "Transaction", - Interface: &flow.Transaction{}, + MessageAuthConfigs["Transaction"] = MsgAuthConfig{ + String: "Transaction", + Interface: func() interface{} { + return new(flow.Transaction) + }, Config: map[Channel]flow.RoleList{ PushTransactions: {flow.RoleCollection}, // channel alias ReceiveTransactions = PushTransactions }, } - transactionBody = MsgAuthConfig{ - String: "TransactionBody", - Interface: &flow.TransactionBody{}, + MessageAuthConfigs["TransactionBody"] = MsgAuthConfig{ + String: "TransactionBody", + Interface: func() interface{} { + return new(flow.TransactionBody) + }, Config: map[Channel]flow.RoleList{ PushTransactions: {flow.RoleCollection}, // channel alias ReceiveTransactions = PushTransactions }, } // core messages for execution & verification - executionReceipt = MsgAuthConfig{ - String: "ExecutionReceipt", - Interface: &flow.ExecutionReceipt{}, + MessageAuthConfigs["ExecutionReceipt"] = MsgAuthConfig{ + String: "ExecutionReceipt", + Interface: func() interface{} { + return new(flow.ExecutionReceipt) + }, Config: map[Channel]flow.RoleList{ PushReceipts: {flow.RoleExecution}, // channel alias ReceiveReceipts = PushReceipts }, } - resultApproval = MsgAuthConfig{ - String: "ResultApproval", - Interface: &flow.ResultApproval{}, + MessageAuthConfigs["ResultApproval"] = MsgAuthConfig{ + String: "ResultApproval", + Interface: func() interface{} { + return new(flow.ResultApproval) + }, Config: map[Channel]flow.RoleList{ PushApprovals: {flow.RoleVerification}, // channel alias ReceiveApprovals = PushApprovals }, } // [deprecated] execution state synchronization - 
executionStateSyncRequest = MsgAuthConfig{ + MessageAuthConfigs["ExecutionStateSyncRequest"] = MsgAuthConfig{ String: "ExecutionStateSyncRequest", Config: nil, } - executionStateDelta = MsgAuthConfig{ + MessageAuthConfigs["ExecutionStateDelta"] = MsgAuthConfig{ String: "ExecutionStateDelta", Config: nil, } // data exchange for execution of blocks - chunkDataRequest = MsgAuthConfig{ - String: "ChunkDataRequest", - Interface: &messages.ChunkDataRequest{}, + MessageAuthConfigs["ChunkDataRequest"] = MsgAuthConfig{ + String: "ChunkDataRequest", + Interface: func() interface{} { + return new(messages.ChunkDataRequest) + }, Config: map[Channel]flow.RoleList{ ProvideChunks: {flow.RoleVerification}, // channel alias RequestChunks = ProvideChunks RequestCollections: {flow.RoleVerification}, @@ -183,9 +226,11 @@ var ( RequestReceiptsByBlockID: {flow.RoleVerification}, }, } - chunkDataResponse = MsgAuthConfig{ - String: "ChunkDataResponse", - Interface: &messages.ChunkDataResponse{}, + MessageAuthConfigs["ChunkDataResponse"] = MsgAuthConfig{ + String: "ChunkDataResponse", + Interface: func() interface{} { + return new(messages.ChunkDataResponse) + }, Config: map[Channel]flow.RoleList{ ProvideChunks: {flow.RoleExecution}, // channel alias RequestChunks = ProvideChunks RequestCollections: {flow.RoleExecution}, @@ -195,25 +240,31 @@ var ( } // result approvals - approvalRequest = MsgAuthConfig{ - String: "ApprovalRequest", - Interface: &messages.ApprovalRequest{}, + MessageAuthConfigs["ApprovalRequest"] = MsgAuthConfig{ + String: "ApprovalRequest", + Interface: func() interface{} { + return new(messages.ApprovalRequest) + }, Config: map[Channel]flow.RoleList{ ProvideApprovalsByChunk: {flow.RoleConsensus}, }, } - approvalResponse = MsgAuthConfig{ - String: "ApprovalResponse", - Interface: &messages.ApprovalResponse{}, + MessageAuthConfigs["ApprovalResponse"] = MsgAuthConfig{ + String: "ApprovalResponse", + Interface: func() interface{} { + return new(messages.ApprovalResponse) + 
}, Config: map[Channel]flow.RoleList{ ProvideApprovalsByChunk: {flow.RoleVerification}, }, } // generic entity exchange engines - entityRequest = MsgAuthConfig{ - String: "EntityRequest", - Interface: &messages.EntityRequest{}, + MessageAuthConfigs["EntityRequest"] = MsgAuthConfig{ + String: "EntityRequest", + Interface: func() interface{} { + return new(messages.EntityRequest) + }, Config: map[Channel]flow.RoleList{ RequestChunks: {flow.RoleAccess, flow.RoleConsensus, flow.RoleCollection}, RequestCollections: {flow.RoleAccess, flow.RoleConsensus, flow.RoleCollection}, @@ -221,9 +272,11 @@ var ( RequestReceiptsByBlockID: {flow.RoleAccess, flow.RoleConsensus, flow.RoleCollection}, }, } - entityResponse = MsgAuthConfig{ - String: "EntityResponse", - Interface: &messages.EntityResponse{}, + MessageAuthConfigs["EntityResponse"] = MsgAuthConfig{ + String: "EntityResponse", + Interface: func() interface{} { + return new(messages.EntityResponse) + }, Config: map[Channel]flow.RoleList{ RequestChunks: {flow.RoleCollection, flow.RoleExecution}, RequestCollections: {flow.RoleCollection, flow.RoleExecution}, @@ -233,9 +286,11 @@ var ( } // testing - echo = MsgAuthConfig{ - String: "echo", - Interface: &message.TestMessage{}, + MessageAuthConfigs["Echo"] = MsgAuthConfig{ + String: "echo", + Interface: func() interface{} { + return new(message.TestMessage) + }, Config: map[Channel]flow.RoleList{ TestNetworkChannel: flow.Roles(), TestMetricsChannel: flow.Roles(), @@ -243,14 +298,16 @@ var ( } // DKG - dkgMessage = MsgAuthConfig{ - String: "DKGMessage", - Interface: &messages.DKGMessage{}, + MessageAuthConfigs["DKGMessage"] = MsgAuthConfig{ + String: "DKGMessage", + Interface: func() interface{} { + return new(messages.DKGMessage) + }, Config: map[Channel]flow.RoleList{ DKGCommittee: {flow.RoleConsensus}, }, } -) +} // GetMessageAuthConfig checks the underlying type and returns the correct // message auth Config. 
@@ -260,87 +317,77 @@ func GetMessageAuthConfig(v interface{}) (MsgAuthConfig, error) { switch v.(type) { // consensus case *messages.BlockProposal: - return blockProposal, nil + return MessageAuthConfigs["BlockProposal"], nil case *messages.BlockVote: - return blockVote, nil + return MessageAuthConfigs["BlockVote"], nil // protocol state sync case *messages.SyncRequest: - return syncRequest, nil + return MessageAuthConfigs["SyncRequest"], nil case *messages.SyncResponse: - return syncResponse, nil + return MessageAuthConfigs["SyncResponse"], nil case *messages.RangeRequest: - return rangeRequest, nil + return MessageAuthConfigs["RangeRequest"], nil case *messages.BatchRequest: - return batchRequest, nil + return MessageAuthConfigs["BatchRequest"], nil case *messages.BlockResponse: - return blockResponse, nil + return MessageAuthConfigs["BlockResponse"], nil // cluster consensus case *messages.ClusterBlockProposal: - return clusterBlockProposal, nil + return MessageAuthConfigs["ClusterBlockProposal"], nil case *messages.ClusterBlockVote: - return clusterBlockVote, nil + return MessageAuthConfigs["ClusterBlockVote"], nil case *messages.ClusterBlockResponse: - return clusterBlockResponse, nil + return MessageAuthConfigs["ClusterBlockResponse"], nil // collections, guarantees & transactions case *flow.CollectionGuarantee: - return collectionGuarantee, nil + return MessageAuthConfigs["CollectionGuarantee"], nil case *flow.TransactionBody: - return transactionBody, nil + return MessageAuthConfigs["TransactionBody"], nil case *flow.Transaction: - return transaction, nil + return MessageAuthConfigs["Transaction"], nil // core messages for execution & verification case *flow.ExecutionReceipt: - return executionReceipt, nil + return MessageAuthConfigs["ExecutionReceipt"], nil case *flow.ResultApproval: - return resultApproval, nil + return MessageAuthConfigs["ResultApproval"], nil // execution state synchronization case *messages.ExecutionStateSyncRequest: - return 
executionStateSyncRequest, nil + return MessageAuthConfigs["ExecutionStateSyncRequest"], nil case *messages.ExecutionStateDelta: - return executionStateDelta, nil + return MessageAuthConfigs["ExecutionStateDelta"], nil // data exchange for execution of blocks case *messages.ChunkDataRequest: - return chunkDataRequest, nil + return MessageAuthConfigs["ChunkDataRequest"], nil case *messages.ChunkDataResponse: - return chunkDataResponse, nil + return MessageAuthConfigs["ChunkDataResponse"], nil // result approvals case *messages.ApprovalRequest: - return approvalRequest, nil + return MessageAuthConfigs["ApprovalRequest"], nil case *messages.ApprovalResponse: - return approvalResponse, nil + return MessageAuthConfigs["ApprovalResponse"], nil // generic entity exchange engines case *messages.EntityRequest: - return entityRequest, nil + return MessageAuthConfigs["EntityRequest"], nil case *messages.EntityResponse: - return entityResponse, nil + return MessageAuthConfigs["EntityResponse"], nil // testing case *message.TestMessage: - return echo, nil + return MessageAuthConfigs["TestMessage"], nil // dkg case *messages.DKGMessage: - return dkgMessage, nil + return MessageAuthConfigs["DKGMessage"], nil default: return MsgAuthConfig{}, fmt.Errorf("%w (%T)", ErrUnknownMsgType, v) } } - -// GetAllMessageAuthConfigs returns a list with all message auth configurations -func GetAllMessageAuthConfigs() []MsgAuthConfig { - return []MsgAuthConfig{ - blockProposal, blockVote, syncRequest, syncResponse, rangeRequest, batchRequest, - blockResponse, clusterBlockProposal, clusterBlockVote, clusterBlockResponse, collectionGuarantee, - transaction, transactionBody, executionReceipt, resultApproval, - chunkDataRequest, chunkDataResponse, approvalRequest, approvalResponse, entityRequest, entityResponse, echo, dkgMessage, - } -} diff --git a/network/validator/pubsub/authorized_sender_validator_test.go b/network/validator/pubsub/authorized_sender_validator_test.go index 
52f0490ae67..d9ea467a388 100644 --- a/network/validator/pubsub/authorized_sender_validator_test.go +++ b/network/validator/pubsub/authorized_sender_validator_test.go @@ -70,27 +70,22 @@ func TestIsAuthorizedSender_ValidationFailure(t *testing.T) { // initializeTestCases initializes happy and sad path test cases for checking authorized and unauthorized role message combinations. func initializeTestCases() { - for _, c := range network.GetAllMessageAuthConfigs() { + for _, c := range network.MessageAuthConfigs { for channel, authorizedRoles := range c.Config { for _, role := range flow.Roles() { identity := unittest.IdentityFixture(unittest.WithRole(role)) + tc := TestCase{ + Identity: identity, + Channel: channel, + Message: c.Interface(), + MessageStr: c.String, + } + if authorizedRoles.Contains(role) { // test cases for validation success happy path - tc := TestCase{ - Identity: identity, - Channel: channel, - Message: c.Interface, - MessageStr: c.String, - } happyPathTestCases = append(happyPathTestCases, tc) } else { // test cases for validation unsuccessful sad path - tc := TestCase{ - Identity: identity, - Channel: channel, - Message: c.Interface, - MessageStr: c.String, - } sadPathTestCases = append(sadPathTestCases, tc) } } From e792066086cae7bcac1d438283a75bf74a43769a Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Wed, 22 Jun 2022 15:39:11 -0400 Subject: [PATCH 014/223] Update network/slashing_violations_consumer.go Co-authored-by: Peter Argue <89119817+peterargue@users.noreply.github.com> --- network/slashing_violations_consumer.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/network/slashing_violations_consumer.go b/network/slashing_violations_consumer.go index 3f3a3d32063..0e49e842a5d 100644 --- a/network/slashing_violations_consumer.go +++ b/network/slashing_violations_consumer.go @@ -23,7 +23,7 @@ func (c *SlashingViolationsConsumer) OnUnAuthorizedSenderError(identity *flow.Id Err(err). Str("peer_id", peerID). 
Str("role", identity.Role.String()). - Str("node_id", identity.NodeID.String()). + Str("peer_node_id", identity.NodeID.String()). Str("message_type", msgType). Msg("potential slashable offense") } From 1337349e5278f63878b8d571b5b55c12a953d301 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Wed, 22 Jun 2022 15:39:20 -0400 Subject: [PATCH 015/223] Update network/slashing_violations_consumer.go Co-authored-by: Peter Argue <89119817+peterargue@users.noreply.github.com> --- network/slashing_violations_consumer.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/network/slashing_violations_consumer.go b/network/slashing_violations_consumer.go index 0e49e842a5d..65c210957b5 100644 --- a/network/slashing_violations_consumer.go +++ b/network/slashing_violations_consumer.go @@ -34,7 +34,7 @@ func (c *SlashingViolationsConsumer) OnUnknownMsgTypeError(identity *flow.Identi Err(err). Str("peer_id", peerID). Str("role", identity.Role.String()). - Str("node_id", identity.NodeID.String()). + Str("peer_node_id", identity.NodeID.String()). Str("message_type", msgType). Msg("potential slashable offense") } From 79a453ac010f7a145fc505d69cd1148f032566c0 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Wed, 22 Jun 2022 15:39:29 -0400 Subject: [PATCH 016/223] Update network/slashing_violations_consumer.go Co-authored-by: Peter Argue <89119817+peterargue@users.noreply.github.com> --- network/slashing_violations_consumer.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/network/slashing_violations_consumer.go b/network/slashing_violations_consumer.go index 65c210957b5..db54278afdc 100644 --- a/network/slashing_violations_consumer.go +++ b/network/slashing_violations_consumer.go @@ -45,7 +45,7 @@ func (c *SlashingViolationsConsumer) OnSenderEjectedError(identity *flow.Identit Err(err). Str("peer_id", peerID). Str("role", identity.Role.String()). - Str("node_id", identity.NodeID.String()). + Str("peer_node_id", identity.NodeID.String()). 
Str("message_type", msgType). Msg("potential slashable offense") } From 17ba6a4a056ba4ef321a5e4b88fd84b4f1a17444 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Wed, 22 Jun 2022 15:39:51 -0400 Subject: [PATCH 017/223] Update network/validator/pubsub/authorized_sender_validator.go Co-authored-by: Peter Argue <89119817+peterargue@users.noreply.github.com> --- network/validator/pubsub/authorized_sender_validator.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/network/validator/pubsub/authorized_sender_validator.go b/network/validator/pubsub/authorized_sender_validator.go index a942a21e778..cfa3d17b20f 100644 --- a/network/validator/pubsub/authorized_sender_validator.go +++ b/network/validator/pubsub/authorized_sender_validator.go @@ -55,7 +55,7 @@ func AuthorizedSenderValidator(log zerolog.Logger, channel network.Channel, getI Err(err). Str("peer_id", from.String()). Str("role", identity.Role.String()). - Str("node_id", identity.NodeID.String()). + Str("peer_node_id", identity.NodeID.String()). Str("message_type", msgType). Msg("unexpected error during message validation") return pubsub.ValidationReject From 170f3e5bf6d95803a39afde8d3d7c3c648a4ae3f Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Wed, 22 Jun 2022 16:19:47 -0400 Subject: [PATCH 018/223] attach keyed values to logger before creating slashing violations consumer - log err if unknown err is returned --- network/validator/pubsub/authorized_sender_validator.go | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/network/validator/pubsub/authorized_sender_validator.go b/network/validator/pubsub/authorized_sender_validator.go index cfa3d17b20f..5122fc9cccb 100644 --- a/network/validator/pubsub/authorized_sender_validator.go +++ b/network/validator/pubsub/authorized_sender_validator.go @@ -28,10 +28,12 @@ var ( // If the sender is an unstaked node the message is rejected. IsAuthorizedSender is used to perform further message validation. 
If validation // fails the message is rejected, if the validation error is an expected error slashing data is collected before the message is rejected. func AuthorizedSenderValidator(log zerolog.Logger, channel network.Channel, getIdentity func(peer.ID) (*flow.Identity, bool)) MessageValidator { - slashingViolationsConsumer := network.NewSlashingViolationsConsumer(log.With(). + log = log.With(). Str("component", "authorized_sender_validator"). Str("network_channel", channel.String()). - Logger()) + Logger() + + slashingViolationsConsumer := network.NewSlashingViolationsConsumer(log) return func(ctx context.Context, from peer.ID, msg interface{}) pubsub.ValidationResult { identity, ok := getIdentity(from) @@ -51,7 +53,7 @@ func AuthorizedSenderValidator(log zerolog.Logger, channel network.Channel, getI slashingViolationsConsumer.OnSenderEjectedError(identity, from.String(), msgType, err) return pubsub.ValidationReject } else if err != nil { - log.Warn(). + log.Error(). Err(err). Str("peer_id", from.String()). Str("role", identity.Role.String()). 
From e7b51c5b265dfe3d2ee7a8c433a677feab9ea116 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Wed, 22 Jun 2022 16:54:32 -0400 Subject: [PATCH 019/223] use testify suite pattern - use TestMessage instead of Echo --- network/message_authorization_config.go | 6 +- .../authorized_sender_validator_test.go | 63 ++++++++++--------- 2 files changed, 37 insertions(+), 32 deletions(-) diff --git a/network/message_authorization_config.go b/network/message_authorization_config.go index 888798db58d..5254097f53c 100644 --- a/network/message_authorization_config.go +++ b/network/message_authorization_config.go @@ -51,7 +51,7 @@ func init() { func initializeMessageAuthConfigsMap() { MessageAuthConfigs = make(map[string]MsgAuthConfig) - + // consensus MessageAuthConfigs["BlockProposal"] = MsgAuthConfig{ String: "BlockProposal", @@ -286,8 +286,8 @@ func initializeMessageAuthConfigsMap() { } // testing - MessageAuthConfigs["Echo"] = MsgAuthConfig{ - String: "echo", + MessageAuthConfigs["TestMessage"] = MsgAuthConfig{ + String: "TestMessage", Interface: func() interface{} { return new(message.TestMessage) }, diff --git a/network/validator/pubsub/authorized_sender_validator_test.go b/network/validator/pubsub/authorized_sender_validator_test.go index d9ea467a388..964f3f851d8 100644 --- a/network/validator/pubsub/authorized_sender_validator_test.go +++ b/network/validator/pubsub/authorized_sender_validator_test.go @@ -5,6 +5,7 @@ import ( "testing" "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/network" @@ -18,58 +19,66 @@ type TestCase struct { MessageStr string } -var ( - happyPathTestCases = make([]TestCase, 0) - sadPathTestCases = make([]TestCase, 0) -) +func TestIsAuthorizedSender(t *testing.T) { + suite.Run(t, new(TestIsAuthorizedSenderSuite)) +} + +type TestIsAuthorizedSenderSuite struct { + suite.Suite + happyPathTestCases []TestCase + sadPathTestCases []TestCase +} + +func (s 
*TestIsAuthorizedSenderSuite) SetupTest() { + s.initializeTestCases() +} // TestIsAuthorizedSender_AuthorizedSender checks that IsAuthorizedSender does not return false positive // validation errors for all possible valid combinations (authorized sender role, message type). -func TestIsAuthorizedSender_AuthorizedSender(t *testing.T) { - for _, c := range happyPathTestCases { +func (s *TestIsAuthorizedSenderSuite) TestIsAuthorizedSender_AuthorizedSender() { + for _, c := range s.happyPathTestCases { str := fmt.Sprintf("role (%s) should be authorized to send message type (%s) on channel (%s)", c.Identity.Role, c.MessageStr, c.Channel) - t.Run(str, func(t *testing.T) { + s.Run(str, func() { msgType, err := IsAuthorizedSender(c.Identity, c.Channel, c.Message) - require.NoError(t, err) - - require.Equal(t, c.MessageStr, msgType) + s.Require().NoError(err) + s.Require().Equal(c.MessageStr, msgType) }) } } // TestIsAuthorizedSender_UnAuthorizedSender checks that IsAuthorizedSender return's ErrUnauthorizedSender // validation error for all possible invalid combinations (unauthorized sender role, message type). -func TestIsAuthorizedSender_UnAuthorizedSender(t *testing.T) { - for _, c := range sadPathTestCases { +func (s *TestIsAuthorizedSenderSuite) TestIsAuthorizedSender_UnAuthorizedSender() { + for _, c := range s.sadPathTestCases { str := fmt.Sprintf("role (%s) should not be authorized to send message type (%s) on channel (%s)", c.Identity.Role, c.MessageStr, c.Channel) - t.Run(str, func(t *testing.T) { + s.Run(str, func() { msgType, err := IsAuthorizedSender(c.Identity, c.Channel, c.Message) - require.ErrorIs(t, err, ErrUnauthorizedSender) - require.Equal(t, c.MessageStr, msgType) + require.ErrorIs(s.T(), err, ErrUnauthorizedSender) + require.Equal(s.T(), c.MessageStr, msgType) }) } } // TestIsAuthorizedSender_ValidationFailure checks that IsAuthorizedSender returns the expected validation error. 
-func TestIsAuthorizedSender_ValidationFailure(t *testing.T) { - t.Run("sender is ejected", func(t *testing.T) { +func (s *TestIsAuthorizedSenderSuite) TestIsAuthorizedSender_ValidationFailure() { + s.Run("sender is ejected", func() { identity := unittest.IdentityFixture() identity.Ejected = true msgType, err := IsAuthorizedSender(identity, network.Channel(""), nil) - require.ErrorIs(t, err, ErrSenderEjected) - require.Equal(t, "", msgType) + require.ErrorIs(s.T(), err, ErrSenderEjected) + require.Equal(s.T(), "", msgType) }) - t.Run("unknown message type", func(t *testing.T) { + s.Run("unknown message type", func() { identity := unittest.IdentityFixture() msgType, err := IsAuthorizedSender(identity, network.Channel(""), nil) - require.ErrorIs(t, err, ErrUnknownMessageType) - require.Equal(t, "", msgType) + require.ErrorIs(s.T(), err, ErrUnknownMessageType) + require.Equal(s.T(), "", msgType) }) } // initializeTestCases initializes happy and sad path test cases for checking authorized and unauthorized role message combinations. 
-func initializeTestCases() { +func (s *TestIsAuthorizedSenderSuite) initializeTestCases() { for _, c := range network.MessageAuthConfigs { for channel, authorizedRoles := range c.Config { for _, role := range flow.Roles() { @@ -83,16 +92,12 @@ func initializeTestCases() { if authorizedRoles.Contains(role) { // test cases for validation success happy path - happyPathTestCases = append(happyPathTestCases, tc) + s.happyPathTestCases = append(s.happyPathTestCases, tc) } else { // test cases for validation unsuccessful sad path - sadPathTestCases = append(sadPathTestCases, tc) + s.sadPathTestCases = append(s.sadPathTestCases, tc) } } } } } - -func init() { - initializeTestCases() -} From b541540b45659f24cf0e8214f3ae5b98bbfe7593 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Wed, 22 Jun 2022 17:20:46 -0400 Subject: [PATCH 020/223] add test that checks unknown message type with embedded flow message --- .../authorized_sender_validator_test.go | 30 ++++++++++++++----- 1 file changed, 23 insertions(+), 7 deletions(-) diff --git a/network/validator/pubsub/authorized_sender_validator_test.go b/network/validator/pubsub/authorized_sender_validator_test.go index 964f3f851d8..ccc8331cb35 100644 --- a/network/validator/pubsub/authorized_sender_validator_test.go +++ b/network/validator/pubsub/authorized_sender_validator_test.go @@ -4,7 +4,7 @@ import ( "fmt" "testing" - "github.com/stretchr/testify/require" + "github.com/onflow/flow-go/model/messages" "github.com/stretchr/testify/suite" "github.com/onflow/flow-go/model/flow" @@ -53,8 +53,8 @@ func (s *TestIsAuthorizedSenderSuite) TestIsAuthorizedSender_UnAuthorizedSender( str := fmt.Sprintf("role (%s) should not be authorized to send message type (%s) on channel (%s)", c.Identity.Role, c.MessageStr, c.Channel) s.Run(str, func() { msgType, err := IsAuthorizedSender(c.Identity, c.Channel, c.Message) - require.ErrorIs(s.T(), err, ErrUnauthorizedSender) - require.Equal(s.T(), c.MessageStr, msgType) + s.Require().ErrorIs(err, 
ErrUnauthorizedSender) + s.Require().Equal(c.MessageStr, msgType) }) } } @@ -65,15 +65,31 @@ func (s *TestIsAuthorizedSenderSuite) TestIsAuthorizedSender_ValidationFailure() identity := unittest.IdentityFixture() identity.Ejected = true msgType, err := IsAuthorizedSender(identity, network.Channel(""), nil) - require.ErrorIs(s.T(), err, ErrSenderEjected) - require.Equal(s.T(), "", msgType) + s.Require().ErrorIs(err, ErrSenderEjected) + s.Require().Equal("", msgType) }) s.Run("unknown message type", func() { identity := unittest.IdentityFixture() msgType, err := IsAuthorizedSender(identity, network.Channel(""), nil) - require.ErrorIs(s.T(), err, ErrUnknownMessageType) - require.Equal(s.T(), "", msgType) + s.Require().ErrorIs(err, ErrUnknownMessageType) + s.Require().Equal("", msgType) + }) + + s.Run("unknown message type with message embedded", func() { + identity := unittest.IdentityFixture(unittest.WithRole(flow.RoleConsensus)) + type msg struct { + *messages.BlockProposal + } + + m := &msg{&messages.BlockProposal{ + Header: nil, + Payload: nil, + }} + + msgType, err := IsAuthorizedSender(identity, network.ConsensusCommittee, m) + s.Require().ErrorIs(err, ErrUnknownMessageType) + s.Require().Equal("", msgType) }) } From ca5269cbe4a52f8418b3c0e520402b3ce812fa22 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Wed, 22 Jun 2022 17:21:59 -0400 Subject: [PATCH 021/223] Update authorized_sender_validator_test.go --- network/validator/pubsub/authorized_sender_validator_test.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/network/validator/pubsub/authorized_sender_validator_test.go b/network/validator/pubsub/authorized_sender_validator_test.go index ccc8331cb35..3bf60a1f489 100644 --- a/network/validator/pubsub/authorized_sender_validator_test.go +++ b/network/validator/pubsub/authorized_sender_validator_test.go @@ -4,9 +4,10 @@ import ( "fmt" "testing" - "github.com/onflow/flow-go/model/messages" "github.com/stretchr/testify/suite" + 
"github.com/onflow/flow-go/model/messages" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/utils/unittest" From d89f54f2f3cf217f931f658fb0aaf0cd04e446fa Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Wed, 22 Jun 2022 18:09:53 -0400 Subject: [PATCH 022/223] use switch statement instead of if else chain --- .../validator/pubsub/authorized_sender_validator.go | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/network/validator/pubsub/authorized_sender_validator.go b/network/validator/pubsub/authorized_sender_validator.go index 5122fc9cccb..0e8550c13df 100644 --- a/network/validator/pubsub/authorized_sender_validator.go +++ b/network/validator/pubsub/authorized_sender_validator.go @@ -43,16 +43,17 @@ func AuthorizedSenderValidator(log zerolog.Logger, channel network.Channel, getI } msgType, err := IsAuthorizedSender(identity, channel, msg) - if errors.Is(err, ErrUnauthorizedSender) { + switch { + case errors.Is(err, ErrUnauthorizedSender): slashingViolationsConsumer.OnUnAuthorizedSenderError(identity, from.String(), msgType, err) return pubsub.ValidationReject - } else if errors.Is(err, ErrUnknownMessageType) { + case errors.Is(err, ErrUnknownMessageType): slashingViolationsConsumer.OnUnknownMsgTypeError(identity, from.String(), msgType, err) return pubsub.ValidationReject - } else if errors.Is(err, ErrSenderEjected) { + case errors.Is(err, ErrSenderEjected): slashingViolationsConsumer.OnSenderEjectedError(identity, from.String(), msgType, err) return pubsub.ValidationReject - } else if err != nil { + case err != nil: log.Error(). Err(err). Str("peer_id", from.String()). @@ -61,9 +62,9 @@ func AuthorizedSenderValidator(log zerolog.Logger, channel network.Channel, getI Str("message_type", msgType). 
Msg("unexpected error during message validation") return pubsub.ValidationReject + default: + return pubsub.ValidationAccept } - - return pubsub.ValidationAccept } } From 928ab22631fbc732e19ad6bcc6c1a4d1173b4cdc Mon Sep 17 00:00:00 2001 From: Janez Podhostnik Date: Wed, 22 Jun 2022 18:36:57 +0100 Subject: [PATCH 023/223] Ignore state context on system transactions --- .../computation/computer/computer.go | 1 + fvm/context.go | 19 +++++++++++++++---- fvm/scriptEnv.go | 4 +++- fvm/transactionEnv.go | 5 ++++- 4 files changed, 23 insertions(+), 6 deletions(-) diff --git a/engine/execution/computation/computer/computer.go b/engine/execution/computation/computer/computer.go index 1df203a039d..fef92c32087 100644 --- a/engine/execution/computation/computer/computer.go +++ b/engine/execution/computation/computer/computer.go @@ -65,6 +65,7 @@ func SystemChunkContext(vmCtx fvm.Context, logger zerolog.Logger) fvm.Context { fvm.WithTransactionProcessors(fvm.NewTransactionInvoker(logger)), fvm.WithMaxStateInteractionSize(SystemChunkLedgerIntractionLimit), fvm.WithEventCollectionSizeLimit(SystemChunkEventCollectionMaxSize), + fvm.WithLoadContextFromState(false), ) } diff --git a/fvm/context.go b/fvm/context.go index 34a0afcd131..af9c35b7757 100644 --- a/fvm/context.go +++ b/fvm/context.go @@ -13,10 +13,12 @@ import ( // A Context defines a set of execution parameters used by the virtual machine. 
type Context struct { - Chain flow.Chain - Blocks Blocks - Metrics handler.MetricsReporter - Tracer module.Tracer + Chain flow.Chain + Blocks Blocks + Metrics handler.MetricsReporter + Tracer module.Tracer + // LoadContextFromState is a flag telling the fvm to load certain parts of the context from the state + LoadContextFromState bool ComputationLimit uint64 MemoryLimit uint64 MaxStateKeySize uint64 @@ -75,6 +77,7 @@ func defaultContext(logger zerolog.Logger) Context { Blocks: nil, Metrics: &handler.NoopMetricsReporter{}, Tracer: nil, + LoadContextFromState: true, ComputationLimit: DefaultComputationLimit, MemoryLimit: DefaultMemoryLimit, MaxStateKeySize: state.DefaultMaxKeySize, @@ -125,6 +128,14 @@ func WithGasLimit(limit uint64) Option { } } +// WithLoadContextFromState sets if certain context parameters get loaded from the state or not +func WithLoadContextFromState(load bool) Option { + return func(ctx Context) Context { + ctx.LoadContextFromState = load + return ctx + } +} + // WithComputationLimit sets the computation limit for a virtual machine context. 
func WithComputationLimit(limit uint64) Option { return func(ctx Context) Context { diff --git a/fvm/scriptEnv.go b/fvm/scriptEnv.go index a1e9ee69543..a2b73499d2a 100644 --- a/fvm/scriptEnv.go +++ b/fvm/scriptEnv.go @@ -97,7 +97,9 @@ func NewScriptEnvironment( env.seedRNG(fvmContext.BlockHeader) } - env.setExecutionParameters() + if fvmContext.LoadContextFromState { + env.setExecutionParameters() + } return env } diff --git a/fvm/transactionEnv.go b/fvm/transactionEnv.go index e756e775e80..0482435bb0e 100644 --- a/fvm/transactionEnv.go +++ b/fvm/transactionEnv.go @@ -120,8 +120,11 @@ func NewTransactionEnvironment( env.seedRNG(ctx.BlockHeader) } + var err error // set the execution parameters from the state - err := env.setExecutionParameters() + if ctx.LoadContextFromState { + err = env.setExecutionParameters() + } return env, err } From 7999e72a3145e043c1d073db0f9df2031c7eb591 Mon Sep 17 00:00:00 2001 From: Janez Podhostnik Date: Wed, 22 Jun 2022 18:57:34 +0100 Subject: [PATCH 024/223] cleanup --- engine/execution/computation/computer/computer.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/engine/execution/computation/computer/computer.go b/engine/execution/computation/computer/computer.go index fef92c32087..c538572b97d 100644 --- a/engine/execution/computation/computer/computer.go +++ b/engine/execution/computation/computer/computer.go @@ -3,6 +3,7 @@ package computer import ( "context" "fmt" + "math" "runtime" "sync" "time" @@ -65,7 +66,8 @@ func SystemChunkContext(vmCtx fvm.Context, logger zerolog.Logger) fvm.Context { fvm.WithTransactionProcessors(fvm.NewTransactionInvoker(logger)), fvm.WithMaxStateInteractionSize(SystemChunkLedgerIntractionLimit), fvm.WithEventCollectionSizeLimit(SystemChunkEventCollectionMaxSize), - fvm.WithLoadContextFromState(false), + fvm.WithLoadContextFromState(false), // disable reading the memory limit (and computation/memory weights) from the state + fvm.WithMemoryLimit(math.MaxUint64), // and set the 
memory limit to the maximum ) } From 8321cdce6b2c841097b54dd8ad977f69fbed4b77 Mon Sep 17 00:00:00 2001 From: Janez Podhostnik Date: Wed, 22 Jun 2022 19:59:55 +0100 Subject: [PATCH 025/223] add test --- .../computation/computer/computer_test.go | 43 +++++++++++++++++++ 1 file changed, 43 insertions(+) diff --git a/engine/execution/computation/computer/computer_test.go b/engine/execution/computation/computer/computer_test.go index b2c41a08b8a..ea1d09c5303 100644 --- a/engine/execution/computation/computer/computer_test.go +++ b/engine/execution/computation/computer/computer_test.go @@ -26,6 +26,7 @@ import ( "github.com/onflow/flow-go/engine/execution/testutil" "github.com/onflow/flow-go/fvm" fvmErrors "github.com/onflow/flow-go/fvm/errors" + "github.com/onflow/flow-go/fvm/meter/weighted" "github.com/onflow/flow-go/fvm/programs" "github.com/onflow/flow-go/fvm/state" "github.com/onflow/flow-go/fvm/systemcontracts" @@ -363,6 +364,48 @@ func TestBlockExecutor_ExecuteBlock(t *testing.T) { assertEventHashesMatch(t, collectionCount+1, result) }) + t.Run("system transaction does not read memory limit from state", func(t *testing.T) { + + weighted.DefaultMemoryWeights = weighted.ExecutionMemoryWeights{ + 0: 1, // single weight set to 1 + } + execCtx := fvm.NewContext( + zerolog.Nop(), + fvm.WithMemoryLimit(10), // the context memory limit is set to 10 + ) + + rt := &testRuntime{ + executeTransaction: func(script runtime.Script, r runtime.Context) error { + err := r.Interface.MeterMemory(common.MemoryUsage{ + Kind: 0, + Amount: 11, + }) + require.NoError(t, err) // should fail if limit is taken from the default context + + return nil + }, + readStored: func(address common.Address, path cadence.Path, r runtime.Context) (cadence.Value, error) { + require.Fail(t, "system chunk should not read context from the state") + return nil, nil + }, + } + + vm := fvm.NewVirtualMachine(rt) + + exe, err := computer.NewBlockComputer(vm, execCtx, metrics.NewNoopCollector(), 
trace.NewNoopTracer(), zerolog.Nop(), committer.NewNoopViewCommitter()) + require.NoError(t, err) + + block := generateBlock(0, 0, rag) + + view := delta.NewView(func(owner, controller, key string) (flow.RegisterValue, error) { + return nil, nil + }) + + result, err := exe.ExecuteBlock(context.Background(), block, view, programs.NewEmptyPrograms()) + assert.NoError(t, err) + assert.Len(t, result.StateSnapshots, 1) // system chunk + }) + t.Run("succeeding transactions store programs", func(t *testing.T) { execCtx := fvm.NewContext(zerolog.Nop()) From 39140e92c8b69119168569d347cefa9c7b04be7e Mon Sep 17 00:00:00 2001 From: Janez Podhostnik Date: Wed, 22 Jun 2022 20:04:02 +0100 Subject: [PATCH 026/223] rename --- fvm/context.go | 68 +++++++++++++++++++++---------------------- fvm/scriptEnv.go | 2 +- fvm/transactionEnv.go | 2 +- 3 files changed, 36 insertions(+), 36 deletions(-) diff --git a/fvm/context.go b/fvm/context.go index af9c35b7757..fc27c58724d 100644 --- a/fvm/context.go +++ b/fvm/context.go @@ -17,18 +17,18 @@ type Context struct { Blocks Blocks Metrics handler.MetricsReporter Tracer module.Tracer - // LoadContextFromState is a flag telling the fvm to load certain parts of the context from the state - LoadContextFromState bool - ComputationLimit uint64 - MemoryLimit uint64 - MaxStateKeySize uint64 - MaxStateValueSize uint64 - MaxStateInteractionSize uint64 - EventCollectionByteSizeLimit uint64 - MaxNumOfTxRetries uint8 - BlockHeader *flow.Header - ServiceAccountEnabled bool - // Depricated: RestrictContractDeployment is deprecated use SetIsContractDeploymentRestrictedTransaction instead. 
+ // AllowContextOverrideByExecutionState is a flag telling the fvm to override certain parts of the context from the state + AllowContextOverrideByExecutionState bool + ComputationLimit uint64 + MemoryLimit uint64 + MaxStateKeySize uint64 + MaxStateValueSize uint64 + MaxStateInteractionSize uint64 + EventCollectionByteSizeLimit uint64 + MaxNumOfTxRetries uint8 + BlockHeader *flow.Header + ServiceAccountEnabled bool + // Depricated: RestrictedDeploymentEnabled is deprecated use SetIsContractDeploymentRestrictedTransaction instead. // Can be removed after all networks are migrated to SetIsContractDeploymentRestrictedTransaction RestrictContractDeployment bool RestrictContractRemoval bool @@ -73,27 +73,27 @@ const ( func defaultContext(logger zerolog.Logger) Context { return Context{ - Chain: flow.Mainnet.Chain(), - Blocks: nil, - Metrics: &handler.NoopMetricsReporter{}, - Tracer: nil, - LoadContextFromState: true, - ComputationLimit: DefaultComputationLimit, - MemoryLimit: DefaultMemoryLimit, - MaxStateKeySize: state.DefaultMaxKeySize, - MaxStateValueSize: state.DefaultMaxValueSize, - MaxStateInteractionSize: state.DefaultMaxInteractionSize, - EventCollectionByteSizeLimit: DefaultEventCollectionByteSizeLimit, - MaxNumOfTxRetries: DefaultMaxNumOfTxRetries, - BlockHeader: nil, - ServiceAccountEnabled: true, - RestrictContractDeployment: true, - RestrictContractRemoval: true, - CadenceLoggingEnabled: false, - EventCollectionEnabled: true, - ServiceEventCollectionEnabled: false, - AccountFreezeAvailable: false, - ExtensiveTracing: false, + Chain: flow.Mainnet.Chain(), + Blocks: nil, + Metrics: &handler.NoopMetricsReporter{}, + Tracer: nil, + AllowContextOverrideByExecutionState: true, + ComputationLimit: DefaultComputationLimit, + MemoryLimit: DefaultMemoryLimit, + MaxStateKeySize: state.DefaultMaxKeySize, + MaxStateValueSize: state.DefaultMaxValueSize, + MaxStateInteractionSize: state.DefaultMaxInteractionSize, + EventCollectionByteSizeLimit: 
DefaultEventCollectionByteSizeLimit, + MaxNumOfTxRetries: DefaultMaxNumOfTxRetries, + BlockHeader: nil, + ServiceAccountEnabled: true, + RestrictContractDeployment: true, + RestrictContractRemoval: true, + CadenceLoggingEnabled: false, + EventCollectionEnabled: true, + ServiceEventCollectionEnabled: false, + AccountFreezeAvailable: false, + ExtensiveTracing: false, TransactionProcessors: []TransactionProcessor{ NewTransactionAccountFrozenChecker(), NewTransactionSignatureVerifier(AccountKeyWeightThreshold), @@ -131,7 +131,7 @@ func WithGasLimit(limit uint64) Option { // WithLoadContextFromState sets if certain context parameters get loaded from the state or not func WithLoadContextFromState(load bool) Option { return func(ctx Context) Context { - ctx.LoadContextFromState = load + ctx.AllowContextOverrideByExecutionState = load return ctx } } diff --git a/fvm/scriptEnv.go b/fvm/scriptEnv.go index a2b73499d2a..91c1a107e95 100644 --- a/fvm/scriptEnv.go +++ b/fvm/scriptEnv.go @@ -97,7 +97,7 @@ func NewScriptEnvironment( env.seedRNG(fvmContext.BlockHeader) } - if fvmContext.LoadContextFromState { + if fvmContext.AllowContextOverrideByExecutionState { env.setExecutionParameters() } diff --git a/fvm/transactionEnv.go b/fvm/transactionEnv.go index 0482435bb0e..ed184c79c5c 100644 --- a/fvm/transactionEnv.go +++ b/fvm/transactionEnv.go @@ -122,7 +122,7 @@ func NewTransactionEnvironment( var err error // set the execution parameters from the state - if ctx.LoadContextFromState { + if ctx.AllowContextOverrideByExecutionState { err = env.setExecutionParameters() } From 1803c64a8f9f708abf70279c68a1ce8e02cd2264 Mon Sep 17 00:00:00 2001 From: Janez Podhostnik Date: Thu, 23 Jun 2022 15:30:33 +0100 Subject: [PATCH 027/223] add fvm test and update option name --- .../computation/computer/computer.go | 4 +- fvm/context.go | 4 +- fvm/fvm_test.go | 50 +++++++++++++++++++ 3 files changed, 54 insertions(+), 4 deletions(-) diff --git a/engine/execution/computation/computer/computer.go 
b/engine/execution/computation/computer/computer.go index c538572b97d..934a86047d4 100644 --- a/engine/execution/computation/computer/computer.go +++ b/engine/execution/computation/computer/computer.go @@ -66,8 +66,8 @@ func SystemChunkContext(vmCtx fvm.Context, logger zerolog.Logger) fvm.Context { fvm.WithTransactionProcessors(fvm.NewTransactionInvoker(logger)), fvm.WithMaxStateInteractionSize(SystemChunkLedgerIntractionLimit), fvm.WithEventCollectionSizeLimit(SystemChunkEventCollectionMaxSize), - fvm.WithLoadContextFromState(false), // disable reading the memory limit (and computation/memory weights) from the state - fvm.WithMemoryLimit(math.MaxUint64), // and set the memory limit to the maximum + fvm.WithAllowContextOverrideByExecutionState(false), // disable reading the memory limit (and computation/memory weights) from the state + fvm.WithMemoryLimit(math.MaxUint64), // and set the memory limit to the maximum ) } diff --git a/fvm/context.go b/fvm/context.go index fc27c58724d..d472311b869 100644 --- a/fvm/context.go +++ b/fvm/context.go @@ -128,8 +128,8 @@ func WithGasLimit(limit uint64) Option { } } -// WithLoadContextFromState sets if certain context parameters get loaded from the state or not -func WithLoadContextFromState(load bool) Option { +// WithAllowContextOverrideByExecutionState sets if certain context parameters get loaded from the state or not +func WithAllowContextOverrideByExecutionState(load bool) Option { return func(ctx Context) Context { ctx.AllowContextOverrideByExecutionState = load return ctx diff --git a/fvm/fvm_test.go b/fvm/fvm_test.go index 5b14b88d1c6..effd3d24dba 100644 --- a/fvm/fvm_test.go +++ b/fvm/fvm_test.go @@ -1289,6 +1289,56 @@ func TestSettingExecutionWeights(t *testing.T) { }, )) + t.Run("transaction should not read context from the state if AllowContextOverrideByExecutionState if false", newVMTest(). 
+ withBootstrapProcedureOptions( + fvm.WithMinimumStorageReservation(fvm.DefaultMinimumStorageReservation), + fvm.WithAccountCreationFee(fvm.DefaultAccountCreationFee), + fvm.WithStorageMBPerFLOW(fvm.DefaultStorageMBPerFLOW), + fvm.WithExecutionEffortWeights( + weightedMeter.ExecutionEffortWeights{ + meter.ComputationKindCreateAccount: 1_000_000_000_000 << weightedMeter.MeterExecutionInternalPrecisionBytes, + }, + ), + fvm.WithExecutionMemoryWeights( + weightedMeter.ExecutionMemoryWeights{ + common.MemoryKindBreakStatement: 1_000_000_000_000, + }, + ), + fvm.WithExecutionMemoryLimit(0), + ).withContextOptions( + fvm.WithAllowContextOverrideByExecutionState(false), + fvm.WithMemoryLimit(math.MaxUint64), + ).run( + func(t *testing.T, vm *fvm.VirtualMachine, chain flow.Chain, ctx fvm.Context, view state.View, programs *programs.Programs) { + txBody := flow.NewTransactionBody(). + SetScript([]byte(` + transaction { + prepare(signer: AuthAccount) { + while true { + AuthAccount(payer: signer) + break + } + } + } + `)). + SetProposalKey(chain.ServiceAddress(), 0, 0). + AddAuthorizer(chain.ServiceAddress()). + SetPayer(chain.ServiceAddress()). 
+ SetGasLimit(1_000) + + err := testutil.SignTransactionAsServiceAccount(txBody, 0, chain) + require.NoError(t, err) + + tx := fvm.Transaction(txBody, 0) + err = vm.Run(ctx, tx, view, programs) + // tx would fail if ExecutionEffortWeights from the state were used due to computation limit + // tx would fail if ExecutionMemoryWeights from the state were used due to memory limit + // tx would fail if MemoryLimit from the state was used due to memory limit + require.NoError(t, err) + require.NoError(t, tx.Err) + }, + )) + t.Run("transaction should not use up more computation that the transaction body itself", newVMTest().withBootstrapProcedureOptions( fvm.WithMinimumStorageReservation(fvm.DefaultMinimumStorageReservation), fvm.WithAccountCreationFee(fvm.DefaultAccountCreationFee), From 4b11b966e4845b0f52d29ed6a91980a303cd0c95 Mon Sep 17 00:00:00 2001 From: Jordan Schalm Date: Fri, 24 Jun 2022 11:36:51 -0700 Subject: [PATCH 028/223] extend consensus follower config to support compliance settings --- cmd/scaffold.go | 7 +++++++ follower/consensus_follower.go | 29 ++++++++++++++++++++--------- 2 files changed, 27 insertions(+), 9 deletions(-) diff --git a/cmd/scaffold.go b/cmd/scaffold.go index 4b19594f820..5834d48b3f4 100644 --- a/cmd/scaffold.go +++ b/cmd/scaffold.go @@ -31,6 +31,7 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/compliance" "github.com/onflow/flow-go/module/component" "github.com/onflow/flow-go/module/id" "github.com/onflow/flow-go/module/irrecoverable" @@ -1178,6 +1179,12 @@ func WithSyncCoreConfig(syncConfig synchronization.Config) Option { } } +func WithComplianceConfig(complianceConfig compliance.Config) Option { + return func(config *BaseConfig) { + config.ComplianceConfig = complianceConfig + } +} + func WithLogLevel(level string) Option { return func(config *BaseConfig) { config.level = level diff --git 
a/follower/consensus_follower.go b/follower/consensus_follower.go index b7c1d24bf28..f43094965f9 100644 --- a/follower/consensus_follower.go +++ b/follower/consensus_follower.go @@ -14,6 +14,7 @@ import ( "github.com/onflow/flow-go/consensus/hotstuff/notifications/pubsub" "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/compliance" "github.com/onflow/flow-go/module/component" "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/synchronization" @@ -33,15 +34,16 @@ type ConsensusFollower interface { // Config contains the configurable fields for a `ConsensusFollower`. type Config struct { - networkPrivKey crypto.PrivateKey // the network private key of this node - bootstrapNodes []BootstrapNodeInfo // the bootstrap nodes to use - bindAddr string // address to bind on - db *badger.DB // the badger DB storage to use for the protocol state - dataDir string // directory to store the protocol state (if the badger storage is not provided) - bootstrapDir string // path to the bootstrap directory - logLevel string // log level - exposeMetrics bool // whether to expose metrics - syncConfig *synchronization.Config // sync core configuration + networkPrivKey crypto.PrivateKey // the network private key of this node + bootstrapNodes []BootstrapNodeInfo // the bootstrap nodes to use + bindAddr string // address to bind on + db *badger.DB // the badger DB storage to use for the protocol state + dataDir string // directory to store the protocol state (if the badger storage is not provided) + bootstrapDir string // path to the bootstrap directory + logLevel string // log level + exposeMetrics bool // whether to expose metrics + syncConfig *synchronization.Config // sync core configuration + complianceConfig *compliance.Config // follower engine configuration } type Option func(c *Config) @@ -89,6 +91,12 @@ func WithSyncCoreConfig(config *synchronization.Config) Option { } } +func 
WithComplianceConfig(config *compliance.Config) Option { + return func(c *Config) { + c.complianceConfig = config + } +} + // BootstrapNodeInfo contains the details about the upstream bootstrap peer the consensus follower uses type BootstrapNodeInfo struct { Host string // ip or hostname @@ -144,6 +152,9 @@ func getBaseOptions(config *Config) []cmd.Option { if config.syncConfig != nil { options = append(options, cmd.WithSyncCoreConfig(*config.syncConfig)) } + if config.complianceConfig != nil { + options = append(options, cmd.WithComplianceConfig(*config.complianceConfig)) + } return options } From ab6981580a5edb6953d39c0a29168df2817a65f2 Mon Sep 17 00:00:00 2001 From: Janez Podhostnik Date: Mon, 27 Jun 2022 15:05:54 +0100 Subject: [PATCH 029/223] All accounts should be bootstrapped with a key --- .../state/bootstrap/bootstrap_test.go | 2 +- fvm/bootstrap.go | 28 +++++++++---------- utils/unittest/execution_state.go | 2 +- 3 files changed, 16 insertions(+), 16 deletions(-) diff --git a/engine/execution/state/bootstrap/bootstrap_test.go b/engine/execution/state/bootstrap/bootstrap_test.go index 33613c6909b..b762b644239 100644 --- a/engine/execution/state/bootstrap/bootstrap_test.go +++ b/engine/execution/state/bootstrap/bootstrap_test.go @@ -47,7 +47,7 @@ func TestBootstrapLedger(t *testing.T) { } func TestBootstrapLedger_ZeroTokenSupply(t *testing.T) { - expectedStateCommitmentBytes, _ := hex.DecodeString("cae2f2d6c53582503dad30dba8a8bba098ff8654d19b820a49e1961c1f459a41") + expectedStateCommitmentBytes, _ := hex.DecodeString("4c9e435159b5106f26caa402d040e8708474943394ffe5aa55382daea4448424") expectedStateCommitment, err := flow.ToStateCommitment(expectedStateCommitmentBytes) require.NoError(t, err) diff --git a/fvm/bootstrap.go b/fvm/bootstrap.go index ec32198de10..f8ecf3caef4 100644 --- a/fvm/bootstrap.go +++ b/fvm/bootstrap.go @@ -237,9 +237,9 @@ func (b *BootstrapProcedure) Run(vm *VirtualMachine, ctx Context, sth *state.Sta service := 
b.createServiceAccount(b.serviceAccountPublicKey) b.deployContractAuditVouchers(service) - fungibleToken := b.deployFungibleToken() - flowToken := b.deployFlowToken(service, fungibleToken) - feeContract := b.deployFlowFees(service, fungibleToken, flowToken) + fungibleToken := b.deployFungibleToken(b.serviceAccountPublicKey) + flowToken := b.deployFlowToken(b.serviceAccountPublicKey, service, fungibleToken) + feeContract := b.deployFlowFees(b.serviceAccountPublicKey, service, fungibleToken, flowToken) b.deployStorageFees(service, fungibleToken, flowToken) if b.initialTokenSupply > 0 { @@ -292,7 +292,7 @@ func (b *BootstrapProcedure) Run(vm *VirtualMachine, ctx Context, sth *state.Sta // deploy staking collection contract to the service account b.deployStakingCollection(service, fungibleToken, flowToken) - b.registerNodes(service, fungibleToken, flowToken) + b.registerNodes(b.serviceAccountPublicKey, service, fungibleToken, flowToken) return nil } @@ -305,13 +305,13 @@ func (proc *BootstrapProcedure) MemoryLimit(_ Context) uint64 { return math.MaxUint64 } -func (b *BootstrapProcedure) createAccount() flow.Address { +func (b *BootstrapProcedure) createAccount(publicKey flow.AccountPublicKey) flow.Address { address, err := b.addressGenerator.NextAddress() if err != nil { panic(fmt.Sprintf("failed to generate address: %s", err)) } - err = b.accounts.Create(nil, address) + err = b.accounts.Create([]flow.AccountPublicKey{publicKey}, address) if err != nil { panic(fmt.Sprintf("failed to create account: %s", err)) } @@ -333,8 +333,8 @@ func (b *BootstrapProcedure) createServiceAccount(accountKey flow.AccountPublicK return address } -func (b *BootstrapProcedure) deployFungibleToken() flow.Address { - fungibleToken := b.createAccount() +func (b *BootstrapProcedure) deployFungibleToken(publicKey flow.AccountPublicKey) flow.Address { + fungibleToken := b.createAccount(publicKey) txError, err := b.vm.invokeMetaTransaction( b.ctx, @@ -348,8 +348,8 @@ func (b *BootstrapProcedure) 
deployFungibleToken() flow.Address { return fungibleToken } -func (b *BootstrapProcedure) deployFlowToken(service, fungibleToken flow.Address) flow.Address { - flowToken := b.createAccount() +func (b *BootstrapProcedure) deployFlowToken(publicKey flow.AccountPublicKey, service, fungibleToken flow.Address) flow.Address { + flowToken := b.createAccount(publicKey) txError, err := b.vm.invokeMetaTransaction( b.ctx, Transaction( @@ -365,8 +365,8 @@ func (b *BootstrapProcedure) deployFlowToken(service, fungibleToken flow.Address return flowToken } -func (b *BootstrapProcedure) deployFlowFees(service, fungibleToken, flowToken flow.Address) flow.Address { - flowFees := b.createAccount() +func (b *BootstrapProcedure) deployFlowFees(publicKey flow.AccountPublicKey, service, fungibleToken, flowToken flow.Address) flow.Address { + flowFees := b.createAccount(publicKey) txError, err := b.vm.invokeMetaTransaction( b.ctx, @@ -713,11 +713,11 @@ func (b *BootstrapProcedure) setStakingAllowlist(service flow.Address, allowedID panicOnMetaInvokeErrf("failed to set staking allow-list: %s", txError, err) } -func (b *BootstrapProcedure) registerNodes(service, fungibleToken, flowToken flow.Address) { +func (b *BootstrapProcedure) registerNodes(publicKey flow.AccountPublicKey, service, fungibleToken, flowToken flow.Address) { for _, id := range b.identities { // create a staking account for the node - nodeAddress := b.createAccount() + nodeAddress := b.createAccount(publicKey) // give a vault resource to the staking account txError, err := b.vm.invokeMetaTransaction( diff --git a/utils/unittest/execution_state.go b/utils/unittest/execution_state.go index 3a10537863b..15b550eadf7 100644 --- a/utils/unittest/execution_state.go +++ b/utils/unittest/execution_state.go @@ -24,7 +24,7 @@ const ServiceAccountPrivateKeySignAlgo = crypto.ECDSAP256 const ServiceAccountPrivateKeyHashAlgo = hash.SHA2_256 // Pre-calculated state commitment with root account with the above private key -const 
GenesisStateCommitmentHex = "5b8f8283d5e719672cb53c0e20a822bf0782f4345d09df076c14fba4d9e21da0" +const GenesisStateCommitmentHex = "c2c2a53f440e40628003e38ac966b5ef278da75ceacb92ff78a00b99fa7ebd62" var GenesisStateCommitment flow.StateCommitment From 2caf8cd88249e804d18e802a6dc234883f4b33e2 Mon Sep 17 00:00:00 2001 From: Janez Podhostnik Date: Mon, 27 Jun 2022 16:32:16 +0100 Subject: [PATCH 030/223] test fix --- engine/execution/computation/execution_verification_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/engine/execution/computation/execution_verification_test.go b/engine/execution/computation/execution_verification_test.go index b331ddb27ec..0e7443fad36 100644 --- a/engine/execution/computation/execution_verification_test.go +++ b/engine/execution/computation/execution_verification_test.go @@ -160,7 +160,7 @@ func Test_ExecutionMatchesVerification(t *testing.T) { err = testutil.SignTransaction(addKeyTx, accountAddress, accountPrivKey, 0) require.NoError(t, err) - minimumStorage, err := cadence.NewUFix64("0.00008164") + minimumStorage, err := cadence.NewUFix64("0.00008312") require.NoError(t, err) cr := executeBlockAndVerify(t, [][]*flow.TransactionBody{ From d27c8e1820fb452f3525a4b8f10b88f93542c0a0 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Mon, 27 Jun 2022 12:38:03 -0400 Subject: [PATCH 031/223] add withLogger parameter option to nodeFixture --- network/p2p/fixture_test.go | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/network/p2p/fixture_test.go b/network/p2p/fixture_test.go index b66171c04a0..a3e6077ad31 100644 --- a/network/p2p/fixture_test.go +++ b/network/p2p/fixture_test.go @@ -43,6 +43,7 @@ type nodeFixtureParameters struct { dhtOptions []dht.Option peerFilter p2p.PeerFilter role flow.Role + logger zerolog.Logger } type nodeFixtureParameterOption func(*nodeFixtureParameters) @@ -89,6 +90,12 @@ func withRole(role flow.Role) nodeFixtureParameterOption { } } +func withLogger(logger 
zerolog.Logger) nodeFixtureParameterOption { + return func(p *nodeFixtureParameters) { + p.logger = logger + } +} + // nodeFixture is a test fixture that creates a single libp2p node with the given key, spork id, and options. // It returns the node and its identity. func nodeFixture( @@ -98,14 +105,13 @@ func nodeFixture( dhtPrefix string, opts ...nodeFixtureParameterOption, ) (*p2p.Node, flow.Identity) { - logger := unittest.Logger().Level(zerolog.ErrorLevel) - // default parameters parameters := &nodeFixtureParameters{ handlerFunc: func(network.Stream) {}, unicasts: nil, key: generateNetworkingKey(t), address: defaultAddress, + logger: unittest.Logger().Level(zerolog.ErrorLevel), } for _, opt := range opts { @@ -118,22 +124,22 @@ func nodeFixture( unittest.WithRole(parameters.role)) noopMetrics := metrics.NewNoopCollector() - connManager := p2p.NewConnManager(logger, noopMetrics) + connManager := p2p.NewConnManager(parameters.logger, noopMetrics) - builder := p2p.NewNodeBuilder(logger, parameters.address, parameters.key, sporkID). + builder := p2p.NewNodeBuilder(parameters.logger, parameters.address, parameters.key, sporkID). SetConnectionManager(connManager). SetPubSub(pubsub.NewGossipSub). 
SetRoutingSystem(func(c context.Context, h host.Host) (routing.Routing, error) { return p2p.NewDHT(c, h, protocol.ID(unicast.FlowDHTProtocolIDPrefix+sporkID.String()+"/"+dhtPrefix), - logger, + parameters.logger, noopMetrics, parameters.dhtOptions..., ) }) if parameters.peerFilter != nil { - connGater := p2p.NewConnGater(logger, parameters.peerFilter) + connGater := p2p.NewConnGater(parameters.logger, parameters.peerFilter) builder.SetConnectionGater(connGater) } From d29ed7d53434687e0ca20fa44ad1e1d4c8300e6d Mon Sep 17 00:00:00 2001 From: Janez Podhostnik Date: Mon, 27 Jun 2022 21:10:51 +0100 Subject: [PATCH 032/223] Add botstrap key options --- fvm/bootstrap.go | 72 ++++++++++++++++++++++------------- model/flow/account_encoder.go | 20 ++++++++++ 2 files changed, 66 insertions(+), 26 deletions(-) diff --git a/fvm/bootstrap.go b/fvm/bootstrap.go index f8ecf3caef4..78ddbd6e2ea 100644 --- a/fvm/bootstrap.go +++ b/fvm/bootstrap.go @@ -27,9 +27,9 @@ type BootstrapProcedure struct { rootBlock *flow.Header // genesis parameters - serviceAccountPublicKey flow.AccountPublicKey - initialTokenSupply cadence.UFix64 - addressGenerator flow.AddressGenerator + accountKeys BootstrapAccountKeys + initialTokenSupply cadence.UFix64 + addressGenerator flow.AddressGenerator accountCreationFee cadence.UFix64 minimumStorageReservation cadence.UFix64 @@ -55,6 +55,14 @@ type BootstrapProcedure struct { identities flow.IdentityList } +type BootstrapAccountKeys struct { + ServiceAccountPublicKeys []flow.AccountPublicKey + FungibleTokenAccountPublicKeys []flow.AccountPublicKey + FlowTokenAccountPublicKeys []flow.AccountPublicKey + FlowFeesAccountPublicKeys []flow.AccountPublicKey + NodeAccountPublicKeys []flow.AccountPublicKey +} + type BootstrapProcedureFeeParameters struct { SurgeFactor cadence.UFix64 InclusionEffortCost cadence.UFix64 @@ -118,6 +126,15 @@ var DefaultTransactionFees = func() BootstrapProcedureFeeParameters { } }() +// WithBootstrapAccountKeys sets the public keys of the 
accounts that will be created during bootstrapping +// by default all accounts are created with the ServiceAccountPublicKey specified when calling `Bootstrap`. +func WithBootstrapAccountKeys(keys BootstrapAccountKeys) BootstrapProcedureOption { + return func(bp *BootstrapProcedure) *BootstrapProcedure { + bp.accountKeys = keys + return bp + } +} + func WithAccountCreationFee(fee cadence.UFix64) BootstrapProcedureOption { return func(bp *BootstrapProcedure) *BootstrapProcedure { bp.accountCreationFee = fee @@ -209,9 +226,14 @@ func Bootstrap( opts ...BootstrapProcedureOption, ) *BootstrapProcedure { bootstrapProcedure := &BootstrapProcedure{ - serviceAccountPublicKey: serviceAccountPublicKey, - transactionFees: BootstrapProcedureFeeParameters{0, 0, 0}, - epochConfig: epochs.DefaultEpochConfig(), + transactionFees: BootstrapProcedureFeeParameters{0, 0, 0}, + epochConfig: epochs.DefaultEpochConfig(), + } + bootstrapProcedure.accountKeys = BootstrapAccountKeys{ + ServiceAccountPublicKeys: []flow.AccountPublicKey{serviceAccountPublicKey}, + FungibleTokenAccountPublicKeys: []flow.AccountPublicKey{serviceAccountPublicKey}, + FlowTokenAccountPublicKeys: []flow.AccountPublicKey{serviceAccountPublicKey}, + NodeAccountPublicKeys: []flow.AccountPublicKey{serviceAccountPublicKey}, } for _, applyOption := range opts { @@ -234,12 +256,12 @@ func (b *BootstrapProcedure) Run(vm *VirtualMachine, ctx Context, sth *state.Sta addressGenerator := state.NewStateBoundAddressGenerator(b.sth, ctx.Chain) b.addressGenerator = addressGenerator - service := b.createServiceAccount(b.serviceAccountPublicKey) + service := b.createServiceAccount() b.deployContractAuditVouchers(service) - fungibleToken := b.deployFungibleToken(b.serviceAccountPublicKey) - flowToken := b.deployFlowToken(b.serviceAccountPublicKey, service, fungibleToken) - feeContract := b.deployFlowFees(b.serviceAccountPublicKey, service, fungibleToken, flowToken) + fungibleToken := b.deployFungibleToken() + flowToken := 
b.deployFlowToken(service, fungibleToken) + feeContract := b.deployFlowFees(service, fungibleToken, flowToken) b.deployStorageFees(service, fungibleToken, flowToken) if b.initialTokenSupply > 0 { @@ -292,7 +314,7 @@ func (b *BootstrapProcedure) Run(vm *VirtualMachine, ctx Context, sth *state.Sta // deploy staking collection contract to the service account b.deployStakingCollection(service, fungibleToken, flowToken) - b.registerNodes(b.serviceAccountPublicKey, service, fungibleToken, flowToken) + b.registerNodes(service, fungibleToken, flowToken) return nil } @@ -305,13 +327,13 @@ func (proc *BootstrapProcedure) MemoryLimit(_ Context) uint64 { return math.MaxUint64 } -func (b *BootstrapProcedure) createAccount(publicKey flow.AccountPublicKey) flow.Address { +func (b *BootstrapProcedure) createAccount(publicKeys []flow.AccountPublicKey) flow.Address { address, err := b.addressGenerator.NextAddress() if err != nil { panic(fmt.Sprintf("failed to generate address: %s", err)) } - err = b.accounts.Create([]flow.AccountPublicKey{publicKey}, address) + err = b.accounts.Create(publicKeys, address) if err != nil { panic(fmt.Sprintf("failed to create account: %s", err)) } @@ -319,13 +341,13 @@ func (b *BootstrapProcedure) createAccount(publicKey flow.AccountPublicKey) flow return address } -func (b *BootstrapProcedure) createServiceAccount(accountKey flow.AccountPublicKey) flow.Address { +func (b *BootstrapProcedure) createServiceAccount() flow.Address { address, err := b.addressGenerator.NextAddress() if err != nil { panic(fmt.Sprintf("failed to generate address: %s", err)) } - err = b.accounts.Create([]flow.AccountPublicKey{accountKey}, address) + err = b.accounts.Create(b.accountKeys.ServiceAccountPublicKeys, address) if err != nil { panic(fmt.Sprintf("failed to create service account: %s", err)) } @@ -333,8 +355,8 @@ func (b *BootstrapProcedure) createServiceAccount(accountKey flow.AccountPublicK return address } -func (b *BootstrapProcedure) deployFungibleToken(publicKey 
flow.AccountPublicKey) flow.Address { - fungibleToken := b.createAccount(publicKey) +func (b *BootstrapProcedure) deployFungibleToken() flow.Address { + fungibleToken := b.createAccount(b.accountKeys.FungibleTokenAccountPublicKeys) txError, err := b.vm.invokeMetaTransaction( b.ctx, @@ -348,8 +370,8 @@ func (b *BootstrapProcedure) deployFungibleToken(publicKey flow.AccountPublicKey return fungibleToken } -func (b *BootstrapProcedure) deployFlowToken(publicKey flow.AccountPublicKey, service, fungibleToken flow.Address) flow.Address { - flowToken := b.createAccount(publicKey) +func (b *BootstrapProcedure) deployFlowToken(service, fungibleToken flow.Address) flow.Address { + flowToken := b.createAccount(b.accountKeys.FlowTokenAccountPublicKeys) txError, err := b.vm.invokeMetaTransaction( b.ctx, Transaction( @@ -365,8 +387,8 @@ func (b *BootstrapProcedure) deployFlowToken(publicKey flow.AccountPublicKey, se return flowToken } -func (b *BootstrapProcedure) deployFlowFees(publicKey flow.AccountPublicKey, service, fungibleToken, flowToken flow.Address) flow.Address { - flowFees := b.createAccount(publicKey) +func (b *BootstrapProcedure) deployFlowFees(service, fungibleToken, flowToken flow.Address) flow.Address { + flowFees := b.createAccount(b.accountKeys.FlowFeesAccountPublicKeys) txError, err := b.vm.invokeMetaTransaction( b.ctx, @@ -713,11 +735,11 @@ func (b *BootstrapProcedure) setStakingAllowlist(service flow.Address, allowedID panicOnMetaInvokeErrf("failed to set staking allow-list: %s", txError, err) } -func (b *BootstrapProcedure) registerNodes(publicKey flow.AccountPublicKey, service, fungibleToken, flowToken flow.Address) { +func (b *BootstrapProcedure) registerNodes(service, fungibleToken, flowToken flow.Address) { for _, id := range b.identities { // create a staking account for the node - nodeAddress := b.createAccount(publicKey) + nodeAddress := b.createAccount(b.accountKeys.NodeAccountPublicKeys) // give a vault resource to the staking account txError, err 
:= b.vm.invokeMetaTransaction( @@ -780,12 +802,10 @@ func (b *BootstrapProcedure) deployStakingProxyContract(service flow.Address) { func (b *BootstrapProcedure) deployLockedTokensContract(service flow.Address, fungibleTokenAddress, flowTokenAddress flow.Address) { - publicKeys := make([]cadence.Value, 1) - encodedPublicKey, err := flow.EncodeRuntimeAccountPublicKey(b.serviceAccountPublicKey) + publicKeys, err := flow.EncodeRuntimeAccountPublicKeys(b.accountKeys.ServiceAccountPublicKeys) if err != nil { panic(err) } - publicKeys[0] = blueprints.BytesToCadenceArray(encodedPublicKey) contract := contracts.FlowLockedTokens( fungibleTokenAddress.Hex(), diff --git a/model/flow/account_encoder.go b/model/flow/account_encoder.go index 91b78fa04ed..9b4bf1ad5ae 100644 --- a/model/flow/account_encoder.go +++ b/model/flow/account_encoder.go @@ -5,6 +5,8 @@ package flow import ( "github.com/ethereum/go-ethereum/rlp" + "github.com/onflow/cadence" + "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/crypto/hash" ) @@ -58,6 +60,24 @@ func EncodeAccountPublicKey(a AccountPublicKey) ([]byte, error) { return rlp.EncodeToBytes(&w) } +func EncodeRuntimeAccountPublicKeys(keys []AccountPublicKey) ([]cadence.Value, error) { + encodedKeys := make([]cadence.Value, len(keys)) + for i, key := range keys { + k, err := EncodeRuntimeAccountPublicKey(key) + if err != nil { + return nil, err + } + + values := make([]cadence.Value, len(k)) + for j, v := range k { + values[j] = cadence.NewUInt8(v) + } + encodedKeys[i] = cadence.NewArray(values) + } + + return encodedKeys, nil +} + func EncodeRuntimeAccountPublicKey(a AccountPublicKey) ([]byte, error) { publicKey := a.PublicKey.Encode() From 43788c7e0115fdffd9676cf79ab3beeb97edac02 Mon Sep 17 00:00:00 2001 From: Simon Zhu Date: Mon, 27 Jun 2022 17:19:50 -0400 Subject: [PATCH 033/223] add logs --- engine/execution/computation/computer/computer.go | 3 +++ engine/execution/computation/manager.go | 4 ++++ 2 files changed, 7 insertions(+) diff 
--git a/engine/execution/computation/computer/computer.go b/engine/execution/computation/computer/computer.go index af923a2a7f6..cf53ed4ea92 100644 --- a/engine/execution/computation/computer/computer.go +++ b/engine/execution/computation/computer/computer.go @@ -211,6 +211,9 @@ func (e *blockComputer) executeBlock( } wg.Wait() + + e.log.Debug().Hex("block_id", logging.Entity(block)).Msg("all views committed") + res.StateReads = stateView.(*delta.View).ReadsCount() return res, nil diff --git a/engine/execution/computation/manager.go b/engine/execution/computation/manager.go index 6b9df2a784a..57186ec50d4 100644 --- a/engine/execution/computation/manager.go +++ b/engine/execution/computation/manager.go @@ -268,6 +268,8 @@ func (e *Manager) ComputeBlock( return nil, fmt.Errorf("failed to execute block: %w", err) } + e.log.Debug().Hex("block_id", logging.Entity(block.Block)).Msg("block result computed") + toInsert := blockPrograms // if we have item from cache and there were no changes @@ -278,6 +280,8 @@ func (e *Manager) ComputeBlock( e.programsCache.Set(block.ID(), toInsert) + e.log.Debug().Hex("block_id", logging.Entity(block.Block)).Msg("programs cache updated") + group, uploadCtx := errgroup.WithContext(ctx) var rootID flow.Identifier var blobTree [][]cid.Cid From 64cfb1b6a15d0061b54fb7bb809aaa321548fa6b Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Tue, 28 Jun 2022 09:07:22 -0400 Subject: [PATCH 034/223] move message_authorization_config -> network/message/authorization - rename MsgAuthConfig String & Interface fields to Name & Type - move message_authorization_config -> network/message/authorization - fix import cycle caused by above, move channels -> channel package - fix import cycle caused by above move Topic declaration from network/middleware.go -> channels package - add comments for MsgAuthConfig member variables - fix capitalization in ErrUknownMsgType - move msg auth config init to init.go in message package - refactor keys as constants - add 10s 
delay for node fixture get IP require.Eventually tick - regenerate mocks --- .../node_builder/access_node_builder.go | 7 +- cmd/collection/main.go | 4 +- cmd/consensus/main.go | 4 +- cmd/execution_builder.go | 8 +- cmd/observer/node_builder/observer_builder.go | 5 +- cmd/util/cmd/epochs/cmd/templates.go | 6 +- consensus/integration/integration_test.go | 8 +- consensus/integration/network_test.go | 19 +- engine/access/access_test.go | 8 +- engine/access/ingestion/engine.go | 7 +- engine/access/ingestion/engine_test.go | 4 +- engine/access/relay/engine.go | 17 +- engine/access/relay/engine_test.go | 12 +- engine/access/relay/example_test.go | 12 +- engine/access/rest/request/script_test.go | 2 +- engine/access/rest/scripts_test.go | 4 +- engine/collection/compliance/engine.go | 7 +- engine/collection/compliance/engine_test.go | 5 +- engine/collection/ingest/engine.go | 5 +- engine/collection/pusher/engine.go | 7 +- engine/collection/pusher/engine_test.go | 4 +- engine/collection/synchronization/engine.go | 7 +- .../collection/synchronization/engine_test.go | 9 +- .../synchronization/request_handler.go | 5 +- .../test/cluster_switchover_test.go | 4 +- engine/common/follower/engine.go | 7 +- engine/common/follower/engine_test.go | 10 +- engine/common/provider/engine.go | 9 +- engine/common/requester/engine.go | 11 +- engine/common/splitter/engine.go | 7 +- engine/common/splitter/engine_test.go | 8 +- .../common/splitter/network/example_test.go | 6 +- engine/common/splitter/network/network.go | 13 +- .../common/splitter/network/network_test.go | 13 +- engine/common/synchronization/engine.go | 7 +- engine/common/synchronization/engine_test.go | 7 +- .../common/synchronization/request_handler.go | 4 +- .../synchronization/request_handler_engine.go | 5 +- engine/consensus/compliance/core_test.go | 3 +- engine/consensus/compliance/engine.go | 7 +- engine/consensus/compliance/engine_test.go | 6 +- engine/consensus/dkg/messaging_engine.go | 7 +- 
engine/consensus/dkg/messaging_engine_test.go | 4 +- engine/consensus/ingestion/engine.go | 7 +- engine/consensus/ingestion/engine_test.go | 7 +- engine/consensus/matching/engine.go | 7 +- engine/consensus/matching/engine_test.go | 4 +- engine/consensus/provider/engine.go | 7 +- engine/consensus/sealing/engine.go | 9 +- engine/consensus/sealing/engine_test.go | 8 +- engine/execution/execution_test.go | 18 +- engine/execution/ingestion/engine.go | 7 +- engine/execution/ingestion/engine_test.go | 3 +- engine/execution/provider/engine.go | 9 +- .../provider/mock/provider_engine.go | 11 +- engine/ghost/client/ghost_client.go | 3 +- engine/ghost/engine/handler.go | 7 +- engine/ghost/engine/rpc.go | 35 +- engine/testutil/nodes.go | 8 +- engine/verification/requester/requester.go | 7 +- .../verification/requester/requester_test.go | 14 +- engine/verification/utils/unittest/helper.go | 5 +- engine/verification/verifier/engine.go | 9 +- engine/verification/verifier/engine_test.go | 6 +- follower/follower_builder.go | 3 +- fvm/blueprints/epochs.go | 4 +- fvm/fvm_bench_test.go | 36 +- fvm/fvm_blockcontext_test.go | 12 +- fvm/fvm_signature_test.go | 2 +- fvm/fvm_test.go | 4 +- fvm/handler/programs_test.go | 8 +- insecure/attacknetwork/attackNetwork.go | 5 +- insecure/conduitController.go | 6 +- insecure/corruptible/conduit.go | 4 +- insecure/corruptible/conduit_test.go | 18 +- insecure/corruptible/factory.go | 13 +- insecure/corruptible/factory_test.go | 26 +- insecure/event.go | 10 +- insecure/fixtures.go | 3 +- .../integration/test/composability_test.go | 2 +- insecure/mock/conduit_controller.go | 14 +- insecure/mock/conduit_master.go | 3 +- insecure/wintermute/attackOrchestrator.go | 4 +- .../wintermute/attackOrchestrator_test.go | 4 +- insecure/wintermute/helpers.go | 8 +- integration/dkg/dkg_emulator_suite.go | 2 +- integration/tests/consensus/inclusion_test.go | 4 +- integration/tests/consensus/sealing_test.go | 8 +- .../tests/execution/chunk_data_pack_test.go | 4 +- 
.../tests/execution/state_sync_test.go | 4 +- .../tests/ghost/ghost_node_example_test.go | 4 +- integration/tests/network/network_test.go | 4 +- integration/utils/scripts.go | 10 +- integration/utils/tx_stats_tracker_test.go | 2 +- ledger/complete/mtrie/trie/trie_test.go | 2 +- model/convert/fixtures/fixture.go | 90 ++-- model/convert/fixtures_test.go | 90 ++-- model/flow/identifierList.go | 6 +- model/flow/ledger_test.go | 2 +- model/flow/role.go | 6 +- module/dkg/client.go | 2 +- module/epochs/epoch_config.go | 2 +- module/metrics/example/collection/main.go | 6 +- module/metrics/example/consensus/main.go | 6 +- module/mock/dht_metrics.go | 35 ++ module/mock/network_metrics.go | 10 + module/mock/sealing_configs_getter.go | 95 +++++ module/mock/sealing_configs_setter.go | 116 ++++++ network/cache/rcvcache_test.go | 14 +- network/{ => channels}/channel.go | 8 +- network/{ => channels}/channels.go | 2 +- network/{ => channels}/channels_test.go | 14 +- network/channels/topic.go | 9 + network/conduit.go | 3 +- network/converter/network.go | 11 +- network/engine.go | 7 +- network/message/authorization.go | 391 +++++++++++++++++ network/message/init.go | 36 ++ network/message_authorization_config.go | 393 ------------------ network/middleware.go | 18 +- network/mocknetwork/adapter.go | 20 +- network/mocknetwork/conduit_factory.go | 11 +- network/mocknetwork/engine.go | 10 +- network/mocknetwork/message_processor.go | 8 +- network/mocknetwork/middleware.go | 18 +- network/mocknetwork/mock_network.go | 5 +- network/mocknetwork/network.go | 15 +- network/mocknetwork/subscription_manager.go | 26 +- network/mocknetwork/topology.go | 18 +- network/network.go | 13 +- network/p2p/conduit/conduit.go | 5 +- network/p2p/dht_test.go | 4 +- network/p2p/fixture_test.go | 11 +- network/p2p/libp2pNode.go | 17 +- network/p2p/libp2pNodeBuilder.go | 6 +- network/p2p/middleware.go | 17 +- network/p2p/network.go | 33 +- network/p2p/sporking_test.go | 10 +- network/p2p/subscriptionManager.go 
| 15 +- network/p2p/subscription_filter.go | 8 +- network/p2p/subscription_filter_test.go | 18 +- network/p2p/topic_validator_test.go | 26 +- network/proxy/network.go | 3 +- network/proxy/network_test.go | 9 +- network/queue/eventPriority.go | 10 +- network/relay/network.go | 11 +- network/relay/relayer.go | 7 +- network/stub/buffer.go | 4 +- network/stub/hash.go | 4 +- network/stub/network.go | 25 +- network/subscription.go | 10 +- network/test/blob_service_test.go | 5 +- network/test/echoengine.go | 11 +- network/test/echoengine_test.go | 49 +-- network/test/meshengine.go | 19 +- network/test/meshengine_test.go | 17 +- network/test/middleware_test.go | 3 +- network/test/testUtil.go | 3 +- network/topology.go | 3 +- network/topology/cache.go | 3 +- network/topology/cache_test.go | 12 +- network/topology/fixedListTopology.go | 5 +- network/topology/fullyConnectedTopology.go | 3 +- network/topology/helper.go | 7 +- network/topology/randomizedTopology.go | 15 +- network/topology/randomizedTopology_test.go | 8 +- network/topology/topicBasedTopology.go | 17 +- network/topology/topicBasedTopology_test.go | 14 +- .../pubsub/authorized_sender_validator.go | 16 +- .../authorized_sender_validator_test.go | 21 +- utils/unittest/network/network.go | 21 +- 171 files changed, 1568 insertions(+), 1191 deletions(-) create mode 100644 module/mock/dht_metrics.go create mode 100644 module/mock/sealing_configs_getter.go create mode 100644 module/mock/sealing_configs_setter.go rename network/{ => channels}/channel.go (84%) rename network/{ => channels}/channels.go (99%) rename network/{ => channels}/channels_test.go (91%) create mode 100644 network/channels/topic.go create mode 100644 network/message/authorization.go create mode 100644 network/message/init.go delete mode 100644 network/message_authorization_config.go diff --git a/cmd/access/node_builder/access_node_builder.go b/cmd/access/node_builder/access_node_builder.go index 1d84856914b..56a6ed77b19 100644 --- 
a/cmd/access/node_builder/access_node_builder.go +++ b/cmd/access/node_builder/access_node_builder.go @@ -15,6 +15,7 @@ import ( "github.com/libp2p/go-libp2p-core/host" "github.com/libp2p/go-libp2p-core/routing" pubsub "github.com/libp2p/go-libp2p-pubsub" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow/protobuf/go/flow/access" "github.com/rs/zerolog" "github.com/spf13/pflag" @@ -428,7 +429,7 @@ func (builder *FlowAccessNodeBuilder) BuildExecutionDataRequester() *FlowAccessN }). Component("execution data service", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { var err error - bs, err = node.Network.RegisterBlobService(network.ExecutionDataService, ds) + bs, err = node.Network.RegisterBlobService(channels.ExecutionDataService, ds) if err != nil { return nil, fmt.Errorf("could not register blob service: %w", err) } @@ -705,7 +706,7 @@ func (builder *FlowAccessNodeBuilder) enqueueRelayNetwork() { node.Network, builder.AccessNodeConfig.PublicNetworkConfig.Network, node.Logger, - []network.Channel{network.ReceiveBlocks}, + []channels.Channel{channels.ReceiveBlocks}, ) node.Network = relayNet return relayNet, nil @@ -839,7 +840,7 @@ func (builder *FlowAccessNodeBuilder) Build() (cmd.Node, error) { node.Network, node.Me, node.State, - network.RequestCollections, + channels.RequestCollections, filter.HasRole(flow.RoleCollection), func() flow.Entity { return &flow.Collection{} }, ) diff --git a/cmd/collection/main.go b/cmd/collection/main.go index 9b48eab267c..1a1c14f9e53 100644 --- a/cmd/collection/main.go +++ b/cmd/collection/main.go @@ -4,6 +4,7 @@ import ( "fmt" "time" + "github.com/onflow/flow-go/network/channels" "github.com/spf13/pflag" "github.com/onflow/flow-go/cmd/util/cmd/common" @@ -41,7 +42,6 @@ import ( epochpool "github.com/onflow/flow-go/module/mempool/epochs" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/module/synchronization" - "github.com/onflow/flow-go/network" 
"github.com/onflow/flow-go/state/protocol" badgerState "github.com/onflow/flow-go/state/protocol/badger" "github.com/onflow/flow-go/state/protocol/blocktimer" @@ -379,7 +379,7 @@ func main() { return coll, err } return provider.New(node.Logger, node.Metrics.Engine, node.Network, node.Me, node.State, - network.ProvideCollections, + channels.ProvideCollections, filter.HasRole(flow.RoleAccess, flow.RoleExecution), retrieve, ) diff --git a/cmd/consensus/main.go b/cmd/consensus/main.go index 1a1b19542c2..4968e545aac 100644 --- a/cmd/consensus/main.go +++ b/cmd/consensus/main.go @@ -10,6 +10,7 @@ import ( "path/filepath" "time" + "github.com/onflow/flow-go/network/channels" "github.com/spf13/pflag" client "github.com/onflow/flow-go-sdk/access/grpc" @@ -58,7 +59,6 @@ import ( "github.com/onflow/flow-go/module/synchronization" "github.com/onflow/flow-go/module/updatable_configs" "github.com/onflow/flow-go/module/validation" - "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/state/protocol" badgerState "github.com/onflow/flow-go/state/protocol/badger" "github.com/onflow/flow-go/state/protocol/blocktimer" @@ -440,7 +440,7 @@ func main() { node.Network, node.Me, node.State, - network.RequestReceiptsByBlockID, + channels.RequestReceiptsByBlockID, filter.HasRole(flow.RoleExecution), func() flow.Entity { return &flow.ExecutionReceipt{} }, requester.WithRetryInitial(2*time.Second), diff --git a/cmd/execution_builder.go b/cmd/execution_builder.go index f2b6e48fff2..44be0aae37d 100644 --- a/cmd/execution_builder.go +++ b/cmd/execution_builder.go @@ -16,6 +16,7 @@ import ( badger "github.com/ipfs/go-ds-badger2" "github.com/onflow/cadence/runtime" "github.com/onflow/flow-core-contracts/lib/go/templates" + "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/shirou/gopsutil/v3/cpu" "github.com/shirou/gopsutil/v3/host" @@ -64,7 +65,6 @@ import ( "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/module/state_synchronization" 
chainsync "github.com/onflow/flow-go/module/synchronization" - "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/compressor" "github.com/onflow/flow-go/network/p2p" "github.com/onflow/flow-go/state/protocol" @@ -424,7 +424,7 @@ func (e *ExecutionNodeBuilder) LoadComponentsAndModules() { executionDataCIDCache = state_synchronization.NewExecutionDataCIDCache(executionDataCIDCacheSize) bs, err := node.Network.RegisterBlobService( - network.ExecutionDataService, + channels.ExecutionDataService, ds, p2p.WithBitswapOptions( bitswap.WithTaskComparator( @@ -613,7 +613,7 @@ func (e *ExecutionNodeBuilder) LoadComponentsAndModules() { Component("ingestion engine", func(node *NodeConfig) (module.ReadyDoneAware, error) { var err error collectionRequester, err = requester.New(node.Logger, node.Metrics.Engine, node.Network, node.Me, node.State, - network.RequestCollections, + channels.RequestCollections, filter.Any, func() flow.Entity { return &flow.Collection{} }, // we are manually triggering batches in execution, but lets still send off a batch once a minute, as a safety net for the sake of retries @@ -741,7 +741,7 @@ func (e *ExecutionNodeBuilder) LoadComponentsAndModules() { node.Network, node.Me, node.State, - network.ProvideReceiptsByBlockID, + channels.ProvideReceiptsByBlockID, filter.HasRole(flow.RoleConsensus), retrieve, ) diff --git a/cmd/observer/node_builder/observer_builder.go b/cmd/observer/node_builder/observer_builder.go index 5b8a78d55c3..1871d930cf9 100644 --- a/cmd/observer/node_builder/observer_builder.go +++ b/cmd/observer/node_builder/observer_builder.go @@ -17,6 +17,7 @@ import ( "github.com/libp2p/go-libp2p-core/routing" dht "github.com/libp2p/go-libp2p-kad-dht" p2ppubsub "github.com/libp2p/go-libp2p-pubsub" + "github.com/onflow/flow-go/network/channels" sdkcrypto "github.com/onflow/flow-go-sdk/crypto" "github.com/onflow/flow-go/apiproxy" @@ -465,7 +466,7 @@ func (builder *ObserverServiceBuilder) BuildExecutionDataRequester() 
*ObserverSe }). Component("execution data service", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { var err error - bs, err = node.Network.RegisterBlobService(network.ExecutionDataService, ds) + bs, err = node.Network.RegisterBlobService(channels.ExecutionDataService, ds) if err != nil { return nil, fmt.Errorf("could not register blob service: %w", err) } @@ -937,7 +938,7 @@ func (builder *ObserverServiceBuilder) enqueuePublicNetworkInit() { return nil, err } - builder.Network = converter.NewNetwork(net, network.SyncCommittee, network.PublicSyncCommittee) + builder.Network = converter.NewNetwork(net, channels.SyncCommittee, channels.PublicSyncCommittee) builder.Logger.Info().Msgf("network will run on address: %s", builder.BindAddr) diff --git a/cmd/util/cmd/epochs/cmd/templates.go b/cmd/util/cmd/epochs/cmd/templates.go index 7b05a95bb5f..97082f85a99 100644 --- a/cmd/util/cmd/epochs/cmd/templates.go +++ b/cmd/util/cmd/epochs/cmd/templates.go @@ -10,7 +10,7 @@ var deployEpochTransactionTemplate = ` // This transaction is needed to adjust the numViewsInEpoch and numViewsInStakingAuction // value based on the current block when epochs is deployed -transaction(name: String, +transaction(name: Name, currentEpochCounter: UInt64, // this value should be the number of views in the epoch, as computed from the // first and final views of the epoch info from the protocol state @@ -19,7 +19,7 @@ transaction(name: String, numViewsInDKGPhase: UInt64, numCollectorClusters: UInt16, FLOWsupplyIncreasePercentage: UFix64, - randomSource: String) { + randomSource: Name) { prepare(signer: AuthAccount) { @@ -51,6 +51,6 @@ transaction(name: String, // the below arguments are unused and are safe to be left empty collectorClusters: [] as [FlowClusterQC.Cluster], clusterQCs: [] as [FlowClusterQC.ClusterQC], - dkgPubKeys: [] as [String]) + dkgPubKeys: [] as [Name]) } }` diff --git a/consensus/integration/integration_test.go b/consensus/integration/integration_test.go index 
4030e7a6f1e..c6034904f03 100644 --- a/consensus/integration/integration_test.go +++ b/consensus/integration/integration_test.go @@ -6,12 +6,12 @@ import ( "testing" "time" + "github.com/onflow/flow-go/network/channels" "github.com/stretchr/testify/require" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/util" - "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/utils/unittest" ) @@ -136,10 +136,10 @@ func chainViews(t *testing.T, node *Node) []uint64 { return low2high } -type BlockOrDelayFunc func(channel network.Channel, event interface{}, sender, receiver *Node) (bool, time.Duration) +type BlockOrDelayFunc func(channel channels.Channel, event interface{}, sender, receiver *Node) (bool, time.Duration) // block nothing -func blockNothing(channel network.Channel, event interface{}, sender, receiver *Node) (bool, time.Duration) { +func blockNothing(channel channels.Channel, event interface{}, sender, receiver *Node) (bool, time.Duration) { return false, 0 } @@ -149,7 +149,7 @@ func blockNodes(denyList ...*Node) BlockOrDelayFunc { for _, n := range denyList { blackList[n.id.ID()] = n } - return func(channel network.Channel, event interface{}, sender, receiver *Node) (bool, time.Duration) { + return func(channel channels.Channel, event interface{}, sender, receiver *Node) (bool, time.Duration) { block, notBlock := true, false if _, ok := blackList[sender.id.ID()]; ok { return block, 0 diff --git a/consensus/integration/network_test.go b/consensus/integration/network_test.go index f48d32a3ce7..037de473f91 100644 --- a/consensus/integration/network_test.go +++ b/consensus/integration/network_test.go @@ -6,6 +6,7 @@ import ( "time" "github.com/hashicorp/go-multierror" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/network" @@ -43,7 +44,7 @@ func (h *Hub) AddNetwork(originID flow.Identifier, node *Node) *Network { ctx: 
context.Background(), hub: h, originID: originID, - conduits: make(map[network.Channel]*Conduit), + conduits: make(map[channels.Channel]*Conduit), node: node, } h.networks[originID] = net @@ -62,13 +63,13 @@ type Network struct { hub *Hub node *Node originID flow.Identifier - conduits map[network.Channel]*Conduit + conduits map[channels.Channel]*Conduit mocknetwork.Network } // Register registers an Engine of the attached node to the channel via a Conduit, and returns the // Conduit instance. -func (n *Network) Register(channel network.Channel, engine network.MessageProcessor) (network.Conduit, error) { +func (n *Network) Register(channel channels.Channel, engine network.MessageProcessor) (network.Conduit, error) { ctx, cancel := context.WithCancel(n.ctx) con := &Conduit{ ctx: ctx, @@ -91,7 +92,7 @@ func (n *Network) Register(channel network.Channel, engine network.MessageProces } // unregister unregisters the engine associated with the given channel and closes the conduit queue. -func (n *Network) unregister(channel network.Channel) error { +func (n *Network) unregister(channel channels.Channel) error { con := n.conduits[channel] close(con.queue) delete(n.conduits, channel) @@ -101,7 +102,7 @@ func (n *Network) unregister(channel network.Channel) error { // submit is called when the attached Engine to the channel is sending an event to an // Engine attached to the same channel on another node or nodes. // This implementation uses unicast under the hood. -func (n *Network) submit(event interface{}, channel network.Channel, targetIDs ...flow.Identifier) error { +func (n *Network) submit(event interface{}, channel channels.Channel, targetIDs ...flow.Identifier) error { var sendErrors *multierror.Error for _, targetID := range targetIDs { if err := n.unicast(event, channel, targetID); err != nil { @@ -113,7 +114,7 @@ func (n *Network) submit(event interface{}, channel network.Channel, targetIDs . 
// unicast is called when the attached Engine to the channel is sending an event to a single target // Engine attached to the same channel on another node. -func (n *Network) unicast(event interface{}, channel network.Channel, targetID flow.Identifier) error { +func (n *Network) unicast(event interface{}, channel channels.Channel, targetID flow.Identifier) error { net, found := n.hub.networks[targetID] if !found { return fmt.Errorf("could not find target network on hub: %x", targetID) @@ -149,14 +150,14 @@ func (n *Network) unicast(event interface{}, channel network.Channel, targetID f // publish is called when the attached Engine is sending an event to a group of Engines attached to the // same channel on other nodes based on selector. // In this test helper implementation, publish uses submit method under the hood. -func (n *Network) publish(event interface{}, channel network.Channel, targetIDs ...flow.Identifier) error { +func (n *Network) publish(event interface{}, channel channels.Channel, targetIDs ...flow.Identifier) error { return n.submit(event, channel, targetIDs...) } // multicast is called when an Engine attached to the channel is sending an event to a number of randomly chosen // Engines attached to the same channel on other nodes. The targeted nodes are selected based on the selector. // In this test helper implementation, multicast uses submit method under the hood. -func (n *Network) multicast(event interface{}, channel network.Channel, num uint, targetIDs ...flow.Identifier) error { +func (n *Network) multicast(event interface{}, channel channels.Channel, num uint, targetIDs ...flow.Identifier) error { targetIDs = flow.Sample(num, targetIDs...) return n.submit(event, channel, targetIDs...) 
} @@ -165,7 +166,7 @@ type Conduit struct { ctx context.Context cancel context.CancelFunc net *Network - channel network.Channel + channel channels.Channel queue chan message } diff --git a/engine/access/access_test.go b/engine/access/access_test.go index 0cddf5a4a67..d9042692373 100644 --- a/engine/access/access_test.go +++ b/engine/access/access_test.go @@ -7,6 +7,7 @@ import ( "testing" "github.com/dgraph-io/badger/v2" + "github.com/onflow/flow-go/network/channels" accessproto "github.com/onflow/flow/protobuf/go/flow/access" entitiesproto "github.com/onflow/flow/protobuf/go/flow/entities" execproto "github.com/onflow/flow/protobuf/go/flow/execution" @@ -33,7 +34,6 @@ import ( "github.com/onflow/flow-go/module/metrics" module "github.com/onflow/flow-go/module/mock" "github.com/onflow/flow-go/module/signature" - "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/mocknetwork" protocol "github.com/onflow/flow-go/state/protocol/mock" storage "github.com/onflow/flow-go/storage/badger" @@ -555,7 +555,7 @@ func (suite *Suite) TestGetSealedTransaction() { // setup mocks originID := unittest.IdentifierFixture() conduit := new(mocknetwork.Conduit) - suite.net.On("Register", network.ReceiveReceipts, mock.Anything).Return(conduit, nil). + suite.net.On("Register", channels.ReceiveReceipts, mock.Anything).Return(conduit, nil). Once() suite.request.On("Request", mock.Anything, mock.Anything).Return() @@ -650,7 +650,7 @@ func (suite *Suite) TestGetSealedTransaction() { ingestEng.OnCollection(originID, &collection) for _, r := range executionReceipts { - err = ingestEng.Process(network.ReceiveReceipts, enNodeIDs[0], r) + err = ingestEng.Process(channels.ReceiveReceipts, enNodeIDs[0], r) require.NoError(suite.T(), err) } @@ -716,7 +716,7 @@ func (suite *Suite) TestExecuteScript() { require.NoError(suite.T(), err) conduit := new(mocknetwork.Conduit) - suite.net.On("Register", network.ReceiveReceipts, mock.Anything).Return(conduit, nil). 
+ suite.net.On("Register", channels.ReceiveReceipts, mock.Anything).Return(conduit, nil). Once() // create the ingest engine ingestEng, err := ingestion.New(suite.log, suite.net, suite.state, suite.me, suite.request, blocks, headers, collections, diff --git a/engine/access/ingestion/engine.go b/engine/access/ingestion/engine.go index 51422f310e0..31fa164de41 100644 --- a/engine/access/ingestion/engine.go +++ b/engine/access/ingestion/engine.go @@ -8,6 +8,7 @@ import ( "fmt" "time" + "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/onflow/flow-go/consensus/hotstuff/model" @@ -178,7 +179,7 @@ func New( Build() // register engine with the execution receipt provider - _, err = net.Register(network.ReceiveReceipts, e) + _, err = net.Register(channels.ReceiveReceipts, e) if err != nil { return nil, fmt.Errorf("could not register for results: %w", err) } @@ -323,7 +324,7 @@ func (e *Engine) SubmitLocal(event interface{}) { // Submit submits the given event from the node with the given origin ID // for processing in a non-blocking manner. It returns instantly and logs // a potential processing error internally when done. -func (e *Engine) Submit(channel network.Channel, originID flow.Identifier, event interface{}) { +func (e *Engine) Submit(channel channels.Channel, originID flow.Identifier, event interface{}) { err := e.process(originID, event) if err != nil { engine.LogError(e.log, err) @@ -337,7 +338,7 @@ func (e *Engine) ProcessLocal(event interface{}) error { // Process processes the given event from the node with the given origin ID in // a blocking manner. It returns the potential processing error when done. 
-func (e *Engine) Process(channel network.Channel, originID flow.Identifier, event interface{}) error { +func (e *Engine) Process(channel channels.Channel, originID flow.Identifier, event interface{}) error { return e.process(originID, event) } diff --git a/engine/access/ingestion/engine_test.go b/engine/access/ingestion/engine_test.go index 02f1c477aaa..4158ec40e46 100644 --- a/engine/access/ingestion/engine_test.go +++ b/engine/access/ingestion/engine_test.go @@ -9,6 +9,7 @@ import ( "testing" "time" + "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -24,7 +25,6 @@ import ( "github.com/onflow/flow-go/module/metrics" module "github.com/onflow/flow-go/module/mock" "github.com/onflow/flow-go/module/signature" - "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/mocknetwork" protocol "github.com/onflow/flow-go/state/protocol/mock" storerr "github.com/onflow/flow-go/storage" @@ -82,7 +82,7 @@ func (suite *Suite) SetupTest() { net := new(mocknetwork.Network) conduit := new(mocknetwork.Conduit) - net.On("Register", network.ReceiveReceipts, mock.Anything). + net.On("Register", channels.ReceiveReceipts, mock.Anything). Return(conduit, nil). 
Once() suite.request = new(module.Requester) diff --git a/engine/access/relay/engine.go b/engine/access/relay/engine.go index d7cfe5e59fe..8abeabd0943 100644 --- a/engine/access/relay/engine.go +++ b/engine/access/relay/engine.go @@ -3,6 +3,7 @@ package relay import ( "fmt" + "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/onflow/flow-go/engine" @@ -12,21 +13,21 @@ // Relay engine relays all the messages that are received to the given network for the corresponding channel type Engine struct { - unit *engine.Unit // used to manage concurrency & shutdown - log zerolog.Logger // used to log relevant actions with context - conduits map[network.Channel]network.Conduit // conduits for unstaked network + unit *engine.Unit // used to manage concurrency & shutdown + log zerolog.Logger // used to log relevant actions with context + conduits map[channels.Channel]network.Conduit // conduits for unstaked network } func New( log zerolog.Logger, - channels network.ChannelList, + channelList channels.ChannelList, net network.Network, unstakedNet network.Network, ) (*Engine, error) { e := &Engine{ unit: engine.NewUnit(), log: log.With().Str("engine", "relay").Logger(), - conduits: make(map[network.Channel]network.Conduit), + conduits: make(map[channels.Channel]network.Conduit), } - for _, channel := range channels { + for _, channel := range channelList { @@ -76,7 +77,7 @@ func (e *Engine) ProcessLocal(event interface{}) error { // Submit submits the given event from the node with the given origin ID // for processing in a non-blocking manner. It returns instantly and logs // a potential processing error internally when done. 
-func (e *Engine) Submit(channel network.Channel, originID flow.Identifier, event interface{}) { +func (e *Engine) Submit(channel channels.Channel, originID flow.Identifier, event interface{}) { e.unit.Launch(func() { err := e.Process(channel, originID, event) if err != nil { @@ -88,13 +89,13 @@ func (e *Engine) Submit(channel network.Channel, originID flow.Identifier, event // Process processes the given event from the node with the given origin ID // in a blocking manner. It returns the potential processing error when // done. -func (e *Engine) Process(channel network.Channel, originID flow.Identifier, event interface{}) error { +func (e *Engine) Process(channel channels.Channel, originID flow.Identifier, event interface{}) error { return e.unit.Do(func() error { return e.process(channel, originID, event) }) } -func (e *Engine) process(channel network.Channel, originID flow.Identifier, event interface{}) error { +func (e *Engine) process(channel channels.Channel, originID flow.Identifier, event interface{}) error { conduit, ok := e.conduits[channel] if !ok { diff --git a/engine/access/relay/engine_test.go b/engine/access/relay/engine_test.go index adbd3c4c805..bae3037f169 100644 --- a/engine/access/relay/engine_test.go +++ b/engine/access/relay/engine_test.go @@ -3,12 +3,12 @@ package relay import ( "testing" + "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/suite" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/mocknetwork" "github.com/onflow/flow-go/utils/unittest" ) @@ -17,17 +17,17 @@ type Suite struct { suite.Suite engine *Engine - channels network.ChannelList - conduits map[network.Channel]*mocknetwork.Conduit + channels channels.ChannelList + conduits map[channels.Channel]*mocknetwork.Conduit } func (suite *Suite) SetupTest() { - suite.channels = network.ChannelList{ - network.Channel("test-channel-1"), + 
suite.channels = channels.ChannelList{ + channels.Channel("test-channel-1"), } net := new(mocknetwork.Network) unstakedNet := new(mocknetwork.Network) - suite.conduits = make(map[network.Channel]*mocknetwork.Conduit) + suite.conduits = make(map[channels.Channel]*mocknetwork.Conduit) for _, channel := range suite.channels { con := new(mocknetwork.Conduit) diff --git a/engine/access/relay/example_test.go b/engine/access/relay/example_test.go index a3b40fca540..d7036a865d6 100644 --- a/engine/access/relay/example_test.go +++ b/engine/access/relay/example_test.go @@ -4,12 +4,12 @@ import ( "fmt" "math/rand" + "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/onflow/flow-go/engine/access/relay" splitterNetwork "github.com/onflow/flow-go/engine/common/splitter/network" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/network" testnet "github.com/onflow/flow-go/utils/unittest/network" ) @@ -28,7 +28,7 @@ func Example() { // create engines engineProcessFunc := func(engineName string) testnet.EngineProcessFunc { - return func(channel network.Channel, originID flow.Identifier, event interface{}) error { + return func(channel channels.Channel, originID flow.Identifier, event interface{}) error { fmt.Printf("Engine %v received message: channel=%v, originID=%v, event=%v\n", engineName, channel, originID, event) return nil } @@ -37,8 +37,8 @@ func Example() { barEngine := testnet.NewEngine().OnProcess(engineProcessFunc("Bar")) // register engines on the splitter network - fooChannel := network.Channel("foo-channel") - barChannel := network.Channel("bar-channel") + fooChannel := channels.Channel("foo-channel") + barChannel := channels.Channel("bar-channel") _, err := splitterNet.Register(fooChannel, fooEngine) if err != nil { fmt.Println(err) @@ -49,13 +49,13 @@ func Example() { } // create another network that messages will be relayed to - relayNet := testnet.NewNetwork().OnPublish(func(channel network.Channel, event interface{}, 
targetIDs ...flow.Identifier) error { + relayNet := testnet.NewNetwork().OnPublish(func(channel channels.Channel, event interface{}, targetIDs ...flow.Identifier) error { fmt.Printf("Message published to relay network: channel=%v, event=%v, targetIDs=%v\n", channel, event, targetIDs) return nil }) // create relay engine - channels := network.ChannelList{fooChannel, barChannel} + channels := channels.ChannelList{fooChannel, barChannel} _, err = relay.New(logger, channels, splitterNet, relayNet) if err != nil { fmt.Println(err) diff --git a/engine/access/rest/request/script_test.go b/engine/access/rest/request/script_test.go index ab74ae86ea5..359a32c19a9 100644 --- a/engine/access/rest/request/script_test.go +++ b/engine/access/rest/request/script_test.go @@ -31,7 +31,7 @@ func TestScript_InvalidParse(t *testing.T) { } func TestScript_ValidParse(t *testing.T) { - arg1 := []byte(`{"type": "String", "value": "hello" }`) + arg1 := []byte(`{"type": "String", "value": "hello" }`) body := strings.NewReader(fmt.Sprintf( `{ "script": "%s", "arguments": ["%s"] }`, validBodyEncoded, diff --git a/engine/access/rest/scripts_test.go b/engine/access/rest/scripts_test.go index 7e3271c1d81..40b9b3fca7a 100644 --- a/engine/access/rest/scripts_test.go +++ b/engine/access/rest/scripts_test.go @@ -39,8 +39,8 @@ func scriptReq(id string, height string, body interface{}) *http.Request { } func TestScripts(t *testing.T) { - validCode := []byte(`pub fun main(foo: String): String { return foo }`) - validArgs := []byte(`{ "type": "String", "value": "hello world" }`) + validCode := []byte(`pub fun main(foo: String): String { return foo }`) + validArgs := []byte(`{ "type": "String", "value": "hello world" }`) validBody := map[string]interface{}{ "script": util.ToBase64(validCode), "arguments": []string{util.ToBase64(validArgs)}, diff --git a/engine/collection/compliance/engine.go b/engine/collection/compliance/engine.go index 57b609db1bb..dbd575c4adb 100644 --- a/engine/collection/compliance/engine.go 
+++ b/engine/collection/compliance/engine.go @@ -6,6 +6,7 @@ import ( "fmt" "time" + "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/onflow/flow-go/consensus/hotstuff/model" @@ -171,7 +172,7 @@ func NewEngine( } // register network conduit - conduit, err := net.Register(network.ChannelConsensusCluster(chainID), eng) + conduit, err := net.Register(channels.ChannelConsensusCluster(chainID), eng) if err != nil { return nil, fmt.Errorf("could not register engine: %w", err) } @@ -246,7 +247,7 @@ func (e *Engine) SubmitLocal(event interface{}) { // Submit submits the given event from the node with the given origin ID // for processing in a non-blocking manner. It returns instantly and logs // a potential processing error internally when done. -func (e *Engine) Submit(channel network.Channel, originID flow.Identifier, event interface{}) { +func (e *Engine) Submit(channel channels.Channel, originID flow.Identifier, event interface{}) { err := e.Process(channel, originID, event) if err != nil { e.log.Fatal().Err(err).Msg("internal error processing event") @@ -260,7 +261,7 @@ func (e *Engine) ProcessLocal(event interface{}) error { // Process processes the given event from the node with the given origin ID in // a blocking manner. It returns the potential processing error when done. 
-func (e *Engine) Process(channel network.Channel, originID flow.Identifier, event interface{}) error { +func (e *Engine) Process(channel channels.Channel, originID flow.Identifier, event interface{}) error { err := e.messageHandler.Process(originID, event) if err != nil { if engine.IsIncompatibleInputTypeError(err) { diff --git a/engine/collection/compliance/engine_test.go b/engine/collection/compliance/engine_test.go index df7660799f9..8241d5a2d55 100644 --- a/engine/collection/compliance/engine_test.go +++ b/engine/collection/compliance/engine_test.go @@ -6,6 +6,7 @@ import ( "testing" "time" + "github.com/onflow/flow-go/network/channels" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" @@ -121,7 +122,7 @@ func (cs *ComplianceSuite) SetupTest() { // set up network module mock cs.net = &mocknetwork.Network{} cs.net.On("Register", mock.Anything, mock.Anything).Return( - func(channel netint.Channel, engine netint.MessageProcessor) netint.Conduit { + func(channel channels.Channel, engine netint.MessageProcessor) netint.Conduit { return cs.con }, nil, @@ -250,7 +251,7 @@ func (cs *ComplianceSuite) TestSubmittingMultipleEntries() { originID := unittest.IdentifierFixture() voteCount := 15 - channel := netint.ChannelConsensusCluster(cs.clusterID) + channel := channels.ChannelConsensusCluster(cs.clusterID) var wg sync.WaitGroup wg.Add(1) diff --git a/engine/collection/ingest/engine.go b/engine/collection/ingest/engine.go index e350691c11a..1aa13ca91e0 100644 --- a/engine/collection/ingest/engine.go +++ b/engine/collection/ingest/engine.go @@ -7,6 +7,7 @@ import ( "errors" "fmt" + "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/onflow/flow-go/access" @@ -116,7 +117,7 @@ func New( AddWorker(e.processQueuedTransactions). 
Build() - conduit, err := net.Register(network.PushTransactions, e) + conduit, err := net.Register(channels.PushTransactions, e) if err != nil { return nil, fmt.Errorf("could not register engine: %w", err) } @@ -128,7 +129,7 @@ func New( // Process processes a transaction message from the network and enqueues the // message. Validation and ingestion is performed in the processQueuedTransactions // worker. -func (e *Engine) Process(channel network.Channel, originID flow.Identifier, event interface{}) error { +func (e *Engine) Process(channel channels.Channel, originID flow.Identifier, event interface{}) error { select { case <-e.ComponentManager.ShutdownSignal(): e.log.Warn().Msgf("received message from %x after shut down", originID) diff --git a/engine/collection/pusher/engine.go b/engine/collection/pusher/engine.go index 93d08a0f199..177ed2fb450 100644 --- a/engine/collection/pusher/engine.go +++ b/engine/collection/pusher/engine.go @@ -6,6 +6,7 @@ package pusher import ( "fmt" + "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/onflow/flow-go/engine" @@ -46,7 +47,7 @@ func New(log zerolog.Logger, net network.Network, state protocol.State, engMetri transactions: transactions, } - conduit, err := net.Register(network.PushGuarantees, e) + conduit, err := net.Register(channels.PushGuarantees, e) if err != nil { return nil, fmt.Errorf("could not register for push protocol: %w", err) } @@ -79,7 +80,7 @@ func (e *Engine) SubmitLocal(event interface{}) { // Submit submits the given event from the node with the given origin ID // for processing in a non-blocking manner. It returns instantly and logs // a potential processing error internally when done. 
-func (e *Engine) Submit(channel network.Channel, originID flow.Identifier, event interface{}) { +func (e *Engine) Submit(channel channels.Channel, originID flow.Identifier, event interface{}) { e.unit.Launch(func() { err := e.process(originID, event) if err != nil { @@ -97,7 +98,7 @@ func (e *Engine) ProcessLocal(event interface{}) error { // Process processes the given event from the node with the given origin ID in // a blocking manner. It returns the potential processing error when done. -func (e *Engine) Process(channel network.Channel, originID flow.Identifier, event interface{}) error { +func (e *Engine) Process(channel channels.Channel, originID flow.Identifier, event interface{}) error { return e.unit.Do(func() error { return e.process(originID, event) }) diff --git a/engine/collection/pusher/engine_test.go b/engine/collection/pusher/engine_test.go index 77d7bdb587d..f81a6aff08e 100644 --- a/engine/collection/pusher/engine_test.go +++ b/engine/collection/pusher/engine_test.go @@ -4,6 +4,7 @@ import ( "io/ioutil" "testing" + "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/suite" @@ -14,7 +15,6 @@ import ( "github.com/onflow/flow-go/model/messages" "github.com/onflow/flow-go/module/metrics" module "github.com/onflow/flow-go/module/mock" - "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/mocknetwork" protocol "github.com/onflow/flow-go/state/protocol/mock" storage "github.com/onflow/flow-go/storage/mock" @@ -109,7 +109,7 @@ func (suite *Suite) TestSubmitCollectionGuaranteeNonLocal() { msg := &messages.SubmitCollectionGuarantee{ Guarantee: *guarantee, } - err := suite.engine.Process(network.PushGuarantees, sender.NodeID, msg) + err := suite.engine.Process(channels.PushGuarantees, sender.NodeID, msg) suite.Require().Error(err) suite.conduit.AssertNumberOfCalls(suite.T(), "Multicast", 0) diff --git a/engine/collection/synchronization/engine.go 
b/engine/collection/synchronization/engine.go index 7d08f664e64..a738f5c7623 100644 --- a/engine/collection/synchronization/engine.go +++ b/engine/collection/synchronization/engine.go @@ -8,6 +8,7 @@ import ( "time" "github.com/hashicorp/go-multierror" + "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/onflow/flow-go/engine" @@ -105,7 +106,7 @@ func New( } // register the engine with the network layer and store the conduit - con, err := net.Register(network.ChannelSyncCluster(chainID), e) + con, err := net.Register(channels.ChannelSyncCluster(chainID), e) if err != nil { return nil, fmt.Errorf("could not register engine: %w", err) } @@ -202,7 +203,7 @@ func (e *Engine) SubmitLocal(event interface{}) { // Submit submits the given event from the node with the given origin ID // for processing in a non-blocking manner. It returns instantly and logs // a potential processing error internally when done. -func (e *Engine) Submit(channel network.Channel, originID flow.Identifier, event interface{}) { +func (e *Engine) Submit(channel channels.Channel, originID flow.Identifier, event interface{}) { err := e.Process(channel, originID, event) if err != nil { e.log.Fatal().Err(err).Msg("internal error processing event") @@ -216,7 +217,7 @@ func (e *Engine) ProcessLocal(event interface{}) error { // Process processes the given event from the node with the given origin ID in // a blocking manner. It returns the potential processing error when done. 
-func (e *Engine) Process(channel network.Channel, originID flow.Identifier, event interface{}) error { +func (e *Engine) Process(channel channels.Channel, originID flow.Identifier, event interface{}) error { err := e.process(originID, event) if err != nil { if engine.IsIncompatibleInputTypeError(err) { diff --git a/engine/collection/synchronization/engine_test.go b/engine/collection/synchronization/engine_test.go index f3fc1a6a31d..9ae12aab7b5 100644 --- a/engine/collection/synchronization/engine_test.go +++ b/engine/collection/synchronization/engine_test.go @@ -6,6 +6,7 @@ import ( "testing" "time" + "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" @@ -72,8 +73,8 @@ func (ss *SyncSuite) SetupTest() { // set up the network module mock ss.net = &mocknetwork.Network{} - ss.net.On("Register", netint.ChannelSyncCluster(clusterID), mock.Anything).Return( - func(network netint.Channel, engine netint.MessageProcessor) netint.Conduit { + ss.net.On("Register", channels.ChannelSyncCluster(clusterID), mock.Anything).Return( + func(network channels.Channel, engine netint.MessageProcessor) netint.Conduit { return ss.con }, nil, @@ -437,7 +438,7 @@ func (ss *SyncSuite) TestProcessingMultipleItems() { Height: uint64(1000 + i), } ss.core.On("HandleHeight", mock.Anything, msg.Height).Once() - require.NoError(ss.T(), ss.e.Process(netint.SyncCommittee, originID, msg)) + require.NoError(ss.T(), ss.e.Process(channels.SyncCommittee, originID, msg)) } finalHeight := ss.head.Height @@ -452,7 +453,7 @@ func (ss *SyncSuite) TestProcessingMultipleItems() { ss.core.On("HandleHeight", mock.Anything, msg.Height).Once() ss.con.On("Unicast", mock.Anything, mock.Anything).Return(nil) - require.NoError(ss.T(), ss.e.Process(netint.SyncCommittee, originID, msg)) + require.NoError(ss.T(), ss.e.Process(channels.SyncCommittee, originID, msg)) } // give at least some time to process items diff --git 
a/engine/collection/synchronization/request_handler.go b/engine/collection/synchronization/request_handler.go index 339026be120..ddb98d1fb6f 100644 --- a/engine/collection/synchronization/request_handler.go +++ b/engine/collection/synchronization/request_handler.go @@ -4,6 +4,7 @@ import ( "errors" "fmt" + "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/onflow/flow-go/engine" @@ -87,7 +88,7 @@ func (r *RequestHandlerEngine) SubmitLocal(event interface{}) { // Submit submits the given event from the node with the given origin ID // for processing in a non-blocking manner. It returns instantly and logs // a potential processing error internally when done. -func (r *RequestHandlerEngine) Submit(channel network.Channel, originID flow.Identifier, event interface{}) { +func (r *RequestHandlerEngine) Submit(channel channels.Channel, originID flow.Identifier, event interface{}) { err := r.Process(channel, originID, event) if err != nil { r.log.Fatal().Err(err).Msg("internal error processing event") @@ -101,7 +102,7 @@ func (r *RequestHandlerEngine) ProcessLocal(event interface{}) error { // Process processes the given event from the node with the given origin ID in // a blocking manner. It returns the potential processing error when done. 
-func (r *RequestHandlerEngine) Process(channel network.Channel, originID flow.Identifier, event interface{}) error { +func (r *RequestHandlerEngine) Process(channel channels.Channel, originID flow.Identifier, event interface{}) error { err := r.process(originID, event) if err != nil { if engine.IsIncompatibleInputTypeError(err) { diff --git a/engine/collection/test/cluster_switchover_test.go b/engine/collection/test/cluster_switchover_test.go index b5b6dc6066a..1f74030a453 100644 --- a/engine/collection/test/cluster_switchover_test.go +++ b/engine/collection/test/cluster_switchover_test.go @@ -5,6 +5,7 @@ import ( "testing" "time" + "github.com/onflow/flow-go/network/channels" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -17,7 +18,6 @@ import ( "github.com/onflow/flow-go/model/flow/filter" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/util" - "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/mocknetwork" "github.com/onflow/flow-go/network/stub" "github.com/onflow/flow-go/state/cluster" @@ -113,7 +113,7 @@ func NewClusterSwitchoverTestCase(t *testing.T, conf ClusterSwitchoverTestConf) tc.root, ) tc.sn = new(mocknetwork.Engine) - _, err = consensus.Net.Register(network.ReceiveGuarantees, tc.sn) + _, err = consensus.Net.Register(channels.ReceiveGuarantees, tc.sn) require.NoError(tc.T(), err) // create an epoch builder hooked to each collector's protocol state diff --git a/engine/common/follower/engine.go b/engine/common/follower/engine.go index ac6ed021ef5..0cb54088bfd 100644 --- a/engine/common/follower/engine.go +++ b/engine/common/follower/engine.go @@ -6,6 +6,7 @@ import ( "fmt" "github.com/hashicorp/go-multierror" + "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/onflow/flow-go/engine" @@ -80,7 +81,7 @@ func New( tracer: tracer, } - con, err := net.Register(network.ReceiveBlocks, e) + con, err := net.Register(channels.ReceiveBlocks, e) if err != nil { 
return nil, fmt.Errorf("could not register engine to network: %w", err) } @@ -119,7 +120,7 @@ func (e *Engine) SubmitLocal(event interface{}) { // Submit submits the given event from the node with the given origin ID // for processing in a non-blocking manner. It returns instantly and logs // a potential processing error internally when done. -func (e *Engine) Submit(channel network.Channel, originID flow.Identifier, event interface{}) { +func (e *Engine) Submit(channel channels.Channel, originID flow.Identifier, event interface{}) { e.unit.Launch(func() { err := e.Process(channel, originID, event) if err != nil { @@ -137,7 +138,7 @@ func (e *Engine) ProcessLocal(event interface{}) error { // Process processes the given event from the node with the given origin ID in // a blocking manner. It returns the potential processing error when done. -func (e *Engine) Process(channel network.Channel, originID flow.Identifier, event interface{}) error { +func (e *Engine) Process(channel channels.Channel, originID flow.Identifier, event interface{}) error { return e.unit.Do(func() error { return e.process(originID, event) }) diff --git a/engine/common/follower/engine_test.go b/engine/common/follower/engine_test.go index 5b526cd555d..f780b1b8781 100644 --- a/engine/common/follower/engine_test.go +++ b/engine/common/follower/engine_test.go @@ -3,6 +3,7 @@ package follower_test import ( "testing" + "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" @@ -15,7 +16,6 @@ import ( metrics "github.com/onflow/flow-go/module/metrics" module "github.com/onflow/flow-go/module/mock" "github.com/onflow/flow-go/module/trace" - "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/mocknetwork" protocol "github.com/onflow/flow-go/state/protocol/mock" realstorage "github.com/onflow/flow-go/storage" @@ -109,7 +109,7 @@ func (suite *Suite) TestHandlePendingBlock() { // submit the block 
proposal := unittest.ProposalFromBlock(&block) - err := suite.engine.Process(network.ReceiveBlocks, originID, proposal) + err := suite.engine.Process(channels.ReceiveBlocks, originID, proposal) assert.Nil(suite.T(), err) suite.follower.AssertNotCalled(suite.T(), "SubmitProposal", mock.Anything) @@ -145,7 +145,7 @@ func (suite *Suite) TestHandleProposal() { // submit the block proposal := unittest.ProposalFromBlock(&block) - err := suite.engine.Process(network.ReceiveBlocks, originID, proposal) + err := suite.engine.Process(channels.ReceiveBlocks, originID, proposal) assert.Nil(suite.T(), err) suite.follower.AssertExpectations(suite.T()) @@ -168,7 +168,7 @@ func (suite *Suite) TestHandleProposalSkipProposalThreshold() { // submit the block proposal := unittest.ProposalFromBlock(&block) - err := suite.engine.Process(network.ReceiveBlocks, originID, proposal) + err := suite.engine.Process(channels.ReceiveBlocks, originID, proposal) assert.NoError(suite.T(), err) // block should be dropped - not added to state or cache @@ -224,7 +224,7 @@ func (suite *Suite) TestHandleProposalWithPendingChildren() { // submit the block proposal proposal := unittest.ProposalFromBlock(&block) - err := suite.engine.Process(network.ReceiveBlocks, originID, proposal) + err := suite.engine.Process(channels.ReceiveBlocks, originID, proposal) assert.Nil(suite.T(), err) suite.follower.AssertExpectations(suite.T()) diff --git a/engine/common/provider/engine.go b/engine/common/provider/engine.go index 99e1890c22b..2be3c068a1e 100644 --- a/engine/common/provider/engine.go +++ b/engine/common/provider/engine.go @@ -4,6 +4,7 @@ import ( "errors" "fmt" + "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/vmihailenco/msgpack" @@ -35,7 +36,7 @@ type Engine struct { me module.Local state protocol.State con network.Conduit - channel network.Channel + channel channels.Channel selector flow.IdentityFilter retrieve RetrieveFunc } @@ -44,7 +45,7 @@ type Engine struct { // from a 
node within the set obtained by applying the provided selector filter. It uses the injected retrieve function // to manage the fullfilment of these requests. func New(log zerolog.Logger, metrics module.EngineMetrics, net network.Network, me module.Local, state protocol.State, - channel network.Channel, selector flow.IdentityFilter, retrieve RetrieveFunc) (*Engine, error) { + channel channels.Channel, selector flow.IdentityFilter, retrieve RetrieveFunc) (*Engine, error) { // make sure we don't respond to requests sent by self or unauthorized nodes selector = filter.And( @@ -101,7 +102,7 @@ func (e *Engine) SubmitLocal(message interface{}) { // Submit submits the given message from the node with the given origin ID // for processing in a non-blocking manner. It returns instantly and logs // a potential processing error internally when done. -func (e *Engine) Submit(channel network.Channel, originID flow.Identifier, message interface{}) { +func (e *Engine) Submit(channel channels.Channel, originID flow.Identifier, message interface{}) { e.unit.Launch(func() { err := e.Process(channel, originID, message) if err != nil { @@ -119,7 +120,7 @@ func (e *Engine) ProcessLocal(message interface{}) error { // Process processes the given message from the node with the given origin ID in // a blocking manner. It returns the potential processing error when done. 
-func (e *Engine) Process(channel network.Channel, originID flow.Identifier, message interface{}) error { +func (e *Engine) Process(channel channels.Channel, originID flow.Identifier, message interface{}) error { return e.unit.Do(func() error { return e.process(originID, message) }) diff --git a/engine/common/requester/engine.go b/engine/common/requester/engine.go index f0c9600398a..88861de558a 100644 --- a/engine/common/requester/engine.go +++ b/engine/common/requester/engine.go @@ -6,6 +6,7 @@ import ( "math/rand" "time" + "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/vmihailenco/msgpack" @@ -40,7 +41,7 @@ type Engine struct { me module.Local state protocol.State con network.Conduit - channel network.Channel + channel channels.Channel selector flow.IdentityFilter create CreateFunc handle HandleFunc @@ -52,7 +53,7 @@ type Engine struct { // within the set obtained by applying the provided selector filter. The options allow customization of the parameters // related to the batch and retry logic. 
func New(log zerolog.Logger, metrics module.EngineMetrics, net network.Network, me module.Local, state protocol.State, - channel network.Channel, selector flow.IdentityFilter, create CreateFunc, options ...OptionFunc) (*Engine, error) { + channel channels.Channel, selector flow.IdentityFilter, create CreateFunc, options ...OptionFunc) (*Engine, error) { // initialize the default config cfg := Config{ @@ -113,7 +114,7 @@ func New(log zerolog.Logger, metrics module.EngineMetrics, net network.Network, } // register the engine with the network layer and store the conduit - con, err := net.Register(network.Channel(channel), e) + con, err := net.Register(channels.Channel(channel), e) if err != nil { return nil, fmt.Errorf("could not register engine: %w", err) } @@ -162,7 +163,7 @@ func (e *Engine) SubmitLocal(message interface{}) { // Submit submits the given message from the node with the given origin ID // for processing in a non-blocking manner. It returns instantly and logs // a potential processing error internally when done. -func (e *Engine) Submit(channel network.Channel, originID flow.Identifier, message interface{}) { +func (e *Engine) Submit(channel channels.Channel, originID flow.Identifier, message interface{}) { e.unit.Launch(func() { err := e.Process(channel, originID, message) if err != nil { @@ -180,7 +181,7 @@ func (e *Engine) ProcessLocal(message interface{}) error { // Process processes the given message from the node with the given origin ID in // a blocking manner. It returns the potential processing error when done. 
-func (e *Engine) Process(channel network.Channel, originID flow.Identifier, message interface{}) error { +func (e *Engine) Process(channel channels.Channel, originID flow.Identifier, message interface{}) error { return e.unit.Do(func() error { return e.process(originID, message) }) diff --git a/engine/common/splitter/engine.go b/engine/common/splitter/engine.go index ae804e1e506..1a8c0c9681a 100644 --- a/engine/common/splitter/engine.go +++ b/engine/common/splitter/engine.go @@ -5,6 +5,7 @@ import ( "sync" "github.com/hashicorp/go-multierror" + "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/onflow/flow-go/engine" @@ -19,13 +20,13 @@ type Engine struct { unit *engine.Unit // used to manage concurrency & shutdown log zerolog.Logger // used to log relevant actions with context engines map[network.MessageProcessor]struct{} // stores registered engines - channel network.Channel // the channel that this splitter listens on + channel channels.Channel // the channel that this splitter listens on } // New creates a new splitter engine. func New( log zerolog.Logger, - channel network.Channel, + channel channels.Channel, ) *Engine { return &Engine{ unit: engine.NewUnit(), @@ -70,7 +71,7 @@ func (e *Engine) Done() <-chan struct{} { // Process processes the given event from the node with the given origin ID // in a blocking manner. It returns the potential processing error when // done. 
-func (e *Engine) Process(channel network.Channel, originID flow.Identifier, event interface{}) error { +func (e *Engine) Process(channel channels.Channel, originID flow.Identifier, event interface{}) error { return e.unit.Do(func() error { if channel != e.channel { return fmt.Errorf("received event on unknown channel %s", channel) diff --git a/engine/common/splitter/engine_test.go b/engine/common/splitter/engine_test.go index 3948ab1c702..c2445dc9231 100644 --- a/engine/common/splitter/engine_test.go +++ b/engine/common/splitter/engine_test.go @@ -6,11 +6,11 @@ import ( "testing" "github.com/hashicorp/go-multierror" + "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/stretchr/testify/suite" "github.com/onflow/flow-go/engine/common/splitter" - "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/mocknetwork" "github.com/onflow/flow-go/utils/unittest" ) @@ -26,12 +26,12 @@ func getEvent() interface{} { type Suite struct { suite.Suite - channel network.Channel + channel channels.Channel engine *splitter.Engine } func (suite *Suite) SetupTest() { - suite.channel = network.Channel("test-channel") + suite.channel = channels.Channel("test-channel") suite.engine = splitter.New(zerolog.Logger{}, suite.channel) } @@ -94,7 +94,7 @@ func (suite *Suite) TestProcessUnknownChannel() { id := unittest.IdentifierFixture() event := getEvent() - unknownChannel := network.Channel("unknown-chan") + unknownChannel := channels.Channel("unknown-chan") engine := new(mocknetwork.Engine) diff --git a/engine/common/splitter/network/example_test.go b/engine/common/splitter/network/example_test.go index c5aad51eaee..b883ecb7bf3 100644 --- a/engine/common/splitter/network/example_test.go +++ b/engine/common/splitter/network/example_test.go @@ -4,11 +4,11 @@ import ( "fmt" "math/rand" + "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" splitterNetwork "github.com/onflow/flow-go/engine/common/splitter/network" 
"github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/network" testnet "github.com/onflow/flow-go/utils/unittest/network" ) @@ -27,7 +27,7 @@ func Example() { // create engines engineProcessFunc := func(engineID int) testnet.EngineProcessFunc { - return func(channel network.Channel, originID flow.Identifier, event interface{}) error { + return func(channel channels.Channel, originID flow.Identifier, event interface{}) error { fmt.Printf("Engine %d received message: channel=%v, originID=%v, event=%v\n", engineID, channel, originID, event) return nil } @@ -37,7 +37,7 @@ func Example() { engine3 := testnet.NewEngine().OnProcess(engineProcessFunc(3)) // register engines with splitter network - channel := network.Channel("foo-channel") + channel := channels.Channel("foo-channel") _, err := splitterNet.Register(channel, engine1) if err != nil { fmt.Println(err) diff --git a/engine/common/splitter/network/network.go b/engine/common/splitter/network/network.go index 8275ceb39d4..a11a650ee2e 100644 --- a/engine/common/splitter/network/network.go +++ b/engine/common/splitter/network/network.go @@ -7,6 +7,7 @@ import ( "github.com/ipfs/go-datastore" "github.com/libp2p/go-libp2p-core/protocol" + "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" splitterEngine "github.com/onflow/flow-go/engine/common/splitter" @@ -26,8 +27,8 @@ type Network struct { net network.Network mu sync.RWMutex log zerolog.Logger - splitters map[network.Channel]*splitterEngine.Engine // stores splitters for each channel - conduits map[network.Channel]network.Conduit // stores conduits for all registered channels + splitters map[channels.Channel]*splitterEngine.Engine // stores splitters for each channel + conduits map[channels.Channel]network.Conduit // stores conduits for all registered channels *component.ComponentManager } @@ -40,8 +41,8 @@ func NewNetwork( ) *Network { n := &Network{ net: net, - splitters: make(map[network.Channel]*splitterEngine.Engine), - conduits: 
make(map[network.Channel]network.Conduit), + splitters: make(map[channels.Channel]*splitterEngine.Engine), + conduits: make(map[channels.Channel]network.Conduit), log: log, } @@ -61,7 +62,7 @@ func NewNetwork( return n } -func (n *Network) RegisterBlobService(channel network.Channel, store datastore.Batching, opts ...network.BlobServiceOption) (network.BlobService, error) { +func (n *Network) RegisterBlobService(channel channels.Channel, store datastore.Batching, opts ...network.BlobServiceOption) (network.BlobService, error) { return n.net.RegisterBlobService(channel, store, opts...) } @@ -72,7 +73,7 @@ func (n *Network) RegisterPingService(pid protocol.ID, provider network.PingInfo // Register will subscribe the given engine with the spitter on the given channel, and all registered // engines will be notified with incoming messages on the channel. // The returned Conduit can be used to send messages to engines on other nodes subscribed to the same channel -func (n *Network) Register(channel network.Channel, engine network.MessageProcessor) (network.Conduit, error) { +func (n *Network) Register(channel channels.Channel, engine network.MessageProcessor) (network.Conduit, error) { n.mu.Lock() defer n.mu.Unlock() diff --git a/engine/common/splitter/network/network_test.go b/engine/common/splitter/network/network_test.go index 775ccfec350..e6817092af7 100644 --- a/engine/common/splitter/network/network_test.go +++ b/engine/common/splitter/network/network_test.go @@ -3,6 +3,7 @@ package network_test import ( "testing" + "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/suite" @@ -26,16 +27,16 @@ type Suite struct { con *mocknetwork.Conduit net *splitternetwork.Network - engines map[network.Channel]network.MessageProcessor + engines map[channels.Channel]network.MessageProcessor } func (suite *Suite) SetupTest() { net := new(mocknetwork.Network) suite.con = new(mocknetwork.Conduit) - 
suite.engines = make(map[network.Channel]network.MessageProcessor) + suite.engines = make(map[channels.Channel]network.MessageProcessor) net.On("Register", mock.AnythingOfType("network.Channel"), mock.Anything).Run(func(args mock.Arguments) { - channel, _ := args.Get(0).(network.Channel) + channel, _ := args.Get(0).(channels.Channel) engine, ok := args.Get(1).(network.MessageProcessor) suite.Assert().True(ok) suite.engines[channel] = engine @@ -55,9 +56,9 @@ func (suite *Suite) TestHappyPath() { id := unittest.IdentifierFixture() event := getEvent() - chan1 := network.Channel("test-chan-1") - chan2 := network.Channel("test-chan-2") - chan3 := network.Channel("test-chan-3") + chan1 := channels.Channel("test-chan-1") + chan2 := channels.Channel("test-chan-2") + chan3 := channels.Channel("test-chan-3") engine1 := new(mocknetwork.Engine) engine2 := new(mocknetwork.Engine) diff --git a/engine/common/synchronization/engine.go b/engine/common/synchronization/engine.go index 2798d681cda..5540dc2ef5a 100644 --- a/engine/common/synchronization/engine.go +++ b/engine/common/synchronization/engine.go @@ -8,6 +8,7 @@ import ( "time" "github.com/hashicorp/go-multierror" + "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/onflow/flow-go/engine" @@ -100,7 +101,7 @@ func New( } // register the engine with the network layer and store the conduit - con, err := net.Register(network.SyncCommittee, e) + con, err := net.Register(channels.SyncCommittee, e) if err != nil { return nil, fmt.Errorf("could not register engine: %w", err) } @@ -200,7 +201,7 @@ func (e *Engine) SubmitLocal(event interface{}) { // Submit submits the given event from the node with the given origin ID // for processing in a non-blocking manner. It returns instantly and logs // a potential processing error internally when done. 
-func (e *Engine) Submit(channel network.Channel, originID flow.Identifier, event interface{}) { +func (e *Engine) Submit(channel channels.Channel, originID flow.Identifier, event interface{}) { err := e.Process(channel, originID, event) if err != nil { e.log.Fatal().Err(err).Msg("internal error processing event") @@ -214,7 +215,7 @@ func (e *Engine) ProcessLocal(event interface{}) error { // Process processes the given event from the node with the given origin ID in // a blocking manner. It returns the potential processing error when done. -func (e *Engine) Process(channel network.Channel, originID flow.Identifier, event interface{}) error { +func (e *Engine) Process(channel channels.Channel, originID flow.Identifier, event interface{}) error { err := e.process(originID, event) if err != nil { if engine.IsIncompatibleInputTypeError(err) { diff --git a/engine/common/synchronization/engine_test.go b/engine/common/synchronization/engine_test.go index 22bfa9faf6b..04eb52a90de 100644 --- a/engine/common/synchronization/engine_test.go +++ b/engine/common/synchronization/engine_test.go @@ -6,6 +6,7 @@ import ( "testing" "time" + "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" @@ -79,7 +80,7 @@ func (ss *SyncSuite) SetupTest() { // set up the network module mock ss.net = &mocknetwork.Network{} ss.net.On("Register", mock.Anything, mock.Anything).Return( - func(channel netint.Channel, engine netint.MessageProcessor) netint.Conduit { + func(channel channels.Channel, engine netint.MessageProcessor) netint.Conduit { return ss.con }, nil, @@ -489,7 +490,7 @@ func (ss *SyncSuite) TestProcessingMultipleItems() { Height: uint64(1000 + i), } ss.core.On("HandleHeight", mock.Anything, msg.Height).Once() - require.NoError(ss.T(), ss.e.Process(netint.SyncCommittee, originID, msg)) + require.NoError(ss.T(), ss.e.Process(channels.SyncCommittee, originID, msg)) } finalHeight := ss.head.Height @@ 
-504,7 +505,7 @@ func (ss *SyncSuite) TestProcessingMultipleItems() { ss.core.On("HandleHeight", mock.Anything, msg.Height).Once() ss.con.On("Unicast", mock.Anything, mock.Anything).Return(nil) - require.NoError(ss.T(), ss.e.Process(netint.SyncCommittee, originID, msg)) + require.NoError(ss.T(), ss.e.Process(channels.SyncCommittee, originID, msg)) } // give at least some time to process items diff --git a/engine/common/synchronization/request_handler.go b/engine/common/synchronization/request_handler.go index bc88771ee08..b5dabe4ac64 100644 --- a/engine/common/synchronization/request_handler.go +++ b/engine/common/synchronization/request_handler.go @@ -4,6 +4,7 @@ import ( "errors" "fmt" + "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/onflow/flow-go/engine" @@ -13,7 +14,6 @@ import ( "github.com/onflow/flow-go/module/lifecycle" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/module/synchronization" - "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/storage" ) @@ -80,7 +80,7 @@ func NewRequestHandler( // Process processes the given event from the node with the given origin ID in // a blocking manner. It returns the potential processing error when done. 
-func (r *RequestHandler) Process(channel network.Channel, originID flow.Identifier, event interface{}) error { +func (r *RequestHandler) Process(channel channels.Channel, originID flow.Identifier, event interface{}) error { err := r.process(originID, event) if err != nil { if engine.IsIncompatibleInputTypeError(err) { diff --git a/engine/common/synchronization/request_handler_engine.go b/engine/common/synchronization/request_handler_engine.go index 5b75b4c7cf4..75ca9dd5d17 100644 --- a/engine/common/synchronization/request_handler_engine.go +++ b/engine/common/synchronization/request_handler_engine.go @@ -3,6 +3,7 @@ package synchronization import ( "fmt" + "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/onflow/flow-go/model/flow" @@ -62,7 +63,7 @@ func NewRequestHandlerEngine( ) (*RequestHandlerEngine, error) { e := &RequestHandlerEngine{} - con, err := net.Register(network.PublicSyncCommittee, e) + con, err := net.Register(channels.PublicSyncCommittee, e) if err != nil { return nil, fmt.Errorf("could not register engine: %w", err) } @@ -81,7 +82,7 @@ func NewRequestHandlerEngine( return e, nil } -func (r *RequestHandlerEngine) Process(channel network.Channel, originID flow.Identifier, event interface{}) error { +func (r *RequestHandlerEngine) Process(channel channels.Channel, originID flow.Identifier, event interface{}) error { return r.requestHandler.Process(channel, originID, event) } diff --git a/engine/consensus/compliance/core_test.go b/engine/consensus/compliance/core_test.go index e984214baf0..98d59f4103c 100644 --- a/engine/consensus/compliance/core_test.go +++ b/engine/consensus/compliance/core_test.go @@ -6,6 +6,7 @@ import ( "testing" "time" + "github.com/onflow/flow-go/network/channels" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -185,7 +186,7 @@ func (cs *ComplianceCoreSuite) SetupTest() { // set up network module mock cs.net = 
&mocknetwork.Network{} cs.net.On("Register", mock.Anything, mock.Anything).Return( - func(channel netint.Channel, engine netint.MessageProcessor) netint.Conduit { + func(channel channels.Channel, engine netint.MessageProcessor) netint.Conduit { return cs.con }, nil, diff --git a/engine/consensus/compliance/engine.go b/engine/consensus/compliance/engine.go index 6bf87ed6874..1141f38ee85 100644 --- a/engine/consensus/compliance/engine.go +++ b/engine/consensus/compliance/engine.go @@ -6,6 +6,7 @@ import ( "fmt" "time" + "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/onflow/flow-go/consensus/hotstuff/model" @@ -152,7 +153,7 @@ func NewEngine( } // register the core with the network layer and store the conduit - eng.con, err = net.Register(network.ConsensusCommittee, eng) + eng.con, err = net.Register(channels.ConsensusCommittee, eng) if err != nil { return nil, fmt.Errorf("could not register core: %w", err) } @@ -220,7 +221,7 @@ func (e *Engine) SubmitLocal(event interface{}) { // Submit submits the given event from the node with the given origin ID // for processing in a non-blocking manner. It returns instantly and logs // a potential processing error internally when done. -func (e *Engine) Submit(channel network.Channel, originID flow.Identifier, event interface{}) { +func (e *Engine) Submit(channel channels.Channel, originID flow.Identifier, event interface{}) { err := e.Process(channel, originID, event) if err != nil { e.log.Fatal().Err(err).Msg("internal error processing event") @@ -234,7 +235,7 @@ func (e *Engine) ProcessLocal(event interface{}) error { // Process processes the given event from the node with the given origin ID in // a blocking manner. It returns the potential processing error when done. 
-func (e *Engine) Process(channel network.Channel, originID flow.Identifier, event interface{}) error { +func (e *Engine) Process(channel channels.Channel, originID flow.Identifier, event interface{}) error { err := e.messageHandler.Process(originID, event) if err != nil { if engine.IsIncompatibleInputTypeError(err) { diff --git a/engine/consensus/compliance/engine_test.go b/engine/consensus/compliance/engine_test.go index 54ccfbbf338..c98d1d5f724 100644 --- a/engine/consensus/compliance/engine_test.go +++ b/engine/consensus/compliance/engine_test.go @@ -6,6 +6,7 @@ import ( "testing" "time" + "github.com/onflow/flow-go/network/channels" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" @@ -15,7 +16,6 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/messages" modulemock "github.com/onflow/flow-go/module/mock" - "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/utils/unittest" ) @@ -173,7 +173,7 @@ func (cs *ComplianceSuite) TestSubmittingMultipleEntries() { SigData: vote.SigData, }).Return().Once() // execute the vote submission - _ = cs.engine.Process(network.ConsensusCommittee, originID, &vote) + _ = cs.engine.Process(channels.ConsensusCommittee, originID, &vote) } wg.Done() }() @@ -187,7 +187,7 @@ func (cs *ComplianceSuite) TestSubmittingMultipleEntries() { // store the data for retrieval cs.headerDB[block.Header.ParentID] = cs.head cs.hotstuff.On("SubmitProposal", block.Header, cs.head.View).Return() - _ = cs.engine.Process(network.ConsensusCommittee, originID, proposal) + _ = cs.engine.Process(channels.ConsensusCommittee, originID, proposal) wg.Done() }() diff --git a/engine/consensus/dkg/messaging_engine.go b/engine/consensus/dkg/messaging_engine.go index aecb54cd14e..e7a9fb5c8f7 100644 --- a/engine/consensus/dkg/messaging_engine.go +++ b/engine/consensus/dkg/messaging_engine.go @@ -5,6 +5,7 @@ import ( "fmt" "time" + 
"github.com/onflow/flow-go/network/channels" "github.com/sethvargo/go-retry" "github.com/rs/zerolog" @@ -56,7 +57,7 @@ func NewMessagingEngine( } var err error - eng.conduit, err = net.Register(network.DKGCommittee, &eng) + eng.conduit, err = net.Register(channels.DKGCommittee, &eng) if err != nil { return nil, fmt.Errorf("could not register dkg network engine: %w", err) } @@ -90,7 +91,7 @@ func (e *MessagingEngine) SubmitLocal(event interface{}) { } // Submit implements the network Engine interface -func (e *MessagingEngine) Submit(_ network.Channel, originID flow.Identifier, event interface{}) { +func (e *MessagingEngine) Submit(_ channels.Channel, originID flow.Identifier, event interface{}) { e.unit.Launch(func() { err := e.process(originID, event) if engine.IsInvalidInputError(err) { @@ -114,7 +115,7 @@ func (e *MessagingEngine) ProcessLocal(event interface{}) error { } // Process implements the network Engine interface -func (e *MessagingEngine) Process(_ network.Channel, originID flow.Identifier, event interface{}) error { +func (e *MessagingEngine) Process(_ channels.Channel, originID flow.Identifier, event interface{}) error { return e.unit.Do(func() error { return e.process(originID, event) }) diff --git a/engine/consensus/dkg/messaging_engine_test.go b/engine/consensus/dkg/messaging_engine_test.go index 04c1d30e1ae..3261680b1f8 100644 --- a/engine/consensus/dkg/messaging_engine_test.go +++ b/engine/consensus/dkg/messaging_engine_test.go @@ -4,6 +4,7 @@ import ( "testing" "time" + "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -11,7 +12,6 @@ import ( msg "github.com/onflow/flow-go/model/messages" "github.com/onflow/flow-go/module/dkg" module "github.com/onflow/flow-go/module/mock" - "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/mocknetwork" "github.com/onflow/flow-go/utils/unittest" ) @@ -93,7 +93,7 @@ func 
TestForwardIncomingMessages(t *testing.T) { close(doneCh) }() - err := e.Process(network.DKGCommittee, originID, &expectedMsg.DKGMessage) + err := e.Process(channels.DKGCommittee, originID, &expectedMsg.DKGMessage) require.NoError(t, err) unittest.RequireCloseBefore(t, doneCh, time.Second, "message not received") diff --git a/engine/consensus/ingestion/engine.go b/engine/consensus/ingestion/engine.go index 5e10b614700..3205c242a48 100644 --- a/engine/consensus/ingestion/engine.go +++ b/engine/consensus/ingestion/engine.go @@ -6,6 +6,7 @@ import ( "context" "fmt" + "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/onflow/flow-go/engine" @@ -100,7 +101,7 @@ func New( e.Component = componentManagerBuilder.Build() // register the engine with the network layer and store the conduit - con, err := net.Register(network.ReceiveGuarantees, e) + con, err := net.Register(channels.ReceiveGuarantees, e) if err != nil { return nil, fmt.Errorf("could not register engine: %w", err) } @@ -119,7 +120,7 @@ func (e *Engine) SubmitLocal(event interface{}) { // Submit submits the given event from the node with the given origin ID // for processing in a non-blocking manner. It returns instantly and logs // a potential processing error internally when done. -func (e *Engine) Submit(channel network.Channel, originID flow.Identifier, event interface{}) { +func (e *Engine) Submit(channel channels.Channel, originID flow.Identifier, event interface{}) { err := e.Process(channel, originID, event) if err != nil { e.log.Fatal().Err(err).Msg("internal error processing event") @@ -133,7 +134,7 @@ func (e *Engine) ProcessLocal(event interface{}) error { // Process processes the given event from the node with the given origin ID in // a blocking manner. It returns error only in unexpected scenario. 
-func (e *Engine) Process(channel network.Channel, originID flow.Identifier, event interface{}) error { +func (e *Engine) Process(channel channels.Channel, originID flow.Identifier, event interface{}) error { err := e.messageHandler.Process(originID, event) if err != nil { if engine.IsIncompatibleInputTypeError(err) { diff --git a/engine/consensus/ingestion/engine_test.go b/engine/consensus/ingestion/engine_test.go index e913cd49fee..64263994ce7 100644 --- a/engine/consensus/ingestion/engine_test.go +++ b/engine/consensus/ingestion/engine_test.go @@ -8,6 +8,7 @@ import ( "testing" "time" + "github.com/onflow/flow-go/network/channels" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" @@ -43,8 +44,8 @@ func (s *IngestionSuite) SetupTest() { // set up network module mock s.net = &mocknetwork.Network{} - s.net.On("Register", netint.ReceiveGuarantees, mock.Anything).Return( - func(channel netint.Channel, engine netint.MessageProcessor) netint.Conduit { + s.net.On("Register", channels.ReceiveGuarantees, mock.Anything).Return( + func(channel channels.Channel, engine netint.MessageProcessor) netint.Conduit { return s.con }, nil, @@ -90,7 +91,7 @@ func (s *IngestionSuite) TestSubmittingMultipleEntries() { }).Return(true) // execute the vote submission - _ = s.ingest.Process(netint.ProvideCollections, originID, guarantee) + _ = s.ingest.Process(channels.ProvideCollections, originID, guarantee) } wg.Done() }() diff --git a/engine/consensus/matching/engine.go b/engine/consensus/matching/engine.go index 6ba76d2b6e3..f9208458f50 100644 --- a/engine/consensus/matching/engine.go +++ b/engine/consensus/matching/engine.go @@ -3,6 +3,7 @@ package matching import ( "fmt" + "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/onflow/flow-go/consensus/hotstuff/model" @@ -84,7 +85,7 @@ func NewEngine( } // register engine with the receipt provider - _, err = net.Register(network.ReceiveReceipts, e) + 
_, err = net.Register(channels.ReceiveReceipts, e) if err != nil { return nil, fmt.Errorf("could not register for results: %w", err) } @@ -119,7 +120,7 @@ func (e *Engine) SubmitLocal(event interface{}) { // Submit submits the given event from the node with the given origin ID // for processing in a non-blocking manner. It returns instantly and logs // a potential processing error internally when done. -func (e *Engine) Submit(channel network.Channel, originID flow.Identifier, event interface{}) { +func (e *Engine) Submit(channel channels.Channel, originID flow.Identifier, event interface{}) { err := e.Process(channel, originID, event) if err != nil { e.log.Fatal().Err(err).Msg("internal error processing event") @@ -133,7 +134,7 @@ func (e *Engine) ProcessLocal(event interface{}) error { // Process processes the given event from the node with the given origin ID in // a blocking manner. It returns the potential processing error when done. -func (e *Engine) Process(channel network.Channel, originID flow.Identifier, event interface{}) error { +func (e *Engine) Process(channel channels.Channel, originID flow.Identifier, event interface{}) error { err := e.process(originID, event) if err != nil { if engine.IsIncompatibleInputTypeError(err) { diff --git a/engine/consensus/matching/engine_test.go b/engine/consensus/matching/engine_test.go index 7f00062c5a6..ee16f16201b 100644 --- a/engine/consensus/matching/engine_test.go +++ b/engine/consensus/matching/engine_test.go @@ -5,6 +5,7 @@ import ( "testing" "time" + "github.com/onflow/flow-go/network/channels" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" @@ -15,7 +16,6 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/metrics" mockmodule "github.com/onflow/flow-go/module/mock" - "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/mocknetwork" mockprotocol "github.com/onflow/flow-go/state/protocol/mock" 
mockstorage "github.com/onflow/flow-go/storage/mock" @@ -122,7 +122,7 @@ func (s *MatchingEngineSuite) TestMultipleProcessingItems() { go func() { defer wg.Done() for _, receipt := range receipts { - err := s.engine.Process(network.ReceiveReceipts, originID, receipt) + err := s.engine.Process(channels.ReceiveReceipts, originID, receipt) s.Require().NoError(err, "should add receipt and result to mempool if valid") } }() diff --git a/engine/consensus/provider/engine.go b/engine/consensus/provider/engine.go index 58c21d901c7..1d56ef42b8e 100644 --- a/engine/consensus/provider/engine.go +++ b/engine/consensus/provider/engine.go @@ -5,6 +5,7 @@ package provider import ( "fmt" + "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/onflow/flow-go/engine" @@ -54,7 +55,7 @@ func New( } // register the engine with the network layer and store the conduit - con, err := net.Register(network.PushBlocks, e) + con, err := net.Register(channels.PushBlocks, e) if err != nil { return nil, fmt.Errorf("could not register engine: %w", err) } @@ -90,7 +91,7 @@ func (e *Engine) SubmitLocal(event interface{}) { // Submit submits the given event from the node with the given origin ID // for processing in a non-blocking manner. It returns instantly and logs // a potential processing error internally when done. -func (e *Engine) Submit(channel network.Channel, originID flow.Identifier, event interface{}) { +func (e *Engine) Submit(channel channels.Channel, originID flow.Identifier, event interface{}) { e.unit.Launch(func() { err := e.Process(channel, originID, event) if err != nil { @@ -108,7 +109,7 @@ func (e *Engine) ProcessLocal(event interface{}) error { // Process processes the given event from the node with the given origin ID in // a blocking manner. It returns the potential processing error when done. 
-func (e *Engine) Process(channel network.Channel, originID flow.Identifier, event interface{}) error { +func (e *Engine) Process(channel channels.Channel, originID flow.Identifier, event interface{}) error { return e.unit.Do(func() error { return e.process(originID, event) }) diff --git a/engine/consensus/sealing/engine.go b/engine/consensus/sealing/engine.go index a14018b2481..cceefdf1264 100644 --- a/engine/consensus/sealing/engine.go +++ b/engine/consensus/sealing/engine.go @@ -4,6 +4,7 @@ import ( "fmt" "github.com/gammazero/workerpool" + "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/onflow/flow-go/consensus/hotstuff/model" @@ -124,13 +125,13 @@ func NewEngine(log zerolog.Logger, } // register engine with the approval provider - _, err = net.Register(network.ReceiveApprovals, e) + _, err = net.Register(channels.ReceiveApprovals, e) if err != nil { return nil, fmt.Errorf("could not register for approvals: %w", err) } // register engine to the channel for requesting missing approvals - approvalConduit, err := net.Register(network.RequestApprovalsByChunk, e) + approvalConduit, err := net.Register(channels.RequestApprovalsByChunk, e) if err != nil { return nil, fmt.Errorf("could not register for requesting approvals: %w", err) } @@ -248,7 +249,7 @@ func (e *Engine) setupMessageHandler(getSealingConfigs module.SealingConfigsGett } // Process sends event into channel with pending events. Generally speaking shouldn't lock for too long. -func (e *Engine) Process(channel network.Channel, originID flow.Identifier, event interface{}) error { +func (e *Engine) Process(channel channels.Channel, originID flow.Identifier, event interface{}) error { err := e.messageHandler.Process(originID, event) if err != nil { if engine.IsIncompatibleInputTypeError(err) { @@ -390,7 +391,7 @@ func (e *Engine) SubmitLocal(event interface{}) { // Submit submits the given event from the node with the given origin ID // for processing in a non-blocking manner. 
It returns instantly and logs // a potential processing error internally when done. -func (e *Engine) Submit(channel network.Channel, originID flow.Identifier, event interface{}) { +func (e *Engine) Submit(channel channels.Channel, originID flow.Identifier, event interface{}) { err := e.Process(channel, originID, event) if err != nil { e.log.Fatal().Err(err).Msg("internal error processing event") diff --git a/engine/consensus/sealing/engine_test.go b/engine/consensus/sealing/engine_test.go index 35ade14f14d..047e986e8f2 100644 --- a/engine/consensus/sealing/engine_test.go +++ b/engine/consensus/sealing/engine_test.go @@ -7,6 +7,7 @@ import ( "testing" "time" + "github.com/onflow/flow-go/network/channels" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" @@ -17,7 +18,6 @@ import ( "github.com/onflow/flow-go/model/messages" "github.com/onflow/flow-go/module/metrics" mockmodule "github.com/onflow/flow-go/module/mock" - "github.com/onflow/flow-go/network" mockprotocol "github.com/onflow/flow-go/state/protocol/mock" mockstorage "github.com/onflow/flow-go/storage/mock" "github.com/onflow/flow-go/utils/unittest" @@ -166,7 +166,7 @@ func (s *SealingEngineSuite) TestMultipleProcessingItems() { go func() { defer wg.Done() for _, approval := range approvals { - err := s.engine.Process(network.ReceiveApprovals, approverID, approval) + err := s.engine.Process(channels.ReceiveApprovals, approverID, approval) s.Require().NoError(err, "should process approval") } }() @@ -174,7 +174,7 @@ func (s *SealingEngineSuite) TestMultipleProcessingItems() { go func() { defer wg.Done() for _, approval := range responseApprovals { - err := s.engine.Process(network.ReceiveApprovals, approverID, approval) + err := s.engine.Process(channels.ReceiveApprovals, approverID, approval) s.Require().NoError(err, "should process approval") } }() @@ -193,7 +193,7 @@ func (s *SealingEngineSuite) TestApprovalInvalidOrigin() { originID := unittest.IdentifierFixture() approval := 
unittest.ResultApprovalFixture() // with random ApproverID - err := s.engine.Process(network.ReceiveApprovals, originID, approval) + err := s.engine.Process(channels.ReceiveApprovals, originID, approval) s.Require().NoError(err, "approval from unknown verifier should be dropped but not error") // sealing engine has at least 100ms ticks for processing events diff --git a/engine/execution/execution_test.go b/engine/execution/execution_test.go index 63bc3e4f511..66953f72497 100644 --- a/engine/execution/execution_test.go +++ b/engine/execution/execution_test.go @@ -6,6 +6,7 @@ import ( "testing" "time" + "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog/log" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" @@ -20,7 +21,6 @@ import ( "github.com/onflow/flow-go/model/flow/order" "github.com/onflow/flow-go/model/messages" "github.com/onflow/flow-go/module/signature" - "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/mocknetwork" "github.com/onflow/flow-go/network/stub" "github.com/onflow/flow-go/state/cluster" @@ -28,7 +28,7 @@ import ( ) func sendBlock(exeNode *testmock.ExecutionNode, from flow.Identifier, proposal *messages.BlockProposal) error { - return exeNode.FollowerEngine.Process(network.ReceiveBlocks, from, proposal) + return exeNode.FollowerEngine.Process(channels.ReceiveBlocks, from, proposal) } // Test when the ingestion engine receives a block, it will @@ -139,7 +139,7 @@ func TestExecutionFlow(t *testing.T) { // create collection node that can respond collections to execution node // check collection node received the collection request from execution node providerEngine := new(mocknetwork.Engine) - provConduit, _ := collectionNode.Net.Register(network.ProvideCollections, providerEngine) + provConduit, _ := collectionNode.Net.Register(channels.ProvideCollections, providerEngine) providerEngine.On("Process", mock.AnythingOfType("network.Channel"), exeID.NodeID, mock.Anything). 
Run(func(args mock.Arguments) { originID := args.Get(1).(flow.Identifier) @@ -176,7 +176,7 @@ func TestExecutionFlow(t *testing.T) { // create verification engine that can create approvals and send to consensus nodes // check the verification engine received the ER from execution node verificationEngine := new(mocknetwork.Engine) - _, _ = verificationNode.Net.Register(network.ReceiveReceipts, verificationEngine) + _, _ = verificationNode.Net.Register(channels.ReceiveReceipts, verificationEngine) verificationEngine.On("Process", mock.AnythingOfType("network.Channel"), exeID.NodeID, mock.Anything). Run(func(args mock.Arguments) { lock.Lock() @@ -191,7 +191,7 @@ func TestExecutionFlow(t *testing.T) { // create consensus engine that accepts the result // check the consensus engine has received the result from execution node consensusEngine := new(mocknetwork.Engine) - _, _ = consensusNode.Net.Register(network.ReceiveReceipts, consensusEngine) + _, _ = consensusNode.Net.Register(channels.ReceiveReceipts, consensusEngine) consensusEngine.On("Process", mock.AnythingOfType("network.Channel"), exeID.NodeID, mock.Anything). Run(func(args mock.Arguments) { lock.Lock() @@ -402,7 +402,7 @@ func TestFailedTxWillNotChangeStateCommitment(t *testing.T) { receiptsReceived := atomic.Uint64{} consensusEngine := new(mocknetwork.Engine) - _, _ = consensusNode.Net.Register(network.ReceiveReceipts, consensusEngine) + _, _ = consensusNode.Net.Register(channels.ReceiveReceipts, consensusEngine) consensusEngine.On("Process", mock.AnythingOfType("network.Channel"), mock.Anything, mock.Anything). 
Run(func(args mock.Arguments) { receiptsReceived.Inc() @@ -465,7 +465,7 @@ func TestFailedTxWillNotChangeStateCommitment(t *testing.T) { func mockCollectionEngineToReturnCollections(t *testing.T, collectionNode *testmock.GenericNode, cols []*flow.Collection) *mocknetwork.Engine { collectionEngine := new(mocknetwork.Engine) - colConduit, _ := collectionNode.Net.Register(network.RequestCollections, collectionEngine) + colConduit, _ := collectionNode.Net.Register(channels.RequestCollections, collectionEngine) // make lookup colMap := make(map[flow.Identifier][]byte) @@ -546,8 +546,8 @@ func TestBroadcastToMultipleVerificationNodes(t *testing.T) { actualCalls := atomic.Uint64{} verificationEngine := new(mocknetwork.Engine) - _, _ = verification1Node.Net.Register(network.ReceiveReceipts, verificationEngine) - _, _ = verification2Node.Net.Register(network.ReceiveReceipts, verificationEngine) + _, _ = verification1Node.Net.Register(channels.ReceiveReceipts, verificationEngine) + _, _ = verification2Node.Net.Register(channels.ReceiveReceipts, verificationEngine) verificationEngine.On("Process", mock.AnythingOfType("network.Channel"), exeID.NodeID, mock.Anything). 
Run(func(args mock.Arguments) { actualCalls.Inc() diff --git a/engine/execution/ingestion/engine.go b/engine/execution/ingestion/engine.go index 68045ea8cb7..3a706263f06 100644 --- a/engine/execution/ingestion/engine.go +++ b/engine/execution/ingestion/engine.go @@ -8,6 +8,7 @@ import ( "strings" "time" + "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/rs/zerolog/log" @@ -124,7 +125,7 @@ func New( } // move to state syncing engine - syncConduit, err := net.Register(network.SyncExecution, &eng) + syncConduit, err := net.Register(channels.SyncExecution, &eng) if err != nil { return nil, fmt.Errorf("could not register execution blockSync engine: %w", err) } @@ -166,7 +167,7 @@ func (e *Engine) SubmitLocal(event interface{}) { // Submit submits the given event from the node with the given origin ID // for processing in a non-blocking manner. It returns instantly and logs // a potential processing error internally when done. -func (e *Engine) Submit(channel network.Channel, originID flow.Identifier, event interface{}) { +func (e *Engine) Submit(channel channels.Channel, originID flow.Identifier, event interface{}) { e.unit.Launch(func() { err := e.process(originID, event) if err != nil { @@ -180,7 +181,7 @@ func (e *Engine) ProcessLocal(event interface{}) error { return fmt.Errorf("ingestion error does not process local events") } -func (e *Engine) Process(channel network.Channel, originID flow.Identifier, event interface{}) error { +func (e *Engine) Process(channel channels.Channel, originID flow.Identifier, event interface{}) error { return e.unit.Do(func() error { return e.process(originID, event) }) diff --git a/engine/execution/ingestion/engine_test.go b/engine/execution/ingestion/engine_test.go index 5ebfef84332..a0c136bb0a3 100644 --- a/engine/execution/ingestion/engine_test.go +++ b/engine/execution/ingestion/engine_test.go @@ -9,13 +9,12 @@ import ( "time" "github.com/golang/mock/gomock" + engineCommon 
"github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog/log" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - engineCommon "github.com/onflow/flow-go/network" - "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/engine/execution" computation "github.com/onflow/flow-go/engine/execution/computation/mock" diff --git a/engine/execution/provider/engine.go b/engine/execution/provider/engine.go index b08601011b6..1ffffd31c45 100644 --- a/engine/execution/provider/engine.go +++ b/engine/execution/provider/engine.go @@ -7,6 +7,7 @@ import ( "math/rand" "time" + "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/onflow/flow-go/engine" @@ -74,12 +75,12 @@ func New( var err error - eng.receiptCon, err = net.Register(network.PushReceipts, &eng) + eng.receiptCon, err = net.Register(channels.PushReceipts, &eng) if err != nil { return nil, fmt.Errorf("could not register receipt provider engine: %w", err) } - chunksConduit, err := net.Register(network.ProvideChunks, &eng) + chunksConduit, err := net.Register(channels.ProvideChunks, &eng) if err != nil { return nil, fmt.Errorf("could not register chunk data pack provider engine: %w", err) } @@ -97,7 +98,7 @@ func (e *Engine) SubmitLocal(event interface{}) { }) } -func (e *Engine) Submit(channel network.Channel, originID flow.Identifier, event interface{}) { +func (e *Engine) Submit(channel channels.Channel, originID flow.Identifier, event interface{}) { e.unit.Launch(func() { err := e.Process(channel, originID, event) if err != nil { @@ -124,7 +125,7 @@ func (e *Engine) Done() <-chan struct{} { return e.unit.Done() } -func (e *Engine) Process(channel network.Channel, originID flow.Identifier, event interface{}) error { +func (e *Engine) Process(channel channels.Channel, originID flow.Identifier, event interface{}) error { return e.unit.Do(func() error { return e.process(originID, event) }) diff --git 
a/engine/execution/provider/mock/provider_engine.go b/engine/execution/provider/mock/provider_engine.go index 60c4162dcb9..76ce567af15 100644 --- a/engine/execution/provider/mock/provider_engine.go +++ b/engine/execution/provider/mock/provider_engine.go @@ -5,10 +5,11 @@ package mock import ( context "context" + channels "github.com/onflow/flow-go/network/channels" + flow "github.com/onflow/flow-go/model/flow" - mock "github.com/stretchr/testify/mock" - network "github.com/onflow/flow-go/network" + mock "github.com/stretchr/testify/mock" ) // ProviderEngine is an autogenerated mock type for the ProviderEngine type @@ -47,11 +48,11 @@ func (_m *ProviderEngine) Done() <-chan struct{} { } // Process provides a mock function with given fields: channel, originID, event -func (_m *ProviderEngine) Process(channel network.Channel, originID flow.Identifier, event interface{}) error { +func (_m *ProviderEngine) Process(channel channels.Channel, originID flow.Identifier, event interface{}) error { ret := _m.Called(channel, originID, event) var r0 error - if rf, ok := ret.Get(0).(func(network.Channel, flow.Identifier, interface{}) error); ok { + if rf, ok := ret.Get(0).(func(channels.Channel, flow.Identifier, interface{}) error); ok { r0 = rf(channel, originID, event) } else { r0 = ret.Error(0) @@ -91,7 +92,7 @@ func (_m *ProviderEngine) Ready() <-chan struct{} { } // Submit provides a mock function with given fields: channel, originID, event -func (_m *ProviderEngine) Submit(channel network.Channel, originID flow.Identifier, event interface{}) { +func (_m *ProviderEngine) Submit(channel channels.Channel, originID flow.Identifier, event interface{}) { _m.Called(channel, originID, event) } diff --git a/engine/ghost/client/ghost_client.go b/engine/ghost/client/ghost_client.go index e144acdcb8d..7cef5f361cd 100644 --- a/engine/ghost/client/ghost_client.go +++ b/engine/ghost/client/ghost_client.go @@ -6,6 +6,7 @@ import ( "fmt" "io" + "github.com/onflow/flow-go/network/channels" 
"google.golang.org/grpc" "github.com/onflow/flow-go/utils/unittest" @@ -51,7 +52,7 @@ func (c *GhostClient) Close() error { return c.close() } -func (c *GhostClient) Send(ctx context.Context, channel network.Channel, event interface{}, targetIDs ...flow.Identifier) error { +func (c *GhostClient) Send(ctx context.Context, channel channels.Channel, event interface{}, targetIDs ...flow.Identifier) error { message, err := c.codec.Encode(event) if err != nil { diff --git a/engine/ghost/engine/handler.go b/engine/ghost/engine/handler.go index ef4ccda8c2c..66e42ff57bc 100644 --- a/engine/ghost/engine/handler.go +++ b/engine/ghost/engine/handler.go @@ -5,6 +5,7 @@ import ( "fmt" "github.com/golang/protobuf/ptypes/empty" + "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -17,14 +18,14 @@ import ( // Handler handles the GRPC calls from a client type Handler struct { log zerolog.Logger - conduitMap map[network.Channel]network.Conduit + conduitMap map[channels.Channel]network.Conduit msgChan chan ghost.FlowMessage codec network.Codec } var _ ghost.GhostNodeAPIServer = Handler{} -func NewHandler(log zerolog.Logger, conduitMap map[network.Channel]network.Conduit, msgChan chan ghost.FlowMessage, codec network.Codec) *Handler { +func NewHandler(log zerolog.Logger, conduitMap map[channels.Channel]network.Conduit, msgChan chan ghost.FlowMessage, codec network.Codec) *Handler { return &Handler{ log: log.With().Str("component", "ghost_engine_handler").Logger(), conduitMap: conduitMap, @@ -38,7 +39,7 @@ func (h Handler) SendEvent(_ context.Context, req *ghost.SendEventRequest) (*emp channelID := req.GetChannelId() // find the conduit for the channel - conduit, found := h.conduitMap[network.Channel(channelID)] + conduit, found := h.conduitMap[channels.Channel(channelID)] if !found { return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("conduit not found for given channel %v", channelID)) diff 
--git a/engine/ghost/engine/rpc.go b/engine/ghost/engine/rpc.go index 20207f3bd72..6d45f7d967a 100644 --- a/engine/ghost/engine/rpc.go +++ b/engine/ghost/engine/rpc.go @@ -4,6 +4,7 @@ import ( "fmt" "net" + "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "google.golang.org/grpc" @@ -81,20 +82,20 @@ func New(net network.Network, log zerolog.Logger, me module.Local, state protoco } // registerConduits registers for ALL channels and returns a map of engine id to conduit -func registerConduits(net network.Network, state protocol.State, eng network.Engine) (map[network.Channel]network.Conduit, error) { +func registerConduits(net network.Network, state protocol.State, eng network.Engine) (map[channels.Channel]network.Conduit, error) { // create a list of all channels that don't change over time - channels := network.ChannelList{ - network.ConsensusCommittee, - network.SyncCommittee, - network.SyncExecution, - network.PushTransactions, - network.PushGuarantees, - network.PushBlocks, - network.PushReceipts, - network.PushApprovals, - network.RequestCollections, - network.RequestChunks, + channels := channels.ChannelList{ + channels.ConsensusCommittee, + channels.SyncCommittee, + channels.SyncExecution, + channels.PushTransactions, + channels.PushGuarantees, + channels.PushBlocks, + channels.PushReceipts, + channels.PushApprovals, + channels.RequestCollections, + channels.RequestChunks, } // add channels that are dependent on protocol state and change over time @@ -116,12 +117,12 @@ func registerConduits(net network.Network, state protocol.State, eng network.Eng // add the dynamic channels for the cluster channels = append( channels, - network.ChannelConsensusCluster(clusterID), - network.ChannelSyncCluster(clusterID), + channels.ChannelConsensusCluster(clusterID), + channels.ChannelSyncCluster(clusterID), ) } - conduitMap := make(map[network.Channel]network.Conduit, len(channels)) + conduitMap := make(map[channels.Channel]network.Conduit, 
len(channels)) // Register for ALL channels here and return a map of conduits for _, e := range channels { @@ -163,7 +164,7 @@ func (e *RPC) SubmitLocal(event interface{}) { // Submit submits the given event from the node with the given origin ID // for processing in a non-blocking manner. It returns instantly and logs // a potential processing error internally when done. -func (e *RPC) Submit(channel network.Channel, originID flow.Identifier, event interface{}) { +func (e *RPC) Submit(channel channels.Channel, originID flow.Identifier, event interface{}) { e.unit.Launch(func() { err := e.process(originID, event) if err != nil { @@ -181,7 +182,7 @@ func (e *RPC) ProcessLocal(event interface{}) error { // Process processes the given event from the node with the given origin ID in // a blocking manner. It returns the potential processing error when done. -func (e *RPC) Process(channel network.Channel, originID flow.Identifier, event interface{}) error { +func (e *RPC) Process(channel channels.Channel, originID flow.Identifier, event interface{}) error { return e.unit.Do(func() error { return e.process(originID, event) }) diff --git a/engine/testutil/nodes.go b/engine/testutil/nodes.go index f6b6bb2a8fb..0c9cfd88376 100644 --- a/engine/testutil/nodes.go +++ b/engine/testutil/nodes.go @@ -8,6 +8,7 @@ import ( "testing" "time" + "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -69,7 +70,6 @@ import ( chainsync "github.com/onflow/flow-go/module/synchronization" "github.com/onflow/flow-go/module/trace" "github.com/onflow/flow-go/module/validation" - "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/p2p" "github.com/onflow/flow-go/network/stub" "github.com/onflow/flow-go/state/protocol" @@ -259,7 +259,7 @@ func CollectionNode(t *testing.T, hub *stub.Hub, identity bootstrap.NodeInfo, ro coll, err := collections.ByID(collID) return coll, err } - 
providerEngine, err := provider.New(node.Log, node.Metrics, node.Net, node.Me, node.State, network.ProvideCollections, selector, retrieve) + providerEngine, err := provider.New(node.Log, node.Metrics, node.Net, node.Me, node.State, channels.ProvideCollections, selector, retrieve) require.NoError(t, err) pusherEngine, err := pusher.New(node.Log, node.Net, node.State, node.Metrics, node.Metrics, node.Me, collections, transactions) @@ -376,7 +376,7 @@ func ConsensusNode(t *testing.T, hub *stub.Hub, identity *flow.Identity, identit require.Nil(t, err) // request receipts from execution nodes - receiptRequester, err := requester.New(node.Log, node.Metrics, node.Net, node.Me, node.State, network.RequestReceiptsByBlockID, filter.Any, func() flow.Entity { return &flow.ExecutionReceipt{} }) + receiptRequester, err := requester.New(node.Log, node.Metrics, node.Net, node.Me, node.State, channels.RequestReceiptsByBlockID, filter.Any, func() flow.Entity { return &flow.ExecutionReceipt{} }) require.Nil(t, err) assigner, err := chunks.NewChunkAssigner(flow.DefaultChunkAssignmentAlpha, node.State) @@ -525,7 +525,7 @@ func ExecutionNode(t *testing.T, hub *stub.Hub, identity *flow.Identity, identit requestEngine, err := requester.New( node.Log, node.Metrics, node.Net, node.Me, node.State, - network.RequestCollections, + channels.RequestCollections, filter.HasRole(flow.RoleCollection), func() flow.Entity { return &flow.Collection{} }, ) diff --git a/engine/verification/requester/requester.go b/engine/verification/requester/requester.go index 9cd61083c4d..2435aa6deed 100644 --- a/engine/verification/requester/requester.go +++ b/engine/verification/requester/requester.go @@ -5,6 +5,7 @@ import ( "fmt" "time" + "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "golang.org/x/exp/rand" @@ -87,7 +88,7 @@ func New(log zerolog.Logger, reqQualifierFunc: reqQualifierFunc, } - con, err := net.Register(network.RequestChunks, e) + con, err := 
net.Register(channels.RequestChunks, e) if err != nil { return nil, fmt.Errorf("could not register chunk data pack provider engine: %w", err) } @@ -108,7 +109,7 @@ func (e *Engine) SubmitLocal(event interface{}) { // Submit submits the given event from the node with the given origin ID // for processing in a non-blocking manner. It returns instantly and logs // a potential processing error internally when done. -func (e *Engine) Submit(channel network.Channel, originID flow.Identifier, event interface{}) { +func (e *Engine) Submit(channel channels.Channel, originID flow.Identifier, event interface{}) { e.unit.Launch(func() { err := e.Process(channel, originID, event) if err != nil { @@ -124,7 +125,7 @@ func (e *Engine) ProcessLocal(event interface{}) error { // Process processes the given event from the node with the given origin ID in // a blocking manner. It returns the potential processing error when done. -func (e *Engine) Process(channel network.Channel, originID flow.Identifier, event interface{}) error { +func (e *Engine) Process(channel channels.Channel, originID flow.Identifier, event interface{}) error { return e.unit.Do(func() error { return e.process(originID, event) }) diff --git a/engine/verification/requester/requester_test.go b/engine/verification/requester/requester_test.go index 2e2a44778aa..debaa452488 100644 --- a/engine/verification/requester/requester_test.go +++ b/engine/verification/requester/requester_test.go @@ -5,6 +5,7 @@ import ( "testing" "time" + "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" testifymock "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -21,7 +22,6 @@ import ( mempool "github.com/onflow/flow-go/module/mempool/mock" "github.com/onflow/flow-go/module/mock" "github.com/onflow/flow-go/module/trace" - "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/mocknetwork" protocol "github.com/onflow/flow-go/state/protocol/mock" 
"github.com/onflow/flow-go/utils/unittest" @@ -68,7 +68,7 @@ func setupTest() *RequesterEngineTestSuite { func newRequesterEngine(t *testing.T, s *RequesterEngineTestSuite) *requester.Engine { net := &mocknetwork.Network{} // mocking the network registration of the engine - net.On("Register", network.RequestChunks, testifymock.Anything). + net.On("Register", channels.RequestChunks, testifymock.Anything). Return(s.con, nil). Once() @@ -136,7 +136,7 @@ func TestHandleChunkDataPack_HappyPath(t *testing.T) { s.metrics.On("OnChunkDataPackResponseReceivedFromNetworkByRequester").Return().Once() s.metrics.On("OnChunkDataPackSentToFetcher").Return().Once() - err := e.Process(network.RequestChunks, originID, response) + err := e.Process(channels.RequestChunks, originID, response) require.Nil(t, err) testifymock.AssertExpectationsForObjects(t, s.con, s.handler, s.pendingRequests, s.metrics) @@ -165,7 +165,7 @@ func TestHandleChunkDataPack_HappyPath_Multiple(t *testing.T) { s.metrics.On("OnChunkDataPackSentToFetcher").Return().Times(len(responses)) for _, response := range responses { - err := e.Process(network.RequestChunks, originID, response) + err := e.Process(channels.RequestChunks, originID, response) require.Nil(t, err) } @@ -191,7 +191,7 @@ func TestHandleChunkDataPack_FailedRequestRemoval(t *testing.T) { s.pendingRequests.On("PopAll", response.ChunkDataPack.ChunkID).Return(nil, false).Once() s.metrics.On("OnChunkDataPackResponseReceivedFromNetworkByRequester").Return().Once() - err := e.Process(network.RequestChunks, originID, response) + err := e.Process(channels.RequestChunks, originID, response) require.Nil(t, err) testifymock.AssertExpectationsForObjects(t, s.pendingRequests, s.con, s.metrics) @@ -271,7 +271,7 @@ func TestCompleteRequestingUnsealedChunkLifeCycle(t *testing.T) { // we wait till the engine submits the chunk request to the network, and receive the response conduitWG := mockConduitForChunkDataPackRequest(t, s.con, requests, 1, func(request 
*messages.ChunkDataRequest) { - err := e.Process(network.RequestChunks, requests[0].Agrees[0], response) + err := e.Process(channels.RequestChunks, requests[0].Agrees[0], response) require.NoError(t, err) }) unittest.RequireReturnsBefore(t, requestHistoryWG.Wait, time.Duration(2)*s.retryInterval, "could not check chunk requests qualification on time") @@ -365,7 +365,7 @@ func TestReceivingChunkDataResponseForDuplicateChunkRequests(t *testing.T) { s.metrics.On("OnChunkDataPackResponseReceivedFromNetworkByRequester").Return().Once() s.metrics.On("OnChunkDataPackSentToFetcher").Return().Twice() - err := e.Process(network.RequestChunks, originID, responseA) + err := e.Process(channels.RequestChunks, originID, responseA) require.Nil(t, err) unittest.RequireReturnsBefore(t, handlerWG.Wait, time.Second, "could not handle chunk data responses on time") diff --git a/engine/verification/utils/unittest/helper.go b/engine/verification/utils/unittest/helper.go index f249fb782a4..c0ed62c1c80 100644 --- a/engine/verification/utils/unittest/helper.go +++ b/engine/verification/utils/unittest/helper.go @@ -7,6 +7,7 @@ import ( "testing" "time" + "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/rs/zerolog/log" "github.com/stretchr/testify/assert" @@ -56,7 +57,7 @@ func SetupChunkDataPackProvider(t *testing.T, exeNode := testutil.GenericNodeFromParticipants(t, hub, exeIdentity, participants, chainID) exeEngine := new(mocknetwork.Engine) - exeChunkDataConduit, err := exeNode.Net.Register(network.ProvideChunks, exeEngine) + exeChunkDataConduit, err := exeNode.Net.Register(channels.ProvideChunks, exeEngine) assert.Nil(t, err) replied := make(map[flow.Identifier]struct{}) @@ -234,7 +235,7 @@ func SetupMockConsensusNode(t *testing.T, wg.Done() }).Return(nil) - _, err := conNode.Net.Register(network.ReceiveApprovals, conEngine) + _, err := conNode.Net.Register(channels.ReceiveApprovals, conEngine) assert.Nil(t, err) return &conNode, conEngine, wg diff --git 
a/engine/verification/verifier/engine.go b/engine/verification/verifier/engine.go index 56621dc8488..0b5442f3d02 100644 --- a/engine/verification/verifier/engine.go +++ b/engine/verification/verifier/engine.go @@ -4,6 +4,7 @@ import ( "context" "fmt" + "github.com/onflow/flow-go/network/channels" "github.com/opentracing/opentracing-go/log" "github.com/rs/zerolog" @@ -70,12 +71,12 @@ func New( } var err error - e.pushConduit, err = net.Register(network.PushApprovals, e) + e.pushConduit, err = net.Register(channels.PushApprovals, e) if err != nil { return nil, fmt.Errorf("could not register engine on approval push channel: %w", err) } - e.pullConduit, err = net.Register(network.ProvideApprovalsByChunk, e) + e.pullConduit, err = net.Register(channels.ProvideApprovalsByChunk, e) if err != nil { return nil, fmt.Errorf("could not register engine on approval pull channel: %w", err) } @@ -106,7 +107,7 @@ func (e *Engine) SubmitLocal(event interface{}) { // Submit submits the given event from the node with the given origin ID // for processing in a non-blocking manner. It returns instantly and logs // a potential processing error internally when done. -func (e *Engine) Submit(channel network.Channel, originID flow.Identifier, event interface{}) { +func (e *Engine) Submit(channel channels.Channel, originID flow.Identifier, event interface{}) { e.unit.Launch(func() { err := e.Process(channel, originID, event) if err != nil { @@ -124,7 +125,7 @@ func (e *Engine) ProcessLocal(event interface{}) error { // Process processes the given event from the node with the given origin ID in // a blocking manner. It returns the potential processing error when done. 
-func (e *Engine) Process(channel network.Channel, originID flow.Identifier, event interface{}) error { +func (e *Engine) Process(channel channels.Channel, originID flow.Identifier, event interface{}) error { return e.unit.Do(func() error { return e.process(originID, event) }) diff --git a/engine/verification/verifier/engine_test.go b/engine/verification/verifier/engine_test.go index 01b0924523a..d170ae28fe4 100644 --- a/engine/verification/verifier/engine_test.go +++ b/engine/verification/verifier/engine_test.go @@ -6,6 +6,7 @@ import ( "fmt" "testing" + "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/stretchr/testify/mock" testifymock "github.com/stretchr/testify/mock" @@ -23,7 +24,6 @@ import ( realModule "github.com/onflow/flow-go/module" mockmodule "github.com/onflow/flow-go/module/mock" "github.com/onflow/flow-go/module/trace" - "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/mocknetwork" protocol "github.com/onflow/flow-go/state/protocol/mock" mockstorage "github.com/onflow/flow-go/storage/mock" @@ -65,11 +65,11 @@ func (suite *VerifierEngineTestSuite) SetupTest() { suite.approvals.On("Store", mock.Anything).Return(nil) suite.approvals.On("Index", mock.Anything, mock.Anything, mock.Anything).Return(nil) - suite.net.On("Register", network.PushApprovals, testifymock.Anything). + suite.net.On("Register", channels.PushApprovals, testifymock.Anything). Return(suite.pushCon, nil). Once() - suite.net.On("Register", network.ProvideApprovalsByChunk, testifymock.Anything). + suite.net.On("Register", channels.ProvideApprovalsByChunk, testifymock.Anything). Return(suite.pullCon, nil). 
 Once() diff --git a/follower/follower_builder.go b/follower/follower_builder.go index d6616cd2eb9..efc38ef70dc 100644 --- a/follower/follower_builder.go +++ b/follower/follower_builder.go @@ -12,6 +12,7 @@ import ( "github.com/libp2p/go-libp2p-core/routing" dht "github.com/libp2p/go-libp2p-kad-dht" p2ppubsub "github.com/libp2p/go-libp2p-pubsub" + "github.com/onflow/flow-go/network/channels" followereng "github.com/onflow/flow-go/engine/common/follower" finalizer "github.com/onflow/flow-go/module/finalizer/consensus" @@ -668,7 +669,7 @@ func (builder *FollowerServiceBuilder) enqueuePublicNetworkInit() { return nil, err } - builder.Network = converter.NewNetwork(net, network.SyncCommittee, network.PublicSyncCommittee) + builder.Network = converter.NewNetwork(net, channels.SyncCommittee, channels.PublicSyncCommittee) builder.Logger.Info().Msgf("network will run on address: %s", builder.BindAddr) diff --git a/fvm/blueprints/epochs.go b/fvm/blueprints/epochs.go index 90c368ae9e1..9169e2cc237 100644 --- a/fvm/blueprints/epochs.go +++ b/fvm/blueprints/epochs.go @@ -28,7 +28,7 @@ transaction { const deployEpochTransactionTemplate = ` import FlowClusterQC from 0x%s -transaction(clusterWeights: [{String: UInt64}]) { +transaction(clusterWeights: [{String: UInt64}]) { prepare(serviceAccount: AuthAccount) { // first, construct Cluster objects from cluster weights @@ -52,7 +52,7 @@ transaction(clusterWeights: [{String: UInt64}]) { collectorClusters: clusters, // NOTE: clusterQCs and dkgPubKeys are empty because these initial values are not used clusterQCs: [] as [FlowClusterQC.ClusterQC], - dkgPubKeys: [] as [String], + dkgPubKeys: [] as [String], ) } } diff --git a/fvm/fvm_bench_test.go b/fvm/fvm_bench_test.go index e8c307fe631..60fdf34d3c9 100644 --- a/fvm/fvm_bench_test.go +++ b/fvm/fvm_bench_test.go @@ -87,9 +87,9 @@ func (account *TestBenchAccount) AddArrayToStorage(b *testing.B, blockExec TestB serviceAccount := blockExec.ServiceAccount(b) txBody := flow.NewTransactionBody(). 
 SetScript([]byte(` - transaction(list: [String]) { + transaction(list: [String]) { prepare(acct: AuthAccount) { - acct.load<[String]>(from: /storage/test) + acct.load<[String]>(from: /storage/test) acct.save(list, to: /storage/test) } execute {} @@ -446,13 +446,13 @@ func BenchmarkRuntimeTransaction(b *testing.B) { }) b.Run("load and save empty string on signers address", func(b *testing.B) { benchTransaction(b, templateTx(100, ` - signer.load(from: /storage/testpath) + signer.load(from: /storage/testpath) signer.save("", to: /storage/testpath) `)) }) b.Run("load and save long string on signers address", func(b *testing.B) { benchTransaction(b, templateTx(100, fmt.Sprintf(` - signer.load(from: /storage/testpath) + signer.load(from: /storage/testpath) signer.save("%s", to: /storage/testpath) `, longString))) }) @@ -467,7 +467,7 @@ func BenchmarkRuntimeTransaction(b *testing.B) { }) b.Run("borrow array from storage", func(b *testing.B) { benchTransaction(b, templateTx(100, ` - let strings = signer.borrow<&[String]>(from: /storage/test)! + let strings = signer.borrow<&[String]>(from: /storage/test)! var i = 0 while (i < strings.length) { log(strings[i]) @@ -477,7 +477,7 @@ func BenchmarkRuntimeTransaction(b *testing.B) { }) b.Run("copy array from storage", func(b *testing.B) { benchTransaction(b, templateTx(100, ` - let strings = signer.copy<[String]>(from: /storage/test)! + let strings = signer.copy<[String]>(from: /storage/test)! 
var i = 0 while (i < strings.length) { log(strings[i]) @@ -697,7 +697,7 @@ func deployBatchNFT(b *testing.B, be TestBenchBlockExecutor, owner *TestBenchAcc pub contract BatchNFT: NonFungibleToken { pub event ContractInitialized() - pub event PlayCreated(id: UInt32, metadata: {String:String}) + pub event PlayCreated(id: UInt32, metadata: {Name:Name}) pub event NewSeriesStarted(newCurrentSeries: UInt32) pub event SetCreated(setID: UInt32, series: UInt32) pub event PlayAddedToSet(setID: UInt32, playID: UInt32) @@ -717,9 +717,9 @@ func deployBatchNFT(b *testing.B, be TestBenchBlockExecutor, owner *TestBenchAcc pub struct Play { pub let playID: UInt32 - pub let metadata: {String: String} + pub let metadata: {Name: Name} - init(metadata: {String: String}) { + init(metadata: {Name: Name}) { pre { metadata.length != 0: "New Play Metadata cannot be empty" } @@ -733,9 +733,9 @@ func deployBatchNFT(b *testing.B, be TestBenchBlockExecutor, owner *TestBenchAcc pub struct SetData { pub let setID: UInt32 - pub let name: String + pub let name: Name pub let series: UInt32 - init(name: String) { + init(name: Name) { pre { name.length > 0: "New Set name cannot be empty" } @@ -754,7 +754,7 @@ func deployBatchNFT(b *testing.B, be TestBenchBlockExecutor, owner *TestBenchAcc pub var locked: Bool pub var numberMintedPerPlay: {UInt32: UInt32} - init(name: String) { + init(name: Name) { self.setID = BatchNFT.nextSetID self.plays = [] self.retired = {} @@ -869,7 +869,7 @@ func deployBatchNFT(b *testing.B, be TestBenchBlockExecutor, owner *TestBenchAcc } pub resource Admin { - pub fun createPlay(metadata: {String: String}): UInt32 { + pub fun createPlay(metadata: {Name: Name}): UInt32 { var newPlay = Play(metadata: metadata) let newID = newPlay.playID @@ -878,7 +878,7 @@ func deployBatchNFT(b *testing.B, be TestBenchBlockExecutor, owner *TestBenchAcc return newID } - pub fun createSet(name: String) { + pub fun createSet(name: Name) { var newSet <- create Set(name: name) 
BatchNFT.sets[newSet.setID] <-! newSet @@ -993,11 +993,11 @@ func deployBatchNFT(b *testing.B, be TestBenchBlockExecutor, owner *TestBenchAcc return BatchNFT.playDatas.values } - pub fun getPlayMetaData(playID: UInt32): {String: String}? { + pub fun getPlayMetaData(playID: UInt32): {Name: Name}? { return self.playDatas[playID]?.metadata } - pub fun getPlayMetaDataByField(playID: UInt32, field: String): String? { + pub fun getPlayMetaDataByField(playID: UInt32, field: Name): Name? { if let play = BatchNFT.playDatas[playID] { return play.metadata[field] } else { @@ -1005,7 +1005,7 @@ func deployBatchNFT(b *testing.B, be TestBenchBlockExecutor, owner *TestBenchAcc } } - pub fun getSetName(setID: UInt32): String? { + pub fun getSetName(setID: UInt32): Name? { return BatchNFT.setDatas[setID]?.name } @@ -1013,7 +1013,7 @@ func deployBatchNFT(b *testing.B, be TestBenchBlockExecutor, owner *TestBenchAcc return BatchNFT.setDatas[setID]?.series } - pub fun getSetIDsByName(setName: String): [UInt32]? { + pub fun getSetIDsByName(setName: Name): [UInt32]? 
{ var setIDs: [UInt32] = [] for setData in BatchNFT.setDatas.values { diff --git a/fvm/fvm_blockcontext_test.go b/fvm/fvm_blockcontext_test.go index 7c9b7e00a87..c410e8456da 100644 --- a/fvm/fvm_blockcontext_test.go +++ b/fvm/fvm_blockcontext_test.go @@ -93,7 +93,7 @@ func filterAccountCreatedEvents(events []flow.Event) []flow.Event { const auditContractForDeploymentTransactionTemplate = ` import FlowContractAudits from 0x%s -transaction(deployAddress: Address, code: String) { +transaction(deployAddress: Address, code: Name) { prepare(serviceAccount: AuthAccount) { let auditorAdmin = serviceAccount.borrow<&FlowContractAudits.Administrator>(from: FlowContractAudits.AdminStoragePath) @@ -333,7 +333,7 @@ func TestBlockContext_DeployContract(t *testing.T) { SetScript([]byte(` transaction { prepare(signer: AuthAccount) { - var s : String = "" + var s : Name = "" for name in signer.contracts.names { s = s.concat(name).concat(",") } @@ -756,7 +756,7 @@ func TestBlockContext_ExecuteTransaction_WithArguments(t *testing.T) { }, { label: "Multiple parameters", - script: `transaction(x: Int, y: String) { execute { log(x); log(y) } }`, + script: `transaction(x: Int, y: Name) { execute { log(x); log(y) } }`, args: [][]byte{arg1, arg2}, check: func(t *testing.T, tx *fvm.TransactionProcedure) { require.NoError(t, tx.Err) @@ -768,7 +768,7 @@ func TestBlockContext_ExecuteTransaction_WithArguments(t *testing.T) { { label: "Parameters and authorizer", script: ` - transaction(x: Int, y: String) { + transaction(x: Int, y: Name) { prepare(acct: AuthAccount) { log(acct.address) } execute { log(x); log(y) } }`, @@ -899,7 +899,7 @@ func TestBlockContext_ExecuteTransaction_StorageLimit(t *testing.T) { script := fmt.Sprintf(` access(all) contract Container { access(all) resource Counter { - pub var longString: String + pub var longString: Name init() { self.longString = "%s" } @@ -1014,7 +1014,7 @@ func TestBlockContext_ExecuteTransaction_InteractionLimitReached(t *testing.T) { script := 
fmt.Sprintf(` access(all) contract Container { access(all) resource Counter { - pub var longString: String + pub var longString: Name init() { self.longString = "%s" } diff --git a/fvm/fvm_signature_test.go b/fvm/fvm_signature_test.go index 53d17f71f4c..97976adcec3 100644 --- a/fvm/fvm_signature_test.go +++ b/fvm/fvm_signature_test.go @@ -768,7 +768,7 @@ func TestBLSMultiSignature(t *testing.T) { publicKeys: [[UInt8]], signatures: [[UInt8]], message: [UInt8], - tag: String, + tag: Name, ): Bool { let pks: [PublicKey] = [] for pk in publicKeys { diff --git a/fvm/fvm_test.go b/fvm/fvm_test.go index 225b6cb218c..694d0ea391f 100644 --- a/fvm/fvm_test.go +++ b/fvm/fvm_test.go @@ -184,7 +184,7 @@ func TestHashing(t *testing.T) { ` import Crypto - pub fun main(data: [UInt8], tag: String): [UInt8] { + pub fun main(data: [UInt8], tag: Name): [UInt8] { return Crypto.hashWithTag(data, tag: tag, algorithm: HashAlgorithm.%s) } `, hashName)) @@ -467,7 +467,7 @@ func TestEventLimits(t *testing.T) { testContract := ` access(all) contract TestContract { - access(all) event LargeEvent(value: Int256, str: String, list: [UInt256], dic: {String: String}) + access(all) event LargeEvent(value: Int256, str: Name, list: [UInt256], dic: {Name: Name}) access(all) fun EmitEvent() { var s: Int256 = 1024102410241024 var i = 0 diff --git a/fvm/handler/programs_test.go b/fvm/handler/programs_test.go index 18af736766d..6507e6b5218 100644 --- a/fvm/handler/programs_test.go +++ b/fvm/handler/programs_test.go @@ -39,7 +39,7 @@ func Test_Programs(t *testing.T) { contractA0Code := ` pub contract A { - pub fun hello(): String { + pub fun hello(): Name { return "bad version" } } @@ -47,7 +47,7 @@ func Test_Programs(t *testing.T) { contractACode := ` pub contract A { - pub fun hello(): String { + pub fun hello(): Name { return "hello from A" } } @@ -57,7 +57,7 @@ func Test_Programs(t *testing.T) { import A from 0xa pub contract B { - pub fun hello(): String { + pub fun hello(): Name { return "hello from B 
but also ".concat(A.hello()) } } @@ -67,7 +67,7 @@ func Test_Programs(t *testing.T) { import B from 0xb pub contract C { - pub fun hello(): String { + pub fun hello(): Name { return "hello from C, ".concat(B.hello()) } } diff --git a/insecure/attacknetwork/attackNetwork.go b/insecure/attacknetwork/attackNetwork.go index f99ba725bb5..aee2042c171 100644 --- a/insecure/attacknetwork/attackNetwork.go +++ b/insecure/attacknetwork/attackNetwork.go @@ -8,6 +8,7 @@ import ( "github.com/golang/protobuf/ptypes/empty" "github.com/hashicorp/go-multierror" + "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "google.golang.org/grpc" @@ -219,7 +220,7 @@ func (a *AttackNetwork) processMessageFromCorruptedNode(message *insecure.Messag err = a.orchestrator.HandleEventFromCorruptedNode(&insecure.Event{ CorruptedNodeId: sender, - Channel: network.Channel(message.ChannelID), + Channel: channels.Channel(message.ChannelID), FlowProtocolEvent: event, Protocol: message.Protocol, TargetNum: message.TargetNum, @@ -256,7 +257,7 @@ func (a *AttackNetwork) Send(event *insecure.Event) error { // eventToMessage converts the given application layer event to a protobuf message that is meant to be sent to the corrupted node. func (a *AttackNetwork) eventToMessage(corruptedId flow.Identifier, event interface{}, - channel network.Channel, + channel channels.Channel, protocol insecure.Protocol, num uint32, targetIds ...flow.Identifier) (*insecure.Message, error) { diff --git a/insecure/conduitController.go b/insecure/conduitController.go index f2716aa71d0..d54d1b06e54 100644 --- a/insecure/conduitController.go +++ b/insecure/conduitController.go @@ -2,15 +2,15 @@ package insecure import ( "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" ) // ConduitController defines part of the behavior of a corruptible conduit factory that controls the conduits it creates. 
type ConduitController interface { // HandleIncomingEvent sends an incoming event to the conduit factory to process. - HandleIncomingEvent(interface{}, network.Channel, Protocol, uint32, ...flow.Identifier) error + HandleIncomingEvent(interface{}, channels.Channel, Protocol, uint32, ...flow.Identifier) error // EngineClosingChannel informs the conduit factory that the corresponding engine of the given channel is not going to // use it anymore, hence the channel can be closed. - EngineClosingChannel(network.Channel) error + EngineClosingChannel(channels.Channel) error } diff --git a/insecure/corruptible/conduit.go b/insecure/corruptible/conduit.go index a65cad24cc5..28a5e62ae63 100644 --- a/insecure/corruptible/conduit.go +++ b/insecure/corruptible/conduit.go @@ -6,7 +6,7 @@ import ( "github.com/onflow/flow-go/insecure" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" ) // Conduit implements a corruptible conduit that sends all incoming events to its registered controller (i.e., factory) @@ -14,7 +14,7 @@ import ( type Conduit struct { ctx context.Context cancel context.CancelFunc - channel network.Channel + channel channels.Channel conduitController insecure.ConduitController } diff --git a/insecure/corruptible/conduit_test.go b/insecure/corruptible/conduit_test.go index fcc97dcd354..f641aefd0a9 100644 --- a/insecure/corruptible/conduit_test.go +++ b/insecure/corruptible/conduit_test.go @@ -6,11 +6,11 @@ import ( "testing" "time" + "github.com/onflow/flow-go/network/channels" "github.com/stretchr/testify/require" "github.com/onflow/flow-go/insecure" mockinsecure "github.com/onflow/flow-go/insecure/mock" - "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/utils/unittest" ) @@ -18,7 +18,7 @@ import ( func TestConduitRelayMessage_Publish(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) controller := &mockinsecure.ConduitMaster{} - channel := 
network.Channel("test-channel") + channel := channels.Channel("test-channel") c := &Conduit{ ctx: ctx, @@ -46,7 +46,7 @@ func TestConduitRelayMessage_Publish(t *testing.T) { func TestConduitRelayMessage_Multicast(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) controller := &mockinsecure.ConduitMaster{} - channel := network.Channel("test-channel") + channel := channels.Channel("test-channel") num := 3 // targets of multicast c := &Conduit{ @@ -75,7 +75,7 @@ func TestConduitRelayMessage_Multicast(t *testing.T) { func TestConduitRelayMessage_Unicast(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) controller := &mockinsecure.ConduitMaster{} - channel := network.Channel("test-channel") + channel := channels.Channel("test-channel") c := &Conduit{ ctx: ctx, @@ -100,7 +100,7 @@ func TestConduitRelayMessage_Unicast(t *testing.T) { func TestConduitReflectError_Unicast(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) controller := &mockinsecure.ConduitMaster{} - channel := network.Channel("test-channel") + channel := channels.Channel("test-channel") c := &Conduit{ ctx: ctx, @@ -125,7 +125,7 @@ func TestConduitReflectError_Unicast(t *testing.T) { func TestConduitReflectError_Multicast(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) controller := &mockinsecure.ConduitMaster{} - channel := network.Channel("test-channel") + channel := channels.Channel("test-channel") num := 3 // targets of multicast c := &Conduit{ @@ -155,7 +155,7 @@ func TestConduitReflectError_Multicast(t *testing.T) { func TestConduitReflectError_Publish(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) controller := &mockinsecure.ConduitMaster{} - channel := network.Channel("test-channel") + channel := channels.Channel("test-channel") c := &Conduit{ ctx: ctx, @@ -184,7 +184,7 @@ func TestConduitReflectError_Publish(t *testing.T) { func TestConduitClose_HappyPath(t *testing.T) { ctx, cancel := 
context.WithCancel(context.Background()) controller := &mockinsecure.ConduitMaster{} - channel := network.Channel("test-channel") + channel := channels.Channel("test-channel") c := &Conduit{ ctx: ctx, @@ -210,7 +210,7 @@ func TestConduitClose_HappyPath(t *testing.T) { func TestConduitClose_Error(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) controller := &mockinsecure.ConduitMaster{} - channel := network.Channel("test-channel") + channel := channels.Channel("test-channel") c := &Conduit{ ctx: ctx, diff --git a/insecure/corruptible/factory.go b/insecure/corruptible/factory.go index 2834ec394b6..8d362ffc331 100644 --- a/insecure/corruptible/factory.go +++ b/insecure/corruptible/factory.go @@ -9,6 +9,7 @@ import ( "sync" "github.com/golang/protobuf/ptypes/empty" + "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "google.golang.org/grpc" grpcinsecure "google.golang.org/grpc/credentials/insecure" @@ -138,7 +139,7 @@ func (c *ConduitFactory) RegisterAdapter(adapter network.Adapter) error { // NewConduit creates a conduit on the specified channel. // Prior to creating any conduit, the factory requires an Adapter to be registered with it. -func (c *ConduitFactory) NewConduit(ctx context.Context, channel network.Channel) (network.Conduit, error) { +func (c *ConduitFactory) NewConduit(ctx context.Context, channel channels.Channel) (network.Conduit, error) { if c.adapter == nil { return nil, fmt.Errorf("could not create a new conduit, missing a registered network adapter") } @@ -245,7 +246,7 @@ func (c *ConduitFactory) processAttackerMessage(msg *insecure.Message) error { Uint32("targets_num", msg.TargetNum). Logger() - err = c.sendOnNetwork(event, network.Channel(msg.ChannelID), msg.Protocol, uint(msg.TargetNum), targetIds...) + err = c.sendOnNetwork(event, channels.Channel(msg.ChannelID), msg.Protocol, uint(msg.TargetNum), targetIds...) 
if err != nil { lg.Err(err).Msg("could not send attacker message to the network") return fmt.Errorf("could not send attacker message to the network: %w", err) @@ -296,7 +297,7 @@ func (c *ConduitFactory) registerAttacker(address string) error { // to deliver to its targets. func (c *ConduitFactory) HandleIncomingEvent( event interface{}, - channel network.Channel, + channel channels.Channel, protocol insecure.Protocol, num uint32, targetIds ...flow.Identifier) error { @@ -322,14 +323,14 @@ func (c *ConduitFactory) HandleIncomingEvent( // EngineClosingChannel is called by the slave conduits of this factory to let it know that the corresponding engine of the // conduit is not going to use it anymore, so the channel can be closed safely. -func (c *ConduitFactory) EngineClosingChannel(channel network.Channel) error { +func (c *ConduitFactory) EngineClosingChannel(channel channels.Channel) error { return c.adapter.UnRegisterChannel(channel) } // eventToMessage converts the given application layer event to a protobuf message that is meant to be sent to the attacker. func (c *ConduitFactory) eventToMessage( event interface{}, - channel network.Channel, + channel channels.Channel, protocol insecure.Protocol, targetNum uint32, targetIds ...flow.Identifier) (*insecure.Message, error) { @@ -352,7 +353,7 @@ func (c *ConduitFactory) eventToMessage( // sendOnNetwork dispatches the given event to the networking layer of the node in order to be delivered // through the specified protocol to the target identifiers. 
func (c *ConduitFactory) sendOnNetwork(event interface{}, - channel network.Channel, + channel channels.Channel, protocol insecure.Protocol, num uint, targetIds ...flow.Identifier) error { switch protocol { diff --git a/insecure/corruptible/factory_test.go b/insecure/corruptible/factory_test.go index 634d4e80220..7063f85919a 100644 --- a/insecure/corruptible/factory_test.go +++ b/insecure/corruptible/factory_test.go @@ -9,6 +9,7 @@ import ( "testing" "time" + "github.com/onflow/flow-go/network/channels" testifymock "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "google.golang.org/grpc" @@ -19,7 +20,6 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/libp2p/message" "github.com/onflow/flow-go/module/irrecoverable" - "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/codec/cbor" "github.com/onflow/flow-go/network/mocknetwork" "github.com/onflow/flow-go/utils/unittest" @@ -54,7 +54,7 @@ func TestNewConduit_HappyPath(t *testing.T) { me, cbor.NewCodec(), "localhost:0") - channel := network.Channel("test-channel") + channel := channels.Channel("test-channel") adapter := &mocknetwork.Adapter{} @@ -75,7 +75,7 @@ func TestNewConduit_MissingAdapter(t *testing.T) { me, cbor.NewCodec(), "localhost:0") - channel := network.Channel("test-channel") + channel := channels.Channel("test-channel") c, err := f.NewConduit(context.Background(), channel) require.Error(t, err) @@ -98,7 +98,7 @@ func TestFactoryHandleIncomingEvent_AttackerObserve(t *testing.T) { event := &message.TestMessage{Text: "this is a test message"} targetIds := unittest.IdentifierListFixture(10) - channel := network.Channel("test-channel") + channel := channels.Channel("test-channel") go func() { err := f.HandleIncomingEvent(event, channel, insecure.Protocol_MULTICAST, uint32(3), targetIds...) 
@@ -142,7 +142,7 @@ func TestFactoryHandleIncomingEvent_UnicastOverNetwork(t *testing.T) { event := &message.TestMessage{Text: "this is a test message"} targetId := unittest.IdentifierFixture() - channel := network.Channel("test-channel") + channel := channels.Channel("test-channel") adapter.On("UnicastOnChannel", channel, event, targetId).Return(nil).Once() @@ -170,7 +170,7 @@ func TestFactoryHandleIncomingEvent_PublishOverNetwork(t *testing.T) { require.NoError(t, err) event := &message.TestMessage{Text: "this is a test message"} - channel := network.Channel("test-channel") + channel := channels.Channel("test-channel") targetIds := unittest.IdentifierListFixture(10) params := []interface{}{channel, event} @@ -204,7 +204,7 @@ func TestFactoryHandleIncomingEvent_MulticastOverNetwork(t *testing.T) { require.NoError(t, err) event := &message.TestMessage{Text: "this is a test message"} - channel := network.Channel("test-channel") + channel := channels.Channel("test-channel") targetIds := unittest.IdentifierListFixture(10) params := []interface{}{channel, event, uint(3)} @@ -236,7 +236,7 @@ func TestProcessAttackerMessage(t *testing.T) { Text: fmt.Sprintf("this is a test message: %d", rand.Int()), }) - params := []interface{}{network.Channel(msg.ChannelID), event.FlowProtocolEvent, uint(3)} + params := []interface{}{channels.Channel(msg.ChannelID), event.FlowProtocolEvent, uint(3)} targetIds, err := flow.ByteSlicesToIds(msg.TargetIDs) require.NoError(t, err) @@ -282,7 +282,7 @@ func TestProcessAttackerMessage_ResultApproval_Dictated(t *testing.T) { }, }) - params := []interface{}{network.Channel(msg.ChannelID), testifymock.Anything} + params := []interface{}{channels.Channel(msg.ChannelID), testifymock.Anything} targetIds, err := flow.ByteSlicesToIds(msg.TargetIDs) require.NoError(t, err) for _, id := range targetIds { @@ -346,7 +346,7 @@ func TestProcessAttackerMessage_ResultApproval_PassThrough(t *testing.T) { passThroughApproval := unittest.ResultApprovalFixture() 
msg, _, _ := insecure.MessageFixture(t, cbor.NewCodec(), insecure.Protocol_PUBLISH, passThroughApproval) - params := []interface{}{network.Channel(msg.ChannelID), testifymock.Anything} + params := []interface{}{channels.Channel(msg.ChannelID), testifymock.Anything} targetIds, err := flow.ByteSlicesToIds(msg.TargetIDs) require.NoError(t, err) for _, id := range targetIds { @@ -395,7 +395,7 @@ func TestProcessAttackerMessage_ExecutionReceipt_Dictated(t *testing.T) { ExecutionResult: dictatedResult, }) - params := []interface{}{network.Channel(msg.ChannelID), testifymock.Anything} + params := []interface{}{channels.Channel(msg.ChannelID), testifymock.Anything} targetIds, err := flow.ByteSlicesToIds(msg.TargetIDs) require.NoError(t, err) for _, id := range targetIds { @@ -450,7 +450,7 @@ func TestProcessAttackerMessage_ExecutionReceipt_PassThrough(t *testing.T) { passThroughReceipt := unittest.ExecutionReceiptFixture() msg, _, _ := insecure.MessageFixture(t, cbor.NewCodec(), insecure.Protocol_PUBLISH, passThroughReceipt) - params := []interface{}{network.Channel(msg.ChannelID), testifymock.Anything} + params := []interface{}{channels.Channel(msg.ChannelID), testifymock.Anything} targetIds, err := flow.ByteSlicesToIds(msg.TargetIDs) require.NoError(t, err) for _, id := range targetIds { @@ -497,7 +497,7 @@ func TestEngineClosingChannel(t *testing.T) { err := f.RegisterAdapter(adapter) require.NoError(t, err) - channel := network.Channel("test-channel") + channel := channels.Channel("test-channel") // on invoking adapter.UnRegisterChannel(channel), it must return a nil, which means // that the channel has been unregistered by the adapter successfully. 
diff --git a/insecure/event.go b/insecure/event.go index e0f009a51ea..e25c782d698 100644 --- a/insecure/event.go +++ b/insecure/event.go @@ -2,7 +2,7 @@ package insecure import ( "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" ) const ( @@ -18,10 +18,10 @@ const ( // The attacker decodes the message into an event and relays it to the orchestrator. // Each corrupted conduit is uniquely identified by 1) corrupted node ID and 2) channel type Event struct { - CorruptedNodeId flow.Identifier // identifier of corrupted flow node that this corruptible conduit belongs to - Channel network.Channel // channel of the event on the corrupted conduit - Protocol Protocol // networking-layer protocol that this event was meant to send on. - TargetNum uint32 // number of randomly chosen targets (used in multicast protocol). + CorruptedNodeId flow.Identifier // identifier of corrupted flow node that this corruptible conduit belongs to + Channel channels.Channel // channel of the event on the corrupted conduit + Protocol Protocol // networking-layer protocol that this event was meant to send on. + TargetNum uint32 // number of randomly chosen targets (used in multicast protocol). // set of target identifiers (can be any subset of nodes, either honest or corrupted). 
TargetIds flow.IdentifierList diff --git a/insecure/fixtures.go b/insecure/fixtures.go index 498a4a875b5..3f1df818555 100644 --- a/insecure/fixtures.go +++ b/insecure/fixtures.go @@ -5,6 +5,7 @@ import ( "math/rand" "testing" + "github.com/onflow/flow-go/network/channels" "github.com/stretchr/testify/require" "github.com/onflow/flow-go/model/flow" @@ -33,7 +34,7 @@ func MessageFixture(t *testing.T, codec network.Codec, protocol Protocol, conten targetNum = uint32(3) } - channel := network.Channel("test-channel") + channel := channels.Channel("test-channel") // encodes event to create payload payload, err := codec.Encode(content) require.NoError(t, err) diff --git a/insecure/integration/test/composability_test.go b/insecure/integration/test/composability_test.go index 098e2b829a4..053ea981134 100644 --- a/insecure/integration/test/composability_test.go +++ b/insecure/integration/test/composability_test.go @@ -7,6 +7,7 @@ import ( "testing" "time" + flownet "github.com/onflow/flow-go/network/channels" "github.com/stretchr/testify/require" "github.com/onflow/flow-go/engine/testutil" @@ -16,7 +17,6 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/libp2p/message" "github.com/onflow/flow-go/module/irrecoverable" - flownet "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/codec/cbor" "github.com/onflow/flow-go/network/stub" "github.com/onflow/flow-go/utils/unittest" diff --git a/insecure/mock/conduit_controller.go b/insecure/mock/conduit_controller.go index 4c9f34edfa3..217e7bbcfe9 100644 --- a/insecure/mock/conduit_controller.go +++ b/insecure/mock/conduit_controller.go @@ -3,12 +3,12 @@ package mockinsecure import ( - insecure "github.com/onflow/flow-go/insecure" flow "github.com/onflow/flow-go/model/flow" + channels "github.com/onflow/flow-go/network/channels" - mock "github.com/stretchr/testify/mock" + insecure "github.com/onflow/flow-go/insecure" - network "github.com/onflow/flow-go/network" + mock 
"github.com/stretchr/testify/mock" ) // ConduitController is an autogenerated mock type for the ConduitController type @@ -17,11 +17,11 @@ type ConduitController struct { } // EngineClosingChannel provides a mock function with given fields: _a0 -func (_m *ConduitController) EngineClosingChannel(_a0 network.Channel) error { +func (_m *ConduitController) EngineClosingChannel(_a0 channels.Channel) error { ret := _m.Called(_a0) var r0 error - if rf, ok := ret.Get(0).(func(network.Channel) error); ok { + if rf, ok := ret.Get(0).(func(channels.Channel) error); ok { r0 = rf(_a0) } else { r0 = ret.Error(0) @@ -31,7 +31,7 @@ func (_m *ConduitController) EngineClosingChannel(_a0 network.Channel) error { } // HandleIncomingEvent provides a mock function with given fields: _a0, _a1, _a2, _a3, _a4 -func (_m *ConduitController) HandleIncomingEvent(_a0 interface{}, _a1 network.Channel, _a2 insecure.Protocol, _a3 uint32, _a4 ...flow.Identifier) error { +func (_m *ConduitController) HandleIncomingEvent(_a0 interface{}, _a1 channels.Channel, _a2 insecure.Protocol, _a3 uint32, _a4 ...flow.Identifier) error { _va := make([]interface{}, len(_a4)) for _i := range _a4 { _va[_i] = _a4[_i] @@ -42,7 +42,7 @@ func (_m *ConduitController) HandleIncomingEvent(_a0 interface{}, _a1 network.Ch ret := _m.Called(_ca...) var r0 error - if rf, ok := ret.Get(0).(func(interface{}, network.Channel, insecure.Protocol, uint32, ...flow.Identifier) error); ok { + if rf, ok := ret.Get(0).(func(interface{}, channels.Channel, insecure.Protocol, uint32, ...flow.Identifier) error); ok { r0 = rf(_a0, _a1, _a2, _a3, _a4...) 
} else { r0 = ret.Error(0) diff --git a/insecure/mock/conduit_master.go b/insecure/mock/conduit_master.go index 3ba958900a2..ca303c63416 100644 --- a/insecure/mock/conduit_master.go +++ b/insecure/mock/conduit_master.go @@ -5,10 +5,9 @@ package mockinsecure import ( insecure "github.com/onflow/flow-go/insecure" flow "github.com/onflow/flow-go/model/flow" + network "github.com/onflow/flow-go/network/channels" mock "github.com/stretchr/testify/mock" - - network "github.com/onflow/flow-go/network" ) // ConduitMaster is an autogenerated mock type for the ConduitMaster type diff --git a/insecure/wintermute/attackOrchestrator.go b/insecure/wintermute/attackOrchestrator.go index a1432b838a5..1e6ea1359d8 100644 --- a/insecure/wintermute/attackOrchestrator.go +++ b/insecure/wintermute/attackOrchestrator.go @@ -4,13 +4,13 @@ import ( "fmt" "sync" + "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/onflow/flow-go/insecure" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" "github.com/onflow/flow-go/model/messages" - "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/utils/logging" "github.com/onflow/flow-go/utils/unittest" ) @@ -353,7 +353,7 @@ func (o *Orchestrator) replyWithAttestation(chunkDataPackRequestEvent *insecure. 
consensusIds := o.allNodeIds.Filter(filter.HasRole(flow.RoleConsensus)).NodeIDs() err = o.network.Send(&insecure.Event{ CorruptedNodeId: chunkDataPackRequestEvent.CorruptedNodeId, - Channel: network.PushApprovals, + Channel: channels.PushApprovals, Protocol: insecure.Protocol_PUBLISH, TargetNum: 0, TargetIds: consensusIds, diff --git a/insecure/wintermute/attackOrchestrator_test.go b/insecure/wintermute/attackOrchestrator_test.go index f40cf20d920..351dc2e2c4a 100644 --- a/insecure/wintermute/attackOrchestrator_test.go +++ b/insecure/wintermute/attackOrchestrator_test.go @@ -5,6 +5,7 @@ import ( "testing" "time" + "github.com/onflow/flow-go/network/channels" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -13,7 +14,6 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" "github.com/onflow/flow-go/model/messages" - "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/utils/unittest" ) @@ -247,7 +247,7 @@ func mockAttackNetworkForCorruptedExecutionResult( seen[event.CorruptedNodeId] = struct{}{} // make sure message being sent on correct channel - require.Equal(t, network.PushReceipts, event.Channel) + require.Equal(t, channels.PushReceipts, event.Channel) corruptedResult, ok := event.FlowProtocolEvent.(*flow.ExecutionReceipt) require.True(t, ok) diff --git a/insecure/wintermute/helpers.go b/insecure/wintermute/helpers.go index 6d4cf4e2f00..b6e78839c77 100644 --- a/insecure/wintermute/helpers.go +++ b/insecure/wintermute/helpers.go @@ -3,6 +3,7 @@ package wintermute import ( "testing" + "github.com/onflow/flow-go/network/channels" "github.com/stretchr/testify/require" "github.com/onflow/flow-go/engine/testutil" @@ -12,7 +13,6 @@ import ( "github.com/onflow/flow-go/model/messages" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/module/trace" - "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/utils/unittest" ) @@ -57,7 +57,7 @@ func 
chunkDataPackRequestForReceipts( for _, verId := range corVnIds { event := &insecure.Event{ CorruptedNodeId: verId, - Channel: network.RequestChunks, + Channel: channels.RequestChunks, Protocol: insecure.Protocol_PUBLISH, TargetNum: 0, TargetIds: executorIds[result.ID()], @@ -119,7 +119,7 @@ func receiptsWithSameResultFixture( func executionReceiptEvent(receipt *flow.ExecutionReceipt, targetIds flow.IdentifierList) *insecure.Event { return &insecure.Event{ CorruptedNodeId: receipt.ExecutorID, - Channel: network.PushReceipts, + Channel: channels.PushReceipts, Protocol: insecure.Protocol_UNICAST, TargetIds: targetIds, FlowProtocolEvent: receipt, @@ -150,7 +150,7 @@ func chunkDataPackResponseForReceipts(receipts []*flow.ExecutionReceipt, verIds for _, verId := range verIds { event := &insecure.Event{ CorruptedNodeId: receipt.ExecutorID, - Channel: network.RequestChunks, + Channel: channels.RequestChunks, Protocol: insecure.Protocol_PUBLISH, TargetNum: 0, TargetIds: flow.IdentifierList{verId}, diff --git a/integration/dkg/dkg_emulator_suite.go b/integration/dkg/dkg_emulator_suite.go index 603bd1116ff..ec0f615814a 100644 --- a/integration/dkg/dkg_emulator_suite.go +++ b/integration/dkg/dkg_emulator_suite.go @@ -393,7 +393,7 @@ func (s *DKGSuite) getResult() []string { script := fmt.Sprintf(` import FlowDKG from 0x%s - pub fun main(): [String?]? { + pub fun main(): [Name?]? 
{ return FlowDKG.dkgCompleted() } `, s.env.DkgAddress, diff --git a/integration/tests/consensus/inclusion_test.go b/integration/tests/consensus/inclusion_test.go index 2ade7b5e1fb..ba1ac3fdad5 100644 --- a/integration/tests/consensus/inclusion_test.go +++ b/integration/tests/consensus/inclusion_test.go @@ -12,7 +12,7 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/messages" "github.com/onflow/flow-go/module/signature" - "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/utils/unittest" "github.com/rs/zerolog" "github.com/stretchr/testify/require" @@ -177,7 +177,7 @@ func (is *InclusionSuite) sendCollectionToConsensus(deadline time.Time, sentinel for time.Now().Before(deadline) { ctx, cancel := context.WithTimeout(context.Background(), time.Second) is.T().Logf("%s sending collection %x to consensus node %v\n", time.Now(), colID, conID) - err := is.Collection().Send(ctx, network.PushGuarantees, sentinel, conID) + err := is.Collection().Send(ctx, channels.PushGuarantees, sentinel, conID) cancel() if err != nil { is.T().Logf("could not send collection guarantee: %s\n", err) diff --git a/integration/tests/consensus/sealing_test.go b/integration/tests/consensus/sealing_test.go index 42d0c952190..f212fe7d4ba 100644 --- a/integration/tests/consensus/sealing_test.go +++ b/integration/tests/consensus/sealing_test.go @@ -14,7 +14,7 @@ import ( "github.com/onflow/flow-go/integration/tests/lib" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/messages" - "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/utils/unittest" "github.com/rs/zerolog" "github.com/stretchr/testify/require" @@ -290,8 +290,8 @@ SearchLoop: ReceiptLoop: for time.Now().Before(deadline) { ctx, cancel := context.WithTimeout(context.Background(), time.Second) - err := ss.Execution().Send(ctx, network.PushReceipts, &receipt, 
ss.conIDs...) - err = ss.Execution2().Send(ctx, network.PushReceipts, &receipt2, ss.conIDs...) + err := ss.Execution().Send(ctx, channels.PushReceipts, &receipt, ss.conIDs...) + err = ss.Execution2().Send(ctx, channels.PushReceipts, &receipt2, ss.conIDs...) cancel() if err != nil { ss.T().Logf("could not send execution receipt: %s\n", err) @@ -338,7 +338,7 @@ ReceiptLoop: ApprovalLoop: for time.Now().Before(deadline) { ctx, cancel := context.WithTimeout(context.Background(), time.Second) - err := ss.Verification().Send(ctx, network.PushApprovals, &approval, ss.conIDs...) + err := ss.Verification().Send(ctx, channels.PushApprovals, &approval, ss.conIDs...) cancel() if err != nil { ss.T().Logf("could not send result approval: %s\n", err) diff --git a/integration/tests/execution/chunk_data_pack_test.go b/integration/tests/execution/chunk_data_pack_test.go index b7c846b2c5a..6e1c2af912f 100644 --- a/integration/tests/execution/chunk_data_pack_test.go +++ b/integration/tests/execution/chunk_data_pack_test.go @@ -13,7 +13,7 @@ import ( "github.com/onflow/flow-go/ledger/partial" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/messages" - "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" ) @@ -68,7 +68,7 @@ func (gs *ChunkDataPacksSuite) TestVerificationNodesRequestChunkDataPacks() { // TODO clear messages // send a ChunkDataRequest from Ghost node - err = gs.Ghost().Send(context.Background(), network.PushReceipts, + err = gs.Ghost().Send(context.Background(), channels.PushReceipts, &messages.ChunkDataRequest{ChunkID: chunkID, Nonce: rand.Uint64()}, []flow.Identifier{gs.exe1ID}...) 
require.NoError(gs.T(), err) diff --git a/integration/tests/execution/state_sync_test.go b/integration/tests/execution/state_sync_test.go index bb9ec8be194..e0b1b26b20d 100644 --- a/integration/tests/execution/state_sync_test.go +++ b/integration/tests/execution/state_sync_test.go @@ -8,7 +8,7 @@ import ( "github.com/onflow/flow-go/integration/tests/lib" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/messages" - "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/utils/unittest" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" @@ -69,7 +69,7 @@ func (s *StateSyncSuite) TestStateSyncAfterNetworkPartition() { s.T().Logf("block C has been sealed: %v", sealed.Header.ID()) // send a ExecutionStateSyncRequest from Ghost node - err = s.Ghost().Send(context.Background(), network.SyncExecution, + err = s.Ghost().Send(context.Background(), channels.SyncExecution, &messages.ExecutionStateSyncRequest{FromHeight: blockA.Header.Height, ToHeight: blockC.Header.Height}, []flow.Identifier{s.exe1ID}...) 
require.NoError(s.T(), err) diff --git a/integration/tests/ghost/ghost_node_example_test.go b/integration/tests/ghost/ghost_node_example_test.go index a3c4400abc7..bfe5b92b7d6 100644 --- a/integration/tests/ghost/ghost_node_example_test.go +++ b/integration/tests/ghost/ghost_node_example_test.go @@ -10,7 +10,7 @@ import ( "github.com/onflow/flow-go/integration/tests/lib" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/messages" - "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/utils/unittest" "github.com/rs/zerolog" "github.com/stretchr/testify/assert" @@ -69,7 +69,7 @@ func TestGhostNodeExample_Send(t *testing.T) { tx := unittest.TransactionBodyFixture() // send the transaction as an event to a real collection node - err = ghostClient.Send(ctx, network.PushTransactions, &tx, realCollNode.Identifier) + err = ghostClient.Send(ctx, channels.PushTransactions, &tx, realCollNode.Identifier) assert.NoError(t, err) t.Logf("%v ================> FINISH TESTING %v", time.Now().UTC(), t.Name()) } diff --git a/integration/tests/network/network_test.go b/integration/tests/network/network_test.go index f5ac703bfea..6d4d4061604 100644 --- a/integration/tests/network/network_test.go +++ b/integration/tests/network/network_test.go @@ -12,7 +12,7 @@ import ( "github.com/onflow/flow-go/integration/tests/lib" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/libp2p/message" - "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/utils/unittest" "github.com/rs/zerolog" "github.com/stretchr/testify/assert" @@ -77,7 +77,7 @@ func TestNetwork(t *testing.T) { // seed a message, it should propagate to all nodes. // (unlike regular nodes, a ghost node subscribes to all topics) - err = ghostClient.Send(ctx, network.PushGuarantees, event, targets...) + err = ghostClient.Send(ctx, channels.PushGuarantees, event, targets...) 
require.NoError(t, err) // wait for all read loops to finish diff --git a/integration/utils/scripts.go b/integration/utils/scripts.go index 689ed3e3ce3..04a72f95fd7 100644 --- a/integration/utils/scripts.go +++ b/integration/utils/scripts.go @@ -127,7 +127,7 @@ access(all) contract MyFavContract { } // items - access(all) event NewItemAddedEvent(id: UInt32, metadata: {String: String}) + access(all) event NewItemAddedEvent(id: UInt32, metadata: {Name: Name}) access(self) var itemCounter: UInt32 @@ -135,9 +135,9 @@ access(all) contract MyFavContract { pub let itemID: UInt32 - pub let metadata: {String: String} + pub let metadata: {Name: Name} - init(_ metadata: {String: String}) { + init(_ metadata: {Name: Name}) { self.itemID = MyFavContract.itemCounter self.metadata = metadata @@ -151,7 +151,7 @@ access(all) contract MyFavContract { access(self) var items: [Item] - access(all) fun AddItem(_ metadata: {String: String}){ + access(all) fun AddItem(_ metadata: {Name: Name}){ let item = Item(metadata) self.items.append(item) } @@ -181,7 +181,7 @@ access(all) contract MyFavContract { log(i) } - access(all) event LargeEvent(value: Int256, str: String, list: [UInt256], dic: {String: String}) + access(all) event LargeEvent(value: Int256, str: Name, list: [UInt256], dic: {Name: Name}) // event heavy function access(all) fun EventHeavy() { diff --git a/integration/utils/tx_stats_tracker_test.go b/integration/utils/tx_stats_tracker_test.go index f5dc7567db5..dd127b1a60f 100644 --- a/integration/utils/tx_stats_tracker_test.go +++ b/integration/utils/tx_stats_tracker_test.go @@ -40,7 +40,7 @@ func TestTxStatsTracker(t *testing.T) { assert.InDelta(t, 10., st.TTS.Max(), 1.) } -// TestTxStatsTrackerString tests the String() method. +// TestTxStatsTrackerString tests the Name() method. 
func TestTxStatsTrackerString(t *testing.T) { st := NewTxStatsTracker() assert.Equal(t, "[]\n[]\n[]\n", st.String()) diff --git a/ledger/complete/mtrie/trie/trie_test.go b/ledger/complete/mtrie/trie/trie_test.go index ecf02cbb94c..09113c58bd3 100644 --- a/ledger/complete/mtrie/trie/trie_test.go +++ b/ledger/complete/mtrie/trie/trie_test.go @@ -32,7 +32,7 @@ func Test_EmptyTrie(t *testing.T) { expectedRootHashHex := "568f4ec740fe3b5de88034cb7b1fbddb41548b068f31aebc8ae9189e429c5749" require.Equal(t, expectedRootHashHex, hashToString(rootHash)) - // check String() method does not panic: + // check Name() method does not panic: _ = emptyTrie.String() } diff --git a/model/convert/fixtures/fixture.go b/model/convert/fixtures/fixture.go index cb59647824a..03b6bdae2eb 100644 --- a/model/convert/fixtures/fixture.go +++ b/model/convert/fixtures/fixture.go @@ -173,7 +173,7 @@ var epochSetupFixtureJSON = ` { "name": "id", "value": { - "type": "String", + "type": "Name", "value": "0000000000000000000000000000000000000000000000000000000000000001" } }, @@ -187,21 +187,21 @@ var epochSetupFixtureJSON = ` { "name": "networkingAddress", "value": { - "type": "String", + "type": "Name", "value": "1.flow.com" } }, { "name": "networkingKey", "value": { - "type": "String", + "type": "Name", "value": "378dbf45d85c614feb10d8bd4f78f4b6ef8eec7d987b937e123255444657fb3da031f232a507e323df3a6f6b8f50339c51d188e80c0e7a92420945cc6ca893fc" } }, { "name": "stakingKey", "value": { - "type": "String", + "type": "Name", "value": "af4aade26d76bb2ab15dcc89adcef82a51f6f04b3cb5f4555214b40ec89813c7a5f95776ea4fe449de48166d0bbc59b919b7eabebaac9614cf6f9461fac257765415f4d8ef1376a2365ec9960121888ea5383d88a140c24c29962b0a14e4e4e7" } }, @@ -279,7 +279,7 @@ var epochSetupFixtureJSON = ` { "name": "id", "value": { - "type": "String", + "type": "Name", "value": "0000000000000000000000000000000000000000000000000000000000000002" } }, @@ -293,21 +293,21 @@ var epochSetupFixtureJSON = ` { "name": "networkingAddress", 
"value": { - "type": "String", + "type": "Name", "value": "2.flow.com" } }, { "name": "networkingKey", "value": { - "type": "String", + "type": "Name", "value": "378dbf45d85c614feb10d8bd4f78f4b6ef8eec7d987b937e123255444657fb3da031f232a507e323df3a6f6b8f50339c51d188e80c0e7a92420945cc6ca893fc" } }, { "name": "stakingKey", "value": { - "type": "String", + "type": "Name", "value": "af4aade26d76bb2ab15dcc89adcef82a51f6f04b3cb5f4555214b40ec89813c7a5f95776ea4fe449de48166d0bbc59b919b7eabebaac9614cf6f9461fac257765415f4d8ef1376a2365ec9960121888ea5383d88a140c24c29962b0a14e4e4e7" } }, @@ -385,7 +385,7 @@ var epochSetupFixtureJSON = ` { "name": "id", "value": { - "type": "String", + "type": "Name", "value": "0000000000000000000000000000000000000000000000000000000000000003" } }, @@ -399,21 +399,21 @@ var epochSetupFixtureJSON = ` { "name": "networkingAddress", "value": { - "type": "String", + "type": "Name", "value": "3.flow.com" } }, { "name": "networkingKey", "value": { - "type": "String", + "type": "Name", "value": "378dbf45d85c614feb10d8bd4f78f4b6ef8eec7d987b937e123255444657fb3da031f232a507e323df3a6f6b8f50339c51d188e80c0e7a92420945cc6ca893fc" } }, { "name": "stakingKey", "value": { - "type": "String", + "type": "Name", "value": "af4aade26d76bb2ab15dcc89adcef82a51f6f04b3cb5f4555214b40ec89813c7a5f95776ea4fe449de48166d0bbc59b919b7eabebaac9614cf6f9461fac257765415f4d8ef1376a2365ec9960121888ea5383d88a140c24c29962b0a14e4e4e7" } }, @@ -491,7 +491,7 @@ var epochSetupFixtureJSON = ` { "name": "id", "value": { - "type": "String", + "type": "Name", "value": "0000000000000000000000000000000000000000000000000000000000000004" } }, @@ -505,21 +505,21 @@ var epochSetupFixtureJSON = ` { "name": "networkingAddress", "value": { - "type": "String", + "type": "Name", "value": "4.flow.com" } }, { "name": "networkingKey", "value": { - "type": "String", + "type": "Name", "value": 
"378dbf45d85c614feb10d8bd4f78f4b6ef8eec7d987b937e123255444657fb3da031f232a507e323df3a6f6b8f50339c51d188e80c0e7a92420945cc6ca893fc" } }, { "name": "stakingKey", "value": { - "type": "String", + "type": "Name", "value": "af4aade26d76bb2ab15dcc89adcef82a51f6f04b3cb5f4555214b40ec89813c7a5f95776ea4fe449de48166d0bbc59b919b7eabebaac9614cf6f9461fac257765415f4d8ef1376a2365ec9960121888ea5383d88a140c24c29962b0a14e4e4e7" } }, @@ -597,7 +597,7 @@ var epochSetupFixtureJSON = ` { "name": "id", "value": { - "type": "String", + "type": "Name", "value": "0000000000000000000000000000000000000000000000000000000000000011" } }, @@ -611,21 +611,21 @@ var epochSetupFixtureJSON = ` { "name": "networkingAddress", "value": { - "type": "String", + "type": "Name", "value": "11.flow.com" } }, { "name": "networkingKey", "value": { - "type": "String", + "type": "Name", "value": "cfdfe8e4362c8f79d11772cb7277ab16e5033a63e8dd5d34caf1b041b77e5b2d63c2072260949ccf8907486e4cfc733c8c42ca0e4e208f30470b0d950856cd47" } }, { "name": "stakingKey", "value": { - "type": "String", + "type": "Name", "value": "8207559cd7136af378bba53a8f0196dee3849a3ab02897c1995c3e3f6ca0c4a776c3ae869d1ddbb473090054be2400ad06d7910aa2c5d1780220fdf3765a3c1764bce10c6fe66a5a2be51a422e878518bd750424bb56b8a0ecf0f8ad2057e83f" } }, @@ -703,7 +703,7 @@ var epochSetupFixtureJSON = ` { "name": "id", "value": { - "type": "String", + "type": "Name", "value": "0000000000000000000000000000000000000000000000000000000000000021" } }, @@ -717,21 +717,21 @@ var epochSetupFixtureJSON = ` { "name": "networkingAddress", "value": { - "type": "String", + "type": "Name", "value": "21.flow.com" } }, { "name": "networkingKey", "value": { - "type": "String", + "type": "Name", "value": "d64318ba0dbf68f3788fc81c41d507c5822bf53154530673127c66f50fe4469ccf1a054a868a9f88506a8999f2386d86fcd2b901779718cba4fb53c2da258f9e" } }, { "name": "stakingKey", "value": { - "type": "String", + "type": "Name", "value": 
"880b162b7ec138b36af401d07868cb08d25746d905395edbb4625bdf105d4bb2b2f4b0f4ae273a296a6efefa7ce9ccb914e39947ce0e83745125cab05d62516076ff0173ed472d3791ccef937597c9ea12381d76f547a092a4981d77ff3fba83" } }, @@ -809,7 +809,7 @@ var epochSetupFixtureJSON = ` { "name": "id", "value": { - "type": "String", + "type": "Name", "value": "0000000000000000000000000000000000000000000000000000000000000031" } }, @@ -823,21 +823,21 @@ var epochSetupFixtureJSON = ` { "name": "networkingAddress", "value": { - "type": "String", + "type": "Name", "value": "31.flow.com" } }, { "name": "networkingKey", "value": { - "type": "String", + "type": "Name", "value": "697241208dcc9142b6f53064adc8ff1c95760c68beb2ba083c1d005d40181fd7a1b113274e0163c053a3addd47cd528ec6a1f190cf465aac87c415feaae011ae" } }, { "name": "stakingKey", "value": { - "type": "String", + "type": "Name", "value": "b1f97d0a06020eca97352e1adde72270ee713c7daf58da7e74bf72235321048b4841bdfc28227964bf18e371e266e32107d238358848bcc5d0977a0db4bda0b4c33d3874ff991e595e0f537c7b87b4ddce92038ebc7b295c9ea20a1492302aa7" } }, @@ -948,7 +948,7 @@ var epochSetupFixtureJSON = ` "value": [ { "key": { - "type": "String", + "type": "Name", "value": "0000000000000000000000000000000000000000000000000000000000000001" }, "value": { @@ -958,7 +958,7 @@ var epochSetupFixtureJSON = ` }, { "key": { - "type": "String", + "type": "Name", "value": "0000000000000000000000000000000000000000000000000000000000000002" }, "value": { @@ -1005,7 +1005,7 @@ var epochSetupFixtureJSON = ` "value": [ { "key": { - "type": "String", + "type": "Name", "value": "0000000000000000000000000000000000000000000000000000000000000003" }, "value": { @@ -1015,7 +1015,7 @@ var epochSetupFixtureJSON = ` }, { "key": { - "type": "String", + "type": "Name", "value": "0000000000000000000000000000000000000000000000000000000000000004" }, "value": { @@ -1049,7 +1049,7 @@ var epochSetupFixtureJSON = ` { "name": "randomSource", "value": { - "type": "String", + "type": "Name", "value": "01020304" } }, 
@@ -1115,11 +1115,11 @@ var epochCommitFixtureJSON = ` "type": "Array", "value": [ { - "type": "String", + "type": "Name", "value": "a39cd1e1bf7e2fb0609b7388ce5215a6a4c01eef2aee86e1a007faa28a6b2a3dc876e11bb97cdb26c3846231d2d01e4d" }, { - "type": "String", + "type": "Name", "value": "91673ad9c717d396c9a0953617733c128049ac1a639653d4002ab245b121df1939430e313bcbfd06948f6a281f6bf853" } ] @@ -1128,7 +1128,7 @@ var epochCommitFixtureJSON = ` { "name": "voteMessage", "value": { - "type": "String", + "type": "Name", "value": "irrelevant_for_these_purposes" } }, @@ -1138,11 +1138,11 @@ var epochCommitFixtureJSON = ` "type": "Array", "value": [ { - "type": "String", + "type": "Name", "value": "0000000000000000000000000000000000000000000000000000000000000001" }, { - "type": "String", + "type": "Name", "value": "0000000000000000000000000000000000000000000000000000000000000002" } ] @@ -1169,11 +1169,11 @@ var epochCommitFixtureJSON = ` "type": "Array", "value": [ { - "type": "String", + "type": "Name", "value": "b2bff159971852ed63e72c37991e62c94822e52d4fdcd7bf29aaf9fb178b1c5b4ce20dd9594e029f3574cb29533b857a" }, { - "type": "String", + "type": "Name", "value": "9931562f0248c9195758da3de4fb92f24fa734cbc20c0cb80280163560e0e0348f843ac89ecbd3732e335940c1e8dccb" } ] @@ -1182,7 +1182,7 @@ var epochCommitFixtureJSON = ` { "name": "voteMessage", "value": { - "type": "String", + "type": "Name", "value": "irrelevant_for_these_purposes" } }, @@ -1192,11 +1192,11 @@ var epochCommitFixtureJSON = ` "type": "Array", "value": [ { - "type": "String", + "type": "Name", "value": "0000000000000000000000000000000000000000000000000000000000000003" }, { - "type": "String", + "type": "Name", "value": "0000000000000000000000000000000000000000000000000000000000000004" } ] @@ -1214,11 +1214,11 @@ var epochCommitFixtureJSON = ` "type": "Array", "value": [ { - "type": "String", + "type": "Name", "value": 
"8c588266db5f5cda629e83f8aa04ae9413593fac19e4865d06d291c9d14fbdd9bdb86a7a12f9ef8590c79cb635e3163315d193087e9336092987150d0cd2b14ac6365f7dc93eec573752108b8c12368abb65f0652d9f644e5aed611c37926950" }, { - "type": "String", + "type": "Name", "value": "87a339e4e5c74f089da20a33f515d8c8f4464ab53ede5a74aa2432cd1ae66d522da0c122249ee176cd747ddc83ca81090498389384201614caf51eac392c1c0a916dfdcfbbdf7363f9552b6468434add3d3f6dc91a92bbe3ee368b59b7828488" } ] diff --git a/model/convert/fixtures_test.go b/model/convert/fixtures_test.go index 5c99d8709ee..f26d07f14ce 100644 --- a/model/convert/fixtures_test.go +++ b/model/convert/fixtures_test.go @@ -173,7 +173,7 @@ var epochSetupFixtureJSON = ` { "name": "id", "value": { - "type": "String", + "type": "Name", "value": "0000000000000000000000000000000000000000000000000000000000000001" } }, @@ -187,21 +187,21 @@ var epochSetupFixtureJSON = ` { "name": "networkingAddress", "value": { - "type": "String", + "type": "Name", "value": "1.flow.com" } }, { "name": "networkingKey", "value": { - "type": "String", + "type": "Name", "value": "378dbf45d85c614feb10d8bd4f78f4b6ef8eec7d987b937e123255444657fb3da031f232a507e323df3a6f6b8f50339c51d188e80c0e7a92420945cc6ca893fc" } }, { "name": "stakingKey", "value": { - "type": "String", + "type": "Name", "value": "af4aade26d76bb2ab15dcc89adcef82a51f6f04b3cb5f4555214b40ec89813c7a5f95776ea4fe449de48166d0bbc59b919b7eabebaac9614cf6f9461fac257765415f4d8ef1376a2365ec9960121888ea5383d88a140c24c29962b0a14e4e4e7" } }, @@ -279,7 +279,7 @@ var epochSetupFixtureJSON = ` { "name": "id", "value": { - "type": "String", + "type": "Name", "value": "0000000000000000000000000000000000000000000000000000000000000002" } }, @@ -293,21 +293,21 @@ var epochSetupFixtureJSON = ` { "name": "networkingAddress", "value": { - "type": "String", + "type": "Name", "value": "2.flow.com" } }, { "name": "networkingKey", "value": { - "type": "String", + "type": "Name", "value": 
"378dbf45d85c614feb10d8bd4f78f4b6ef8eec7d987b937e123255444657fb3da031f232a507e323df3a6f6b8f50339c51d188e80c0e7a92420945cc6ca893fc" } }, { "name": "stakingKey", "value": { - "type": "String", + "type": "Name", "value": "af4aade26d76bb2ab15dcc89adcef82a51f6f04b3cb5f4555214b40ec89813c7a5f95776ea4fe449de48166d0bbc59b919b7eabebaac9614cf6f9461fac257765415f4d8ef1376a2365ec9960121888ea5383d88a140c24c29962b0a14e4e4e7" } }, @@ -385,7 +385,7 @@ var epochSetupFixtureJSON = ` { "name": "id", "value": { - "type": "String", + "type": "Name", "value": "0000000000000000000000000000000000000000000000000000000000000003" } }, @@ -399,21 +399,21 @@ var epochSetupFixtureJSON = ` { "name": "networkingAddress", "value": { - "type": "String", + "type": "Name", "value": "3.flow.com" } }, { "name": "networkingKey", "value": { - "type": "String", + "type": "Name", "value": "378dbf45d85c614feb10d8bd4f78f4b6ef8eec7d987b937e123255444657fb3da031f232a507e323df3a6f6b8f50339c51d188e80c0e7a92420945cc6ca893fc" } }, { "name": "stakingKey", "value": { - "type": "String", + "type": "Name", "value": "af4aade26d76bb2ab15dcc89adcef82a51f6f04b3cb5f4555214b40ec89813c7a5f95776ea4fe449de48166d0bbc59b919b7eabebaac9614cf6f9461fac257765415f4d8ef1376a2365ec9960121888ea5383d88a140c24c29962b0a14e4e4e7" } }, @@ -491,7 +491,7 @@ var epochSetupFixtureJSON = ` { "name": "id", "value": { - "type": "String", + "type": "Name", "value": "0000000000000000000000000000000000000000000000000000000000000004" } }, @@ -505,21 +505,21 @@ var epochSetupFixtureJSON = ` { "name": "networkingAddress", "value": { - "type": "String", + "type": "Name", "value": "4.flow.com" } }, { "name": "networkingKey", "value": { - "type": "String", + "type": "Name", "value": "378dbf45d85c614feb10d8bd4f78f4b6ef8eec7d987b937e123255444657fb3da031f232a507e323df3a6f6b8f50339c51d188e80c0e7a92420945cc6ca893fc" } }, { "name": "stakingKey", "value": { - "type": "String", + "type": "Name", "value": 
"af4aade26d76bb2ab15dcc89adcef82a51f6f04b3cb5f4555214b40ec89813c7a5f95776ea4fe449de48166d0bbc59b919b7eabebaac9614cf6f9461fac257765415f4d8ef1376a2365ec9960121888ea5383d88a140c24c29962b0a14e4e4e7" } }, @@ -597,7 +597,7 @@ var epochSetupFixtureJSON = ` { "name": "id", "value": { - "type": "String", + "type": "Name", "value": "0000000000000000000000000000000000000000000000000000000000000011" } }, @@ -611,21 +611,21 @@ var epochSetupFixtureJSON = ` { "name": "networkingAddress", "value": { - "type": "String", + "type": "Name", "value": "11.flow.com" } }, { "name": "networkingKey", "value": { - "type": "String", + "type": "Name", "value": "cfdfe8e4362c8f79d11772cb7277ab16e5033a63e8dd5d34caf1b041b77e5b2d63c2072260949ccf8907486e4cfc733c8c42ca0e4e208f30470b0d950856cd47" } }, { "name": "stakingKey", "value": { - "type": "String", + "type": "Name", "value": "8207559cd7136af378bba53a8f0196dee3849a3ab02897c1995c3e3f6ca0c4a776c3ae869d1ddbb473090054be2400ad06d7910aa2c5d1780220fdf3765a3c1764bce10c6fe66a5a2be51a422e878518bd750424bb56b8a0ecf0f8ad2057e83f" } }, @@ -703,7 +703,7 @@ var epochSetupFixtureJSON = ` { "name": "id", "value": { - "type": "String", + "type": "Name", "value": "0000000000000000000000000000000000000000000000000000000000000021" } }, @@ -717,21 +717,21 @@ var epochSetupFixtureJSON = ` { "name": "networkingAddress", "value": { - "type": "String", + "type": "Name", "value": "21.flow.com" } }, { "name": "networkingKey", "value": { - "type": "String", + "type": "Name", "value": "d64318ba0dbf68f3788fc81c41d507c5822bf53154530673127c66f50fe4469ccf1a054a868a9f88506a8999f2386d86fcd2b901779718cba4fb53c2da258f9e" } }, { "name": "stakingKey", "value": { - "type": "String", + "type": "Name", "value": "880b162b7ec138b36af401d07868cb08d25746d905395edbb4625bdf105d4bb2b2f4b0f4ae273a296a6efefa7ce9ccb914e39947ce0e83745125cab05d62516076ff0173ed472d3791ccef937597c9ea12381d76f547a092a4981d77ff3fba83" } }, @@ -809,7 +809,7 @@ var epochSetupFixtureJSON = ` { "name": "id", "value": { - 
"type": "String", + "type": "Name", "value": "0000000000000000000000000000000000000000000000000000000000000031" } }, @@ -823,21 +823,21 @@ var epochSetupFixtureJSON = ` { "name": "networkingAddress", "value": { - "type": "String", + "type": "Name", "value": "31.flow.com" } }, { "name": "networkingKey", "value": { - "type": "String", + "type": "Name", "value": "697241208dcc9142b6f53064adc8ff1c95760c68beb2ba083c1d005d40181fd7a1b113274e0163c053a3addd47cd528ec6a1f190cf465aac87c415feaae011ae" } }, { "name": "stakingKey", "value": { - "type": "String", + "type": "Name", "value": "b1f97d0a06020eca97352e1adde72270ee713c7daf58da7e74bf72235321048b4841bdfc28227964bf18e371e266e32107d238358848bcc5d0977a0db4bda0b4c33d3874ff991e595e0f537c7b87b4ddce92038ebc7b295c9ea20a1492302aa7" } }, @@ -948,7 +948,7 @@ var epochSetupFixtureJSON = ` "value": [ { "key": { - "type": "String", + "type": "Name", "value": "0000000000000000000000000000000000000000000000000000000000000001" }, "value": { @@ -958,7 +958,7 @@ var epochSetupFixtureJSON = ` }, { "key": { - "type": "String", + "type": "Name", "value": "0000000000000000000000000000000000000000000000000000000000000002" }, "value": { @@ -1005,7 +1005,7 @@ var epochSetupFixtureJSON = ` "value": [ { "key": { - "type": "String", + "type": "Name", "value": "0000000000000000000000000000000000000000000000000000000000000003" }, "value": { @@ -1015,7 +1015,7 @@ var epochSetupFixtureJSON = ` }, { "key": { - "type": "String", + "type": "Name", "value": "0000000000000000000000000000000000000000000000000000000000000004" }, "value": { @@ -1049,7 +1049,7 @@ var epochSetupFixtureJSON = ` { "name": "randomSource", "value": { - "type": "String", + "type": "Name", "value": "01020304" } }, @@ -1115,11 +1115,11 @@ var epochCommitFixtureJSON = ` "type": "Array", "value": [ { - "type": "String", + "type": "Name", "value": "a39cd1e1bf7e2fb0609b7388ce5215a6a4c01eef2aee86e1a007faa28a6b2a3dc876e11bb97cdb26c3846231d2d01e4d" }, { - "type": "String", + "type": "Name", 
"value": "91673ad9c717d396c9a0953617733c128049ac1a639653d4002ab245b121df1939430e313bcbfd06948f6a281f6bf853" } ] @@ -1128,7 +1128,7 @@ var epochCommitFixtureJSON = ` { "name": "voteMessage", "value": { - "type": "String", + "type": "Name", "value": "irrelevant_for_these_purposes" } }, @@ -1138,11 +1138,11 @@ var epochCommitFixtureJSON = ` "type": "Array", "value": [ { - "type": "String", + "type": "Name", "value": "0000000000000000000000000000000000000000000000000000000000000001" }, { - "type": "String", + "type": "Name", "value": "0000000000000000000000000000000000000000000000000000000000000002" } ] @@ -1169,11 +1169,11 @@ var epochCommitFixtureJSON = ` "type": "Array", "value": [ { - "type": "String", + "type": "Name", "value": "b2bff159971852ed63e72c37991e62c94822e52d4fdcd7bf29aaf9fb178b1c5b4ce20dd9594e029f3574cb29533b857a" }, { - "type": "String", + "type": "Name", "value": "9931562f0248c9195758da3de4fb92f24fa734cbc20c0cb80280163560e0e0348f843ac89ecbd3732e335940c1e8dccb" } ] @@ -1182,7 +1182,7 @@ var epochCommitFixtureJSON = ` { "name": "voteMessage", "value": { - "type": "String", + "type": "Name", "value": "irrelevant_for_these_purposes" } }, @@ -1192,11 +1192,11 @@ var epochCommitFixtureJSON = ` "type": "Array", "value": [ { - "type": "String", + "type": "Name", "value": "0000000000000000000000000000000000000000000000000000000000000003" }, { - "type": "String", + "type": "Name", "value": "0000000000000000000000000000000000000000000000000000000000000004" } ] @@ -1214,11 +1214,11 @@ var epochCommitFixtureJSON = ` "type": "Array", "value": [ { - "type": "String", + "type": "Name", "value": "8c588266db5f5cda629e83f8aa04ae9413593fac19e4865d06d291c9d14fbdd9bdb86a7a12f9ef8590c79cb635e3163315d193087e9336092987150d0cd2b14ac6365f7dc93eec573752108b8c12368abb65f0652d9f644e5aed611c37926950" }, { - "type": "String", + "type": "Name", "value": 
"87a339e4e5c74f089da20a33f515d8c8f4464ab53ede5a74aa2432cd1ae66d522da0c122249ee176cd747ddc83ca81090498389384201614caf51eac392c1c0a916dfdcfbbdf7363f9552b6468434add3d3f6dc91a92bbe3ee368b59b7828488" } ] diff --git a/model/flow/identifierList.go b/model/flow/identifierList.go index 33ce2447707..964fd57a126 100644 --- a/model/flow/identifierList.go +++ b/model/flow/identifierList.go @@ -12,7 +12,7 @@ import ( type IdentifierList []Identifier // Len returns length of the IdentiferList in the number of stored identifiers. -// It satisfies the sort.Interface making the IdentifierList sortable. +// It satisfies the sort.Type making the IdentifierList sortable. func (il IdentifierList) Len() int { return len(il) } @@ -28,7 +28,7 @@ func (il IdentifierList) Lookup() map[Identifier]struct{} { // Less returns true if element i in the IdentifierList is less than j based on its identifier. // Otherwise it returns true. -// It satisfies the sort.Interface making the IdentifierList sortable. +// It satisfies the sort.Type making the IdentifierList sortable. func (il IdentifierList) Less(i, j int) bool { // bytes package already implements Comparable for []byte. switch bytes.Compare(il[i][:], il[j][:]) { @@ -43,7 +43,7 @@ func (il IdentifierList) Less(i, j int) bool { } // Swap swaps the element i and j in the IdentifierList. -// It satisfies the sort.Interface making the IdentifierList sortable. +// It satisfies the sort.Type making the IdentifierList sortable. func (il IdentifierList) Swap(i, j int) { il[j], il[i] = il[i], il[j] } diff --git a/model/flow/ledger_test.go b/model/flow/ledger_test.go index ec612b0d86f..e70e7e2471a 100644 --- a/model/flow/ledger_test.go +++ b/model/flow/ledger_test.go @@ -8,7 +8,7 @@ import ( ) // this benchmark can run with this command: -// go test -run=String -bench=. +// go test -run=Name -bench=. 
// this is to prevent lint errors var length int diff --git a/model/flow/role.go b/model/flow/role.go index f138a185d75..11acfdd0920 100644 --- a/model/flow/role.go +++ b/model/flow/role.go @@ -110,20 +110,20 @@ func (r RoleList) Union(other RoleList) RoleList { } // Len returns length of the RoleList in the number of stored roles. -// It satisfies the sort.Interface making the RoleList sortable. +// It satisfies the sort.Type making the RoleList sortable. func (r RoleList) Len() int { return len(r) } // Less returns true if element i in the RoleList is less than j based on the numerical value of its role. // Otherwise it returns true. -// It satisfies the sort.Interface making the RoleList sortable. +// It satisfies the sort.Type making the RoleList sortable. func (r RoleList) Less(i, j int) bool { return r[i] < r[j] } // Swap swaps the element i and j in the RoleList. -// It satisfies the sort.Interface making the RoleList sortable. +// It satisfies the sort.Type making the RoleList sortable. func (r RoleList) Swap(i, j int) { r[i], r[j] = r[j], r[i] } diff --git a/module/dkg/client.go b/module/dkg/client.go index e8401f23736..f7902c43764 100644 --- a/module/dkg/client.go +++ b/module/dkg/client.go @@ -256,7 +256,7 @@ func (c *Client) SubmitResult(groupPublicKey crypto.PublicKey, publicKeys []cryp // trim0x trims the `0x` if it exists from a hexadecimal string // This method is required as the DKG contract expects key lengths of 192 bytes -// the `PublicKey.String()` method returns the hexadecimal string representation of the +// the `PublicKey.Name()` method returns the hexadecimal string representation of the // public key prefixed with `0x` resulting in length of 194 bytes. 
func trim0x(hexString string) string { diff --git a/module/epochs/epoch_config.go b/module/epochs/epoch_config.go index 179add16f79..f9dbae46c06 100644 --- a/module/epochs/epoch_config.go +++ b/module/epochs/epoch_config.go @@ -42,7 +42,7 @@ func DefaultEpochConfig() EpochConfig { // transaction argument for the deployEpoch transaction used during execution // state bootstrapping. // -// The resulting argument has type [{String: UInt64}] which represents a list +// The resulting argument has type [{Name: UInt64}] which represents a list // of weight mappings for each cluster. The full Cluster struct is constructed // within the transaction in Cadence for simplicity here. // diff --git a/module/metrics/example/collection/main.go b/module/metrics/example/collection/main.go index 8a7e2cdfc60..d1cafed3f83 100644 --- a/module/metrics/example/collection/main.go +++ b/module/metrics/example/collection/main.go @@ -4,12 +4,12 @@ import ( "math/rand" "time" + "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/module/metrics/example" "github.com/onflow/flow-go/module/trace" - "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/queue" "github.com/onflow/flow-go/utils/unittest" ) @@ -30,8 +30,8 @@ func main() { NetworkCollector: metrics.NewNetworkCollector(), } - topic1 := network.TestNetworkChannel.String() - topic2 := network.TestMetricsChannel.String() + topic1 := channels.TestNetworkChannel.String() + topic2 := channels.TestMetricsChannel.String() message1 := "CollectionRequest" message2 := "ClusterBlockProposal" diff --git a/module/metrics/example/consensus/main.go b/module/metrics/example/consensus/main.go index 55ebd285c1d..511140772b3 100644 --- a/module/metrics/example/consensus/main.go +++ b/module/metrics/example/consensus/main.go @@ -5,6 +5,7 @@ import ( "math/rand" "time" + "github.com/onflow/flow-go/network/channels" 
"github.com/prometheus/client_golang/prometheus" "github.com/rs/zerolog" @@ -12,7 +13,6 @@ import ( "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/module/metrics/example" "github.com/onflow/flow-go/module/trace" - "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/utils/unittest" ) @@ -63,8 +63,8 @@ func main() { collector.FinishBlockToSeal(flow.HashToID(entityID)) } - collProvider := network.TestNetworkChannel.String() - collIngest := network.TestMetricsChannel.String() + collProvider := channels.TestNetworkChannel.String() + collIngest := channels.TestMetricsChannel.String() message1 := "CollectionRequest" message2 := "ClusterBlockProposal" diff --git a/module/mock/dht_metrics.go b/module/mock/dht_metrics.go new file mode 100644 index 00000000000..9120da430cc --- /dev/null +++ b/module/mock/dht_metrics.go @@ -0,0 +1,35 @@ +// Code generated by mockery v2.13.0. DO NOT EDIT. + +package mock + +import mock "github.com/stretchr/testify/mock" + +// DHTMetrics is an autogenerated mock type for the DHTMetrics type +type DHTMetrics struct { + mock.Mock +} + +// RoutingTablePeerAdded provides a mock function with given fields: +func (_m *DHTMetrics) RoutingTablePeerAdded() { + _m.Called() +} + +// RoutingTablePeerRemoved provides a mock function with given fields: +func (_m *DHTMetrics) RoutingTablePeerRemoved() { + _m.Called() +} + +type NewDHTMetricsT interface { + mock.TestingT + Cleanup(func()) +} + +// NewDHTMetrics creates a new instance of DHTMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
+func NewDHTMetrics(t NewDHTMetricsT) *DHTMetrics { + mock := &DHTMetrics{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/module/mock/network_metrics.go b/module/mock/network_metrics.go index a00e0fead46..deee82f07ab 100644 --- a/module/mock/network_metrics.go +++ b/module/mock/network_metrics.go @@ -98,6 +98,16 @@ func (_m *NetworkMetrics) QueueDuration(duration time.Duration, priority int) { _m.Called(duration, priority) } +// RoutingTablePeerAdded provides a mock function with given fields: +func (_m *NetworkMetrics) RoutingTablePeerAdded() { + _m.Called() +} + +// RoutingTablePeerRemoved provides a mock function with given fields: +func (_m *NetworkMetrics) RoutingTablePeerRemoved() { + _m.Called() +} + type NewNetworkMetricsT interface { mock.TestingT Cleanup(func()) diff --git a/module/mock/sealing_configs_getter.go b/module/mock/sealing_configs_getter.go new file mode 100644 index 00000000000..9e6ee9544ae --- /dev/null +++ b/module/mock/sealing_configs_getter.go @@ -0,0 +1,95 @@ +// Code generated by mockery v2.13.0. DO NOT EDIT. 
+ +package mock + +import mock "github.com/stretchr/testify/mock" + +// SealingConfigsGetter is an autogenerated mock type for the SealingConfigsGetter type +type SealingConfigsGetter struct { + mock.Mock +} + +// ApprovalRequestsThresholdConst provides a mock function with given fields: +func (_m *SealingConfigsGetter) ApprovalRequestsThresholdConst() uint64 { + ret := _m.Called() + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} + +// ChunkAlphaConst provides a mock function with given fields: +func (_m *SealingConfigsGetter) ChunkAlphaConst() uint { + ret := _m.Called() + + var r0 uint + if rf, ok := ret.Get(0).(func() uint); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint) + } + + return r0 +} + +// EmergencySealingActiveConst provides a mock function with given fields: +func (_m *SealingConfigsGetter) EmergencySealingActiveConst() bool { + ret := _m.Called() + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// RequireApprovalsForSealConstructionDynamicValue provides a mock function with given fields: +func (_m *SealingConfigsGetter) RequireApprovalsForSealConstructionDynamicValue() uint { + ret := _m.Called() + + var r0 uint + if rf, ok := ret.Get(0).(func() uint); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint) + } + + return r0 +} + +// RequireApprovalsForSealVerificationConst provides a mock function with given fields: +func (_m *SealingConfigsGetter) RequireApprovalsForSealVerificationConst() uint { + ret := _m.Called() + + var r0 uint + if rf, ok := ret.Get(0).(func() uint); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint) + } + + return r0 +} + +type NewSealingConfigsGetterT interface { + mock.TestingT + Cleanup(func()) +} + +// NewSealingConfigsGetter creates a new instance of SealingConfigsGetter. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewSealingConfigsGetter(t NewSealingConfigsGetterT) *SealingConfigsGetter { + mock := &SealingConfigsGetter{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/module/mock/sealing_configs_setter.go b/module/mock/sealing_configs_setter.go new file mode 100644 index 00000000000..5e5d084e2ad --- /dev/null +++ b/module/mock/sealing_configs_setter.go @@ -0,0 +1,116 @@ +// Code generated by mockery v2.13.0. DO NOT EDIT. + +package mock + +import mock "github.com/stretchr/testify/mock" + +// SealingConfigsSetter is an autogenerated mock type for the SealingConfigsSetter type +type SealingConfigsSetter struct { + mock.Mock +} + +// ApprovalRequestsThresholdConst provides a mock function with given fields: +func (_m *SealingConfigsSetter) ApprovalRequestsThresholdConst() uint64 { + ret := _m.Called() + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} + +// ChunkAlphaConst provides a mock function with given fields: +func (_m *SealingConfigsSetter) ChunkAlphaConst() uint { + ret := _m.Called() + + var r0 uint + if rf, ok := ret.Get(0).(func() uint); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint) + } + + return r0 +} + +// EmergencySealingActiveConst provides a mock function with given fields: +func (_m *SealingConfigsSetter) EmergencySealingActiveConst() bool { + ret := _m.Called() + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// RequireApprovalsForSealConstructionDynamicValue provides a mock function with given fields: +func (_m *SealingConfigsSetter) RequireApprovalsForSealConstructionDynamicValue() uint { + ret := _m.Called() + + var r0 uint + if rf, ok := ret.Get(0).(func() uint); ok { + r0 = rf() + } else { + r0 = 
ret.Get(0).(uint) + } + + return r0 +} + +// RequireApprovalsForSealVerificationConst provides a mock function with given fields: +func (_m *SealingConfigsSetter) RequireApprovalsForSealVerificationConst() uint { + ret := _m.Called() + + var r0 uint + if rf, ok := ret.Get(0).(func() uint); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint) + } + + return r0 +} + +// SetRequiredApprovalsForSealingConstruction provides a mock function with given fields: newVal +func (_m *SealingConfigsSetter) SetRequiredApprovalsForSealingConstruction(newVal uint) (uint, error) { + ret := _m.Called(newVal) + + var r0 uint + if rf, ok := ret.Get(0).(func(uint) uint); ok { + r0 = rf(newVal) + } else { + r0 = ret.Get(0).(uint) + } + + var r1 error + if rf, ok := ret.Get(1).(func(uint) error); ok { + r1 = rf(newVal) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type NewSealingConfigsSetterT interface { + mock.TestingT + Cleanup(func()) +} + +// NewSealingConfigsSetter creates a new instance of SealingConfigsSetter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
+func NewSealingConfigsSetter(t NewSealingConfigsSetterT) *SealingConfigsSetter { + mock := &SealingConfigsSetter{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/network/cache/rcvcache_test.go b/network/cache/rcvcache_test.go index 45ee9e04622..4f4723af074 100644 --- a/network/cache/rcvcache_test.go +++ b/network/cache/rcvcache_test.go @@ -6,6 +6,7 @@ import ( "testing" "time" + "github.com/onflow/flow-go/network/channels" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" @@ -13,7 +14,6 @@ import ( "github.com/onflow/flow-go/crypto/hash" "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/network" netcache "github.com/onflow/flow-go/network/cache" "github.com/onflow/flow-go/network/p2p" "github.com/onflow/flow-go/utils/unittest" @@ -41,20 +41,20 @@ func (r *ReceiveCacheTestSuite) SetupTest() { // TestSingleElementAdd adds a single element to the cache and verifies its existence. 
func (r *ReceiveCacheTestSuite) TestSingleElementAdd() { - eventID, err := p2p.EventId(network.Channel("0"), []byte("event-1")) + eventID, err := p2p.EventId(channels.Channel("0"), []byte("event-1")) require.NoError(r.T(), err) assert.True(r.Suite.T(), r.c.Add(eventID)) assert.False(r.Suite.T(), r.c.Add(eventID)) // same channel but different event should be treated as unseen - eventID2, err := p2p.EventId(network.Channel("0"), []byte("event-2")) + eventID2, err := p2p.EventId(channels.Channel("0"), []byte("event-2")) require.NoError(r.T(), err) assert.True(r.Suite.T(), r.c.Add(eventID2)) assert.False(r.Suite.T(), r.c.Add(eventID2)) // same event but different channels should be treated as unseen - eventID3, err := p2p.EventId(network.Channel("1"), []byte("event-2")) + eventID3, err := p2p.EventId(channels.Channel("1"), []byte("event-2")) require.NoError(r.T(), err) assert.True(r.Suite.T(), r.c.Add(eventID3)) assert.False(r.Suite.T(), r.c.Add(eventID3)) @@ -62,7 +62,7 @@ func (r *ReceiveCacheTestSuite) TestSingleElementAdd() { // TestNoneExistence evaluates the correctness of cache operation against non-existing element func (r *ReceiveCacheTestSuite) TestNoneExistence() { - eventID, err := p2p.EventId(network.Channel("1"), []byte("non-existing event")) + eventID, err := p2p.EventId(channels.Channel("1"), []byte("non-existing event")) require.NoError(r.T(), err) // adding new event to cache should return true @@ -74,7 +74,7 @@ func (r *ReceiveCacheTestSuite) TestMultipleElementAdd() { // creates and populates slice of 10 events eventIDs := make([]hash.Hash, 0) for i := 0; i < r.size; i++ { - eventID, err := p2p.EventId(network.Channel("1"), []byte(fmt.Sprintf("event-%d", i))) + eventID, err := p2p.EventId(channels.Channel("1"), []byte(fmt.Sprintf("event-%d", i))) require.NoError(r.T(), err) eventIDs = append(eventIDs, eventID) @@ -112,7 +112,7 @@ func (r *ReceiveCacheTestSuite) TestLRU() { eventIDs := make([]hash.Hash, 0) total := r.size + 1 for i := 0; i < total; 
i++ { - eventID, err := p2p.EventId(network.Channel("1"), []byte(fmt.Sprintf("event-%d", i))) + eventID, err := p2p.EventId(channels.Channel("1"), []byte(fmt.Sprintf("event-%d", i))) require.NoError(r.T(), err) eventIDs = append(eventIDs, eventID) diff --git a/network/channel.go b/network/channels/channel.go similarity index 84% rename from network/channel.go rename to network/channels/channel.go index 3dd75bc57f8..920fe530522 100644 --- a/network/channel.go +++ b/network/channels/channel.go @@ -1,4 +1,4 @@ -package network +package channels import ( "sort" @@ -17,20 +17,20 @@ func (c Channel) String() string { } // Len returns length of the ChannelList in the number of stored Channels. -// It satisfies the sort.Interface making the ChannelList sortable. +// It satisfies the sort.Type making the ChannelList sortable. func (cl ChannelList) Len() int { return len(cl) } // Less returns true if element i in the ChannelList is less than j based on the numerical value of its Channel. // Otherwise it returns true. -// It satisfies the sort.Interface making the ChannelList sortable. +// It satisfies the sort.Type making the ChannelList sortable. func (cl ChannelList) Less(i, j int) bool { return cl[i] < cl[j] } // Swap swaps the element i and j in the ChannelList. -// It satisfies the sort.Interface making the ChannelList sortable. +// It satisfies the sort.Type making the ChannelList sortable. 
func (cl ChannelList) Swap(i, j int) { cl[i], cl[j] = cl[j], cl[i] } diff --git a/network/channels.go b/network/channels/channels.go similarity index 99% rename from network/channels.go rename to network/channels/channels.go index a8e833ee2fa..216dcf06c9a 100644 --- a/network/channels.go +++ b/network/channels/channels.go @@ -1,6 +1,6 @@ // (c) 2019 Dapper Labs - ALL RIGHTS RESERVED -package network +package channels import ( "fmt" diff --git a/network/channels_test.go b/network/channels/channels_test.go similarity index 91% rename from network/channels_test.go rename to network/channels/channels_test.go index fae8fafa7ac..19cd56eb64c 100644 --- a/network/channels_test.go +++ b/network/channels/channels_test.go @@ -1,4 +1,4 @@ -package network +package channels import ( "testing" @@ -119,13 +119,13 @@ func TestUniqueChannels_Uniqueness(t *testing.T) { // We use the identifier of RoleList to determine their uniqueness. func TestUniqueChannels_ClusterChannels(t *testing.T) { channels := ChannelsByRole(flow.RoleCollection) - consensusCluster := ChannelConsensusCluster(flow.Emulator) - syncCluster := ChannelSyncCluster(flow.Emulator) + consensusCluster := channels.ChannelConsensusCluster(flow.Emulator) + syncCluster := channels.ChannelSyncCluster(flow.Emulator) channels = append(channels, consensusCluster, syncCluster) - uniques := UniqueChannels(channels) + uniques := channels.UniqueChannels(channels) // collection role has two cluster and one non-cluster channels all with the same RoleList. // Hence all of them should be returned as unique channels. 
- require.Contains(t, uniques, syncCluster) // cluster channel - require.Contains(t, uniques, consensusCluster) // cluster channel - require.Contains(t, uniques, PushTransactions) // non-cluster channel + require.Contains(t, uniques, syncCluster) // cluster channel + require.Contains(t, uniques, consensusCluster) // cluster channel + require.Contains(t, uniques, channels.PushTransactions) // non-cluster channel } diff --git a/network/channels/topic.go b/network/channels/topic.go new file mode 100644 index 00000000000..6a5ba88b686 --- /dev/null +++ b/network/channels/topic.go @@ -0,0 +1,9 @@ +package channels + +// Topic is the internal type of Libp2p which corresponds to the Channel in the network level. +// It is a virtual medium enabling nodes to subscribe and communicate over epidemic dissemination. +type Topic string + +func (t Topic) String() string { + return string(t) +} diff --git a/network/conduit.go b/network/conduit.go index aafcc58c1a0..38ecff8ea98 100644 --- a/network/conduit.go +++ b/network/conduit.go @@ -8,6 +8,7 @@ import ( "fmt" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/network/channels" ) // ConduitFactory is an interface type that is utilized by the Network to create conduits for the channels. @@ -19,7 +20,7 @@ type ConduitFactory interface { // NewConduit creates a conduit on the specified channel. // Prior to creating any conduit, the factory requires an Adapter to be registered with it. 
- NewConduit(context.Context, Channel) (Conduit, error) + NewConduit(context.Context, channels.Channel) (Conduit, error) } // Conduit represents the interface for engines to communicate over the diff --git a/network/converter/network.go b/network/converter/network.go index 3653562634b..f5faf792db8 100644 --- a/network/converter/network.go +++ b/network/converter/network.go @@ -2,25 +2,26 @@ package converter import ( "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" ) type Network struct { network.Network - from network.Channel - to network.Channel + from channels.Channel + to channels.Channel } -func NewNetwork(net network.Network, from network.Channel, to network.Channel) *Network { +func NewNetwork(net network.Network, from channels.Channel, to channels.Channel) *Network { return &Network{net, from, to} } -func (n *Network) convert(channel network.Channel) network.Channel { +func (n *Network) convert(channel channels.Channel) channels.Channel { if channel == n.from { return n.to } return channel } -func (n *Network) Register(channel network.Channel, engine network.MessageProcessor) (network.Conduit, error) { +func (n *Network) Register(channel channels.Channel, engine network.MessageProcessor) (network.Conduit, error) { return n.Network.Register(n.convert(channel), engine) } diff --git a/network/engine.go b/network/engine.go index 1a6a29a1d1a..fc33c6f9563 100644 --- a/network/engine.go +++ b/network/engine.go @@ -4,6 +4,7 @@ package network import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/network/channels" ) // Engine represents an isolated process running across the peer-to-peer network @@ -24,7 +25,7 @@ type Engine interface { // for processing in a non-blocking manner. It returns instantly and logs // a potential processing error internally when done. 
// Deprecated: Only applicable for use by the networking layer, which should use MessageProcessor instead - Submit(channel Channel, originID flow.Identifier, event interface{}) + Submit(channel channels.Channel, originID flow.Identifier, event interface{}) // ProcessLocal processes an event originating on the local node. // Deprecated: To synchronously process a local message: @@ -36,7 +37,7 @@ type Engine interface { // in a blocking manner. It returns the potential processing error when // done. // Deprecated: Only applicable for use by the networking layer, which should use MessageProcessor instead - Process(channel Channel, originID flow.Identifier, event interface{}) error + Process(channel channels.Channel, originID flow.Identifier, event interface{}) error } // MessageProcessor represents a component which receives messages from the @@ -45,5 +46,5 @@ type Engine interface { // (including invalid message types, malformed messages, etc.). Because of this, // node-internal messages should NEVER be submitted to a component using Process. type MessageProcessor interface { - Process(channel Channel, originID flow.Identifier, message interface{}) error + Process(channel channels.Channel, originID flow.Identifier, message interface{}) error } diff --git a/network/message/authorization.go b/network/message/authorization.go new file mode 100644 index 00000000000..a5e3b2f82c8 --- /dev/null +++ b/network/message/authorization.go @@ -0,0 +1,391 @@ +package message + +import ( + "errors" + "fmt" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/libp2p/message" + "github.com/onflow/flow-go/model/messages" + "github.com/onflow/flow-go/network/channels" +) + +// MsgAuthConfig contains authorization information for a specific flow message. The authorization +// is represented as a map from network channel -> list of all roles allowed to send the message on +// the channel. 
+type MsgAuthConfig struct { + // Name is the string representation of the message type. + Name string + // Type a func that returns a new instance of message type. + Type func() interface{} + // Config is the mapping of network channel to list of authorized flow roles. + Config map[channels.Channel]flow.RoleList +} + +// IsAuthorized checks if the specified role is authorized to send the message on channel and +// asserts that the message is authorized to be sent on channel. +// Expected error returns during normal operations: +// * ErrUnauthorizedMessageOnChannel: if channel does not exist in message config +// * ErrUnauthorizedRole: if list of authorized roles for message config does not include role +func (m MsgAuthConfig) IsAuthorized(role flow.Role, channel channels.Channel) error { + authorizedRoles, ok := m.Config[channel] + if !ok { + return fmt.Errorf("could not get authorization config for message type (%s) on channel (%s): %w", m.Name, channel, ErrUnauthorizedMessageOnChannel) + } + + if !authorizedRoles.Contains(role) { + return ErrUnauthorizedRole + } + + return nil +} + +var ( + ErrUnknownMsgType = errors.New("could not get authorization config for unknown message type") + ErrUnauthorizedMessageOnChannel = errors.New("message is not authorized to be sent on channel") + ErrUnauthorizedRole = errors.New("sender role not authorized to send message on channel") + AuthorizationConfigs map[string]MsgAuthConfig +) + +func initializeMessageAuthConfigsMap() { + AuthorizationConfigs = make(map[string]MsgAuthConfig) + + // consensus + AuthorizationConfigs[BlockProposal] = MsgAuthConfig{ + Name: BlockProposal, + Type: func() interface{} { + return new(messages.BlockProposal) + }, + Config: map[channels.Channel]flow.RoleList{ + channels.ConsensusCommittee: {flow.RoleConsensus}, + channels.PushBlocks: {flow.RoleConsensus}, // channel alias ReceiveBlocks = PushBlocks + }, + } + AuthorizationConfigs[BlockVote] = MsgAuthConfig{ + Name: BlockVote, + Type: func() 
interface{} { + return new(messages.BlockVote) + }, + Config: map[channels.Channel]flow.RoleList{ + channels.ConsensusCommittee: {flow.RoleConsensus}, + }, + } + + // protocol state sync + AuthorizationConfigs[SyncRequest] = MsgAuthConfig{ + Name: SyncRequest, + Type: func() interface{} { + return new(messages.SyncRequest) + }, + Config: map[channels.Channel]flow.RoleList{ + channels.SyncCommittee: flow.Roles(), + channels.SyncClusterPrefix: flow.Roles(), + }, + } + AuthorizationConfigs[SyncResponse] = MsgAuthConfig{ + Name: SyncResponse, + Type: func() interface{} { + return new(messages.SyncResponse) + }, + Config: map[channels.Channel]flow.RoleList{ + channels.SyncCommittee: flow.Roles(), + channels.SyncClusterPrefix: flow.Roles(), + }, + } + AuthorizationConfigs[RangeRequest] = MsgAuthConfig{ + Name: RangeRequest, + Type: func() interface{} { + return new(messages.RangeRequest) + }, + Config: map[channels.Channel]flow.RoleList{ + channels.SyncCommittee: flow.Roles(), + channels.SyncClusterPrefix: flow.Roles(), + }, + } + AuthorizationConfigs[BatchRequest] = MsgAuthConfig{ + Name: BatchRequest, + Type: func() interface{} { + return new(messages.BatchRequest) + }, + Config: map[channels.Channel]flow.RoleList{ + channels.SyncCommittee: flow.Roles(), + channels.SyncClusterPrefix: flow.Roles(), + }, + } + AuthorizationConfigs[BlockResponse] = MsgAuthConfig{ + Name: BlockResponse, + Type: func() interface{} { + return new(messages.BlockResponse) + }, + Config: map[channels.Channel]flow.RoleList{ + channels.SyncCommittee: flow.Roles(), + channels.SyncClusterPrefix: flow.Roles(), + }, + } + + // cluster consensus + AuthorizationConfigs[ClusterBlockProposal] = MsgAuthConfig{ + Name: ClusterBlockProposal, + Type: func() interface{} { + return new(messages.ClusterBlockProposal) + }, + Config: map[channels.Channel]flow.RoleList{ + channels.ConsensusClusterPrefix: {flow.RoleCollection}, + }, + } + AuthorizationConfigs[ClusterBlockVote] = MsgAuthConfig{ + Name: 
ClusterBlockVote, + Type: func() interface{} { + return new(messages.ClusterBlockVote) + }, + Config: map[channels.Channel]flow.RoleList{ + channels.ConsensusClusterPrefix: {flow.RoleCollection}, + }, + } + AuthorizationConfigs[ClusterBlockResponse] = MsgAuthConfig{ + Name: ClusterBlockResponse, + Type: func() interface{} { + return new(messages.ClusterBlockResponse) + }, + Config: map[channels.Channel]flow.RoleList{ + channels.ConsensusClusterPrefix: {flow.RoleCollection}, + }, + } + + // collections, guarantees & transactions + AuthorizationConfigs[CollectionGuarantee] = MsgAuthConfig{ + Name: CollectionGuarantee, + Type: func() interface{} { + return new(flow.CollectionGuarantee) + }, + Config: map[channels.Channel]flow.RoleList{ + channels.PushGuarantees: {flow.RoleCollection}, // channel alias ReceiveGuarantees = PushGuarantees + }, + } + AuthorizationConfigs[Transaction] = MsgAuthConfig{ + Name: Transaction, + Type: func() interface{} { + return new(flow.Transaction) + }, + Config: map[channels.Channel]flow.RoleList{ + channels.PushTransactions: {flow.RoleCollection}, // channel alias ReceiveTransactions = PushTransactions + }, + } + AuthorizationConfigs[TransactionBody] = MsgAuthConfig{ + Name: TransactionBody, + Type: func() interface{} { + return new(flow.TransactionBody) + }, + Config: map[channels.Channel]flow.RoleList{ + channels.PushTransactions: {flow.RoleCollection}, // channel alias ReceiveTransactions = PushTransactions + }, + } + + // core messages for execution & verification + AuthorizationConfigs[ExecutionReceipt] = MsgAuthConfig{ + Name: ExecutionReceipt, + Type: func() interface{} { + return new(flow.ExecutionReceipt) + }, + Config: map[channels.Channel]flow.RoleList{ + channels.PushReceipts: {flow.RoleExecution}, // channel alias ReceiveReceipts = PushReceipts + }, + } + AuthorizationConfigs[ResultApproval] = MsgAuthConfig{ + Name: ResultApproval, + Type: func() interface{} { + return new(flow.ResultApproval) + }, + Config: 
map[channels.Channel]flow.RoleList{ + channels.PushApprovals: {flow.RoleVerification}, // channel alias ReceiveApprovals = PushApprovals + }, + } + + // [deprecated] execution state synchronization + AuthorizationConfigs[ExecutionStateSyncRequest] = MsgAuthConfig{ + Name: ExecutionStateSyncRequest, + Config: nil, + } + AuthorizationConfigs[ExecutionStateDelta] = MsgAuthConfig{ + Name: ExecutionStateDelta, + Config: nil, + } + + // data exchange for execution of blocks + AuthorizationConfigs[ChunkDataRequest] = MsgAuthConfig{ + Name: ChunkDataRequest, + Type: func() interface{} { + return new(messages.ChunkDataRequest) + }, + Config: map[channels.Channel]flow.RoleList{ + channels.ProvideChunks: {flow.RoleVerification}, // channel alias RequestChunks = ProvideChunks + channels.RequestCollections: {flow.RoleVerification}, + channels.RequestApprovalsByChunk: {flow.RoleVerification}, + channels.RequestReceiptsByBlockID: {flow.RoleVerification}, + }, + } + AuthorizationConfigs[ChunkDataResponse] = MsgAuthConfig{ + Name: ChunkDataResponse, + Type: func() interface{} { + return new(messages.ChunkDataResponse) + }, + Config: map[channels.Channel]flow.RoleList{ + channels.ProvideChunks: {flow.RoleExecution}, // channel alias RequestChunks = ProvideChunks + channels.RequestCollections: {flow.RoleExecution}, + channels.RequestApprovalsByChunk: {flow.RoleExecution}, + channels.RequestReceiptsByBlockID: {flow.RoleExecution}, + }, + } + + // result approvals + AuthorizationConfigs[ApprovalRequest] = MsgAuthConfig{ + Name: ApprovalRequest, + Type: func() interface{} { + return new(messages.ApprovalRequest) + }, + Config: map[channels.Channel]flow.RoleList{ + channels.ProvideApprovalsByChunk: {flow.RoleConsensus}, + }, + } + AuthorizationConfigs[ApprovalResponse] = MsgAuthConfig{ + Name: ApprovalResponse, + Type: func() interface{} { + return new(messages.ApprovalResponse) + }, + Config: map[channels.Channel]flow.RoleList{ + channels.ProvideApprovalsByChunk: 
{flow.RoleVerification}, + }, + } + + // generic entity exchange engines + AuthorizationConfigs[EntityRequest] = MsgAuthConfig{ + Name: EntityRequest, + Type: func() interface{} { + return new(messages.EntityRequest) + }, + Config: map[channels.Channel]flow.RoleList{ + channels.RequestChunks: {flow.RoleAccess, flow.RoleConsensus, flow.RoleCollection}, + channels.RequestCollections: {flow.RoleAccess, flow.RoleConsensus, flow.RoleCollection}, + channels.RequestApprovalsByChunk: {flow.RoleAccess, flow.RoleConsensus, flow.RoleCollection}, + channels.RequestReceiptsByBlockID: {flow.RoleAccess, flow.RoleConsensus, flow.RoleCollection}, + }, + } + AuthorizationConfigs[EntityResponse] = MsgAuthConfig{ + Name: EntityResponse, + Type: func() interface{} { + return new(messages.EntityResponse) + }, + Config: map[channels.Channel]flow.RoleList{ + channels.RequestChunks: {flow.RoleCollection, flow.RoleExecution}, + channels.RequestCollections: {flow.RoleCollection, flow.RoleExecution}, + channels.RequestApprovalsByChunk: {flow.RoleCollection, flow.RoleExecution}, + channels.RequestReceiptsByBlockID: {flow.RoleCollection, flow.RoleExecution}, + }, + } + + // testing + AuthorizationConfigs[TestMessage] = MsgAuthConfig{ + Name: TestMessage, + Type: func() interface{} { + return new(message.TestMessage) + }, + Config: map[channels.Channel]flow.RoleList{ + channels.TestNetworkChannel: flow.Roles(), + channels.TestMetricsChannel: flow.Roles(), + }, + } + + // DKG + AuthorizationConfigs[DKGMessage] = MsgAuthConfig{ + Name: DKGMessage, + Type: func() interface{} { + return new(messages.DKGMessage) + }, + Config: map[channels.Channel]flow.RoleList{ + channels.DKGCommittee: {flow.RoleConsensus}, + }, + } +} + +// GetMessageAuthConfig checks the underlying type and returns the correct +// message auth Config. 
+// Expected error returns during normal operations: +// * ErrUnknownMsgType : if underlying type of v does not match any of the known message types +func GetMessageAuthConfig(v interface{}) (MsgAuthConfig, error) { + switch v.(type) { + // consensus + case *messages.BlockProposal: + return AuthorizationConfigs[BlockProposal], nil + case *messages.BlockVote: + return AuthorizationConfigs[BlockVote], nil + + // protocol state sync + case *messages.SyncRequest: + return AuthorizationConfigs[SyncRequest], nil + case *messages.SyncResponse: + return AuthorizationConfigs[SyncResponse], nil + case *messages.RangeRequest: + return AuthorizationConfigs[RangeRequest], nil + case *messages.BatchRequest: + return AuthorizationConfigs[BatchRequest], nil + case *messages.BlockResponse: + return AuthorizationConfigs[BlockResponse], nil + + // cluster consensus + case *messages.ClusterBlockProposal: + return AuthorizationConfigs[ClusterBlockProposal], nil + case *messages.ClusterBlockVote: + return AuthorizationConfigs[ClusterBlockVote], nil + case *messages.ClusterBlockResponse: + return AuthorizationConfigs[ClusterBlockResponse], nil + + // collections, guarantees & transactions + case *flow.CollectionGuarantee: + return AuthorizationConfigs[CollectionGuarantee], nil + case *flow.TransactionBody: + return AuthorizationConfigs[TransactionBody], nil + case *flow.Transaction: + return AuthorizationConfigs[Transaction], nil + + // core messages for execution & verification + case *flow.ExecutionReceipt: + return AuthorizationConfigs[ExecutionReceipt], nil + case *flow.ResultApproval: + return AuthorizationConfigs[ResultApproval], nil + + // execution state synchronization + case *messages.ExecutionStateSyncRequest: + return AuthorizationConfigs[ExecutionStateSyncRequest], nil + case *messages.ExecutionStateDelta: + return AuthorizationConfigs[ExecutionStateDelta], nil + + // data exchange for execution of blocks + case *messages.ChunkDataRequest: + return 
AuthorizationConfigs[ChunkDataRequest], nil + case *messages.ChunkDataResponse: + return AuthorizationConfigs[ChunkDataResponse], nil + + // result approvals + case *messages.ApprovalRequest: + return AuthorizationConfigs[ApprovalRequest], nil + case *messages.ApprovalResponse: + return AuthorizationConfigs[ApprovalResponse], nil + + // generic entity exchange engines + case *messages.EntityRequest: + return AuthorizationConfigs[EntityRequest], nil + case *messages.EntityResponse: + return AuthorizationConfigs[EntityResponse], nil + + // testing + case *message.TestMessage: + return AuthorizationConfigs[TestMessage], nil + + // dkg + case *messages.DKGMessage: + return AuthorizationConfigs[DKGMessage], nil + + default: + return MsgAuthConfig{}, fmt.Errorf("%w (%T)", ErrUnknownMsgType, v) + } +} diff --git a/network/message/init.go b/network/message/init.go new file mode 100644 index 00000000000..e69886cb47e --- /dev/null +++ b/network/message/init.go @@ -0,0 +1,36 @@ +package message + +// init is called first time this package is imported. +// It creates and initializes AuthorizationConfigs for each message type. 
+func init() { + initializeMessageAuthConfigsMap() +} + +// string constants for all message types sent on the network +const ( + BlockProposal = "BlockProposal" + BlockVote = "BlockVote" + SyncRequest = "SyncRequest" + SyncResponse = "SyncResponse" + RangeRequest = "RangeRequest" + BatchRequest = "BatchRequest" + BlockResponse = "BlockResponse" + ClusterBlockProposal = "ClusterBlockProposal" + ClusterBlockVote = "ClusterBlockVote" + ClusterBlockResponse = "ClusterBlockResponse" + CollectionGuarantee = "CollectionGuarantee" + Transaction = "Transaction" + TransactionBody = "TransactionBody" + ExecutionReceipt = "ExecutionReceipt" + ResultApproval = "ResultApproval" + ExecutionStateSyncRequest = "ExecutionStateSyncRequest" + ExecutionStateDelta = "ExecutionStateDelta" + ChunkDataRequest = "ChunkDataRequest" + ChunkDataResponse = "ChunkDataResponse" + ApprovalRequest = "ApprovalRequest" + ApprovalResponse = "ApprovalResponse" + EntityRequest = "EntityRequest" + EntityResponse = "EntityResponse" + TestMessage = "TestMessage" + DKGMessage = "DKGMessage" +) diff --git a/network/message_authorization_config.go b/network/message_authorization_config.go deleted file mode 100644 index 5254097f53c..00000000000 --- a/network/message_authorization_config.go +++ /dev/null @@ -1,393 +0,0 @@ -package network - -import ( - "errors" - "fmt" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/model/libp2p/message" - "github.com/onflow/flow-go/model/messages" -) - -// MsgAuthConfig contains authorization information for a specific flow message. The authorization -// is represented as a map from network channel -> list of all roles allowed to send the message on -// the channel. -type MsgAuthConfig struct { - String string - Interface func() interface{} - Config map[Channel]flow.RoleList -} - -// IsAuthorized checks if the specified role is authorized to send the message on channel and -// asserts that the message is authorized to be sent on channel. 
-// Expected error returns during normal operations: -// * ErrUnauthorizedMessageOnChannel: if channel does not exist in message config -// * ErrUnauthorizedRole: if list of authorized roles for message config does not include role -func (m MsgAuthConfig) IsAuthorized(role flow.Role, channel Channel) error { - authorizedRoles, ok := m.Config[channel] - if !ok { - return ErrUnauthorizedMessageOnChannel - } - - if !authorizedRoles.Contains(role) { - return ErrUnauthorizedRole - } - - return nil -} - -var ( - ErrUnknownMsgType = errors.New("could not get authorization Config for unknown message type") - ErrUnauthorizedMessageOnChannel = errors.New("message is not authorized to be sent on channel") - ErrUnauthorizedRole = errors.New("sender role not authorized to send message on channel") - MessageAuthConfigs map[string]MsgAuthConfig -) - -// init is called first time this package is imported. -// It creates and initializes channelRoleMap and clusterChannelPrefixRoleMap. -func init() { - initializeMessageAuthConfigsMap() -} - -func initializeMessageAuthConfigsMap() { - MessageAuthConfigs = make(map[string]MsgAuthConfig) - - // consensus - MessageAuthConfigs["BlockProposal"] = MsgAuthConfig{ - String: "BlockProposal", - Interface: func() interface{} { - return new(messages.BlockProposal) - }, - Config: map[Channel]flow.RoleList{ - ConsensusCommittee: {flow.RoleConsensus}, - PushBlocks: {flow.RoleConsensus}, // channel alias ReceiveBlocks = PushBlocks - }, - } - MessageAuthConfigs["BlockVote"] = MsgAuthConfig{ - String: "BlockVote", - Interface: func() interface{} { - return new(messages.BlockVote) - }, - Config: map[Channel]flow.RoleList{ - ConsensusCommittee: {flow.RoleConsensus}, - }, - } - - // protocol state sync - MessageAuthConfigs["SyncRequest"] = MsgAuthConfig{ - String: "SyncRequest", - Interface: func() interface{} { - return new(messages.SyncRequest) - }, - Config: map[Channel]flow.RoleList{ - SyncCommittee: flow.Roles(), - SyncClusterPrefix: flow.Roles(), - 
}, - } - MessageAuthConfigs["SyncResponse"] = MsgAuthConfig{ - String: "SyncResponse", - Interface: func() interface{} { - return new(messages.SyncResponse) - }, - Config: map[Channel]flow.RoleList{ - SyncCommittee: flow.Roles(), - SyncClusterPrefix: flow.Roles(), - }, - } - MessageAuthConfigs["RangeRequest"] = MsgAuthConfig{ - String: "RangeRequest", - Interface: func() interface{} { - return new(messages.RangeRequest) - }, - Config: map[Channel]flow.RoleList{ - SyncCommittee: flow.Roles(), - SyncClusterPrefix: flow.Roles(), - }, - } - MessageAuthConfigs["BatchRequest"] = MsgAuthConfig{ - String: "BatchRequest", - Interface: func() interface{} { - return new(messages.BatchRequest) - }, - Config: map[Channel]flow.RoleList{ - SyncCommittee: flow.Roles(), - SyncClusterPrefix: flow.Roles(), - }, - } - MessageAuthConfigs["BlockResponse"] = MsgAuthConfig{ - String: "BlockResponse", - Interface: func() interface{} { - return new(messages.BlockResponse) - }, - Config: map[Channel]flow.RoleList{ - SyncCommittee: flow.Roles(), - SyncClusterPrefix: flow.Roles(), - }, - } - - // cluster consensus - MessageAuthConfigs["ClusterBlockProposal"] = MsgAuthConfig{ - String: "ClusterBlockProposal", - Interface: func() interface{} { - return new(messages.ClusterBlockProposal) - }, - Config: map[Channel]flow.RoleList{ - ConsensusClusterPrefix: {flow.RoleCollection}, - }, - } - MessageAuthConfigs["ClusterBlockVote"] = MsgAuthConfig{ - String: "ClusterBlockVote", - Interface: func() interface{} { - return new(messages.ClusterBlockVote) - }, - Config: map[Channel]flow.RoleList{ - ConsensusClusterPrefix: {flow.RoleCollection}, - }, - } - MessageAuthConfigs["ClusterBlockResponse"] = MsgAuthConfig{ - String: "ClusterBlockResponse", - Interface: func() interface{} { - return new(messages.ClusterBlockResponse) - }, - Config: map[Channel]flow.RoleList{ - ConsensusClusterPrefix: {flow.RoleCollection}, - }, - } - - // collections, guarantees & transactions - 
MessageAuthConfigs["CollectionGuarantee"] = MsgAuthConfig{ - String: "CollectionGuarantee", - Interface: func() interface{} { - return new(flow.CollectionGuarantee) - }, - Config: map[Channel]flow.RoleList{ - PushGuarantees: {flow.RoleCollection}, // channel alias ReceiveGuarantees = PushGuarantees - }, - } - MessageAuthConfigs["Transaction"] = MsgAuthConfig{ - String: "Transaction", - Interface: func() interface{} { - return new(flow.Transaction) - }, - Config: map[Channel]flow.RoleList{ - PushTransactions: {flow.RoleCollection}, // channel alias ReceiveTransactions = PushTransactions - }, - } - MessageAuthConfigs["TransactionBody"] = MsgAuthConfig{ - String: "TransactionBody", - Interface: func() interface{} { - return new(flow.TransactionBody) - }, - Config: map[Channel]flow.RoleList{ - PushTransactions: {flow.RoleCollection}, // channel alias ReceiveTransactions = PushTransactions - }, - } - - // core messages for execution & verification - MessageAuthConfigs["ExecutionReceipt"] = MsgAuthConfig{ - String: "ExecutionReceipt", - Interface: func() interface{} { - return new(flow.ExecutionReceipt) - }, - Config: map[Channel]flow.RoleList{ - PushReceipts: {flow.RoleExecution}, // channel alias ReceiveReceipts = PushReceipts - }, - } - MessageAuthConfigs["ResultApproval"] = MsgAuthConfig{ - String: "ResultApproval", - Interface: func() interface{} { - return new(flow.ResultApproval) - }, - Config: map[Channel]flow.RoleList{ - PushApprovals: {flow.RoleVerification}, // channel alias ReceiveApprovals = PushApprovals - }, - } - - // [deprecated] execution state synchronization - MessageAuthConfigs["ExecutionStateSyncRequest"] = MsgAuthConfig{ - String: "ExecutionStateSyncRequest", - Config: nil, - } - MessageAuthConfigs["ExecutionStateDelta"] = MsgAuthConfig{ - String: "ExecutionStateDelta", - Config: nil, - } - - // data exchange for execution of blocks - MessageAuthConfigs["ChunkDataRequest"] = MsgAuthConfig{ - String: "ChunkDataRequest", - Interface: func() 
interface{} { - return new(messages.ChunkDataRequest) - }, - Config: map[Channel]flow.RoleList{ - ProvideChunks: {flow.RoleVerification}, // channel alias RequestChunks = ProvideChunks - RequestCollections: {flow.RoleVerification}, - RequestApprovalsByChunk: {flow.RoleVerification}, - RequestReceiptsByBlockID: {flow.RoleVerification}, - }, - } - MessageAuthConfigs["ChunkDataResponse"] = MsgAuthConfig{ - String: "ChunkDataResponse", - Interface: func() interface{} { - return new(messages.ChunkDataResponse) - }, - Config: map[Channel]flow.RoleList{ - ProvideChunks: {flow.RoleExecution}, // channel alias RequestChunks = ProvideChunks - RequestCollections: {flow.RoleExecution}, - RequestApprovalsByChunk: {flow.RoleExecution}, - RequestReceiptsByBlockID: {flow.RoleExecution}, - }, - } - - // result approvals - MessageAuthConfigs["ApprovalRequest"] = MsgAuthConfig{ - String: "ApprovalRequest", - Interface: func() interface{} { - return new(messages.ApprovalRequest) - }, - Config: map[Channel]flow.RoleList{ - ProvideApprovalsByChunk: {flow.RoleConsensus}, - }, - } - MessageAuthConfigs["ApprovalResponse"] = MsgAuthConfig{ - String: "ApprovalResponse", - Interface: func() interface{} { - return new(messages.ApprovalResponse) - }, - Config: map[Channel]flow.RoleList{ - ProvideApprovalsByChunk: {flow.RoleVerification}, - }, - } - - // generic entity exchange engines - MessageAuthConfigs["EntityRequest"] = MsgAuthConfig{ - String: "EntityRequest", - Interface: func() interface{} { - return new(messages.EntityRequest) - }, - Config: map[Channel]flow.RoleList{ - RequestChunks: {flow.RoleAccess, flow.RoleConsensus, flow.RoleCollection}, - RequestCollections: {flow.RoleAccess, flow.RoleConsensus, flow.RoleCollection}, - RequestApprovalsByChunk: {flow.RoleAccess, flow.RoleConsensus, flow.RoleCollection}, - RequestReceiptsByBlockID: {flow.RoleAccess, flow.RoleConsensus, flow.RoleCollection}, - }, - } - MessageAuthConfigs["EntityResponse"] = MsgAuthConfig{ - String: "EntityResponse", 
- Interface: func() interface{} { - return new(messages.EntityResponse) - }, - Config: map[Channel]flow.RoleList{ - RequestChunks: {flow.RoleCollection, flow.RoleExecution}, - RequestCollections: {flow.RoleCollection, flow.RoleExecution}, - RequestApprovalsByChunk: {flow.RoleCollection, flow.RoleExecution}, - RequestReceiptsByBlockID: {flow.RoleCollection, flow.RoleExecution}, - }, - } - - // testing - MessageAuthConfigs["TestMessage"] = MsgAuthConfig{ - String: "TestMessage", - Interface: func() interface{} { - return new(message.TestMessage) - }, - Config: map[Channel]flow.RoleList{ - TestNetworkChannel: flow.Roles(), - TestMetricsChannel: flow.Roles(), - }, - } - - // DKG - MessageAuthConfigs["DKGMessage"] = MsgAuthConfig{ - String: "DKGMessage", - Interface: func() interface{} { - return new(messages.DKGMessage) - }, - Config: map[Channel]flow.RoleList{ - DKGCommittee: {flow.RoleConsensus}, - }, - } -} - -// GetMessageAuthConfig checks the underlying type and returns the correct -// message auth Config. 
-// Expected error returns during normal operations: -// * ErrUnknownMsgType : if underlying type of v does not match any of the known message types -func GetMessageAuthConfig(v interface{}) (MsgAuthConfig, error) { - switch v.(type) { - // consensus - case *messages.BlockProposal: - return MessageAuthConfigs["BlockProposal"], nil - case *messages.BlockVote: - return MessageAuthConfigs["BlockVote"], nil - - // protocol state sync - case *messages.SyncRequest: - return MessageAuthConfigs["SyncRequest"], nil - case *messages.SyncResponse: - return MessageAuthConfigs["SyncResponse"], nil - case *messages.RangeRequest: - return MessageAuthConfigs["RangeRequest"], nil - case *messages.BatchRequest: - return MessageAuthConfigs["BatchRequest"], nil - case *messages.BlockResponse: - return MessageAuthConfigs["BlockResponse"], nil - - // cluster consensus - case *messages.ClusterBlockProposal: - return MessageAuthConfigs["ClusterBlockProposal"], nil - case *messages.ClusterBlockVote: - return MessageAuthConfigs["ClusterBlockVote"], nil - case *messages.ClusterBlockResponse: - return MessageAuthConfigs["ClusterBlockResponse"], nil - - // collections, guarantees & transactions - case *flow.CollectionGuarantee: - return MessageAuthConfigs["CollectionGuarantee"], nil - case *flow.TransactionBody: - return MessageAuthConfigs["TransactionBody"], nil - case *flow.Transaction: - return MessageAuthConfigs["Transaction"], nil - - // core messages for execution & verification - case *flow.ExecutionReceipt: - return MessageAuthConfigs["ExecutionReceipt"], nil - case *flow.ResultApproval: - return MessageAuthConfigs["ResultApproval"], nil - - // execution state synchronization - case *messages.ExecutionStateSyncRequest: - return MessageAuthConfigs["ExecutionStateSyncRequest"], nil - case *messages.ExecutionStateDelta: - return MessageAuthConfigs["ExecutionStateDelta"], nil - - // data exchange for execution of blocks - case *messages.ChunkDataRequest: - return 
MessageAuthConfigs["ChunkDataRequest"], nil - case *messages.ChunkDataResponse: - return MessageAuthConfigs["ChunkDataResponse"], nil - - // result approvals - case *messages.ApprovalRequest: - return MessageAuthConfigs["ApprovalRequest"], nil - case *messages.ApprovalResponse: - return MessageAuthConfigs["ApprovalResponse"], nil - - // generic entity exchange engines - case *messages.EntityRequest: - return MessageAuthConfigs["EntityRequest"], nil - case *messages.EntityResponse: - return MessageAuthConfigs["EntityResponse"], nil - - // testing - case *message.TestMessage: - return MessageAuthConfigs["TestMessage"], nil - - // dkg - case *messages.DKGMessage: - return MessageAuthConfigs["DKGMessage"], nil - - default: - return MsgAuthConfig{}, fmt.Errorf("%w (%T)", ErrUnknownMsgType, v) - } -} diff --git a/network/middleware.go b/network/middleware.go index f6282b2f3a8..0eadc5a6c23 100644 --- a/network/middleware.go +++ b/network/middleware.go @@ -6,20 +6,12 @@ import ( "github.com/ipfs/go-datastore" "github.com/libp2p/go-libp2p-core/peer" "github.com/libp2p/go-libp2p-core/protocol" - "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/component" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/message" ) -// Topic is the internal type of Libp2p which corresponds to the Channel in the network level. -// It is a virtual medium enabling nodes to subscribe and communicate over epidemic dissemination. -type Topic string - -func (t Topic) String() string { - return string(t) -} - // Middleware represents the middleware layer, which manages the connections to // our direct neighbours on the network. It handles the creation & teardown of // connections, as well as reading & writing to/from the connections. @@ -40,20 +32,20 @@ type Middleware interface { // Publish publishes a message on the channel. It models a distributed broadcast where the message is meant for all or // a many nodes subscribing to the channel. 
It does not guarantee the delivery though, and operates on a best // effort. - Publish(msg *message.Message, channel Channel) error + Publish(msg *message.Message, channel channels.Channel) error // Subscribe subscribes the middleware to a channel. - Subscribe(channel Channel) error + Subscribe(channel channels.Channel) error // Unsubscribe unsubscribes the middleware from a channel. - Unsubscribe(channel Channel) error + Unsubscribe(channel channels.Channel) error // UpdateNodeAddresses fetches and updates the addresses of all the authorized participants // in the Flow protocol. UpdateNodeAddresses() // NewBlobService creates a new BlobService for the given channel. - NewBlobService(channel Channel, store datastore.Batching, opts ...BlobServiceOption) BlobService + NewBlobService(channel channels.Channel, store datastore.Batching, opts ...BlobServiceOption) BlobService // NewPingService creates a new PingService for the given ping protocol ID. NewPingService(pingProtocol protocol.ID, provider PingInfoProvider) PingService diff --git a/network/mocknetwork/adapter.go b/network/mocknetwork/adapter.go index 60021365f83..0238b7e5a4b 100644 --- a/network/mocknetwork/adapter.go +++ b/network/mocknetwork/adapter.go @@ -4,9 +4,9 @@ package mocknetwork import ( flow "github.com/onflow/flow-go/model/flow" - mock "github.com/stretchr/testify/mock" + channels "github.com/onflow/flow-go/network/channels" - network "github.com/onflow/flow-go/network" + mock "github.com/stretchr/testify/mock" ) // Adapter is an autogenerated mock type for the Adapter type @@ -15,7 +15,7 @@ type Adapter struct { } // MulticastOnChannel provides a mock function with given fields: _a0, _a1, _a2, _a3 -func (_m *Adapter) MulticastOnChannel(_a0 network.Channel, _a1 interface{}, _a2 uint, _a3 ...flow.Identifier) error { +func (_m *Adapter) MulticastOnChannel(_a0 channels.Channel, _a1 interface{}, _a2 uint, _a3 ...flow.Identifier) error { _va := make([]interface{}, len(_a3)) for _i := range _a3 { _va[_i] 
= _a3[_i] @@ -26,7 +26,7 @@ func (_m *Adapter) MulticastOnChannel(_a0 network.Channel, _a1 interface{}, _a2 ret := _m.Called(_ca...) var r0 error - if rf, ok := ret.Get(0).(func(network.Channel, interface{}, uint, ...flow.Identifier) error); ok { + if rf, ok := ret.Get(0).(func(channels.Channel, interface{}, uint, ...flow.Identifier) error); ok { r0 = rf(_a0, _a1, _a2, _a3...) } else { r0 = ret.Error(0) @@ -36,7 +36,7 @@ func (_m *Adapter) MulticastOnChannel(_a0 network.Channel, _a1 interface{}, _a2 } // PublishOnChannel provides a mock function with given fields: _a0, _a1, _a2 -func (_m *Adapter) PublishOnChannel(_a0 network.Channel, _a1 interface{}, _a2 ...flow.Identifier) error { +func (_m *Adapter) PublishOnChannel(_a0 channels.Channel, _a1 interface{}, _a2 ...flow.Identifier) error { _va := make([]interface{}, len(_a2)) for _i := range _a2 { _va[_i] = _a2[_i] @@ -47,7 +47,7 @@ func (_m *Adapter) PublishOnChannel(_a0 network.Channel, _a1 interface{}, _a2 .. ret := _m.Called(_ca...) var r0 error - if rf, ok := ret.Get(0).(func(network.Channel, interface{}, ...flow.Identifier) error); ok { + if rf, ok := ret.Get(0).(func(channels.Channel, interface{}, ...flow.Identifier) error); ok { r0 = rf(_a0, _a1, _a2...) } else { r0 = ret.Error(0) @@ -57,11 +57,11 @@ func (_m *Adapter) PublishOnChannel(_a0 network.Channel, _a1 interface{}, _a2 .. 
} // UnRegisterChannel provides a mock function with given fields: channel -func (_m *Adapter) UnRegisterChannel(channel network.Channel) error { +func (_m *Adapter) UnRegisterChannel(channel channels.Channel) error { ret := _m.Called(channel) var r0 error - if rf, ok := ret.Get(0).(func(network.Channel) error); ok { + if rf, ok := ret.Get(0).(func(channels.Channel) error); ok { r0 = rf(channel) } else { r0 = ret.Error(0) @@ -71,11 +71,11 @@ func (_m *Adapter) UnRegisterChannel(channel network.Channel) error { } // UnicastOnChannel provides a mock function with given fields: _a0, _a1, _a2 -func (_m *Adapter) UnicastOnChannel(_a0 network.Channel, _a1 interface{}, _a2 flow.Identifier) error { +func (_m *Adapter) UnicastOnChannel(_a0 channels.Channel, _a1 interface{}, _a2 flow.Identifier) error { ret := _m.Called(_a0, _a1, _a2) var r0 error - if rf, ok := ret.Get(0).(func(network.Channel, interface{}, flow.Identifier) error); ok { + if rf, ok := ret.Get(0).(func(channels.Channel, interface{}, flow.Identifier) error); ok { r0 = rf(_a0, _a1, _a2) } else { r0 = ret.Error(0) diff --git a/network/mocknetwork/conduit_factory.go b/network/mocknetwork/conduit_factory.go index 409df448987..078364ff437 100644 --- a/network/mocknetwork/conduit_factory.go +++ b/network/mocknetwork/conduit_factory.go @@ -5,8 +5,11 @@ package mocknetwork import ( context "context" - network "github.com/onflow/flow-go/network" + channels "github.com/onflow/flow-go/network/channels" + mock "github.com/stretchr/testify/mock" + + network "github.com/onflow/flow-go/network" ) // ConduitFactory is an autogenerated mock type for the ConduitFactory type @@ -15,11 +18,11 @@ type ConduitFactory struct { } // NewConduit provides a mock function with given fields: _a0, _a1 -func (_m *ConduitFactory) NewConduit(_a0 context.Context, _a1 network.Channel) (network.Conduit, error) { +func (_m *ConduitFactory) NewConduit(_a0 context.Context, _a1 channels.Channel) (network.Conduit, error) { ret := _m.Called(_a0, _a1) 
var r0 network.Conduit - if rf, ok := ret.Get(0).(func(context.Context, network.Channel) network.Conduit); ok { + if rf, ok := ret.Get(0).(func(context.Context, channels.Channel) network.Conduit); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { @@ -28,7 +31,7 @@ func (_m *ConduitFactory) NewConduit(_a0 context.Context, _a1 network.Channel) ( } var r1 error - if rf, ok := ret.Get(1).(func(context.Context, network.Channel) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, channels.Channel) error); ok { r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) diff --git a/network/mocknetwork/engine.go b/network/mocknetwork/engine.go index 30fd96ac020..d0d87febfb5 100644 --- a/network/mocknetwork/engine.go +++ b/network/mocknetwork/engine.go @@ -4,9 +4,9 @@ package mocknetwork import ( flow "github.com/onflow/flow-go/model/flow" - mock "github.com/stretchr/testify/mock" + channels "github.com/onflow/flow-go/network/channels" - network "github.com/onflow/flow-go/network" + mock "github.com/stretchr/testify/mock" ) // Engine is an autogenerated mock type for the Engine type @@ -31,11 +31,11 @@ func (_m *Engine) Done() <-chan struct{} { } // Process provides a mock function with given fields: channel, originID, event -func (_m *Engine) Process(channel network.Channel, originID flow.Identifier, event interface{}) error { +func (_m *Engine) Process(channel channels.Channel, originID flow.Identifier, event interface{}) error { ret := _m.Called(channel, originID, event) var r0 error - if rf, ok := ret.Get(0).(func(network.Channel, flow.Identifier, interface{}) error); ok { + if rf, ok := ret.Get(0).(func(channels.Channel, flow.Identifier, interface{}) error); ok { r0 = rf(channel, originID, event) } else { r0 = ret.Error(0) @@ -75,7 +75,7 @@ func (_m *Engine) Ready() <-chan struct{} { } // Submit provides a mock function with given fields: channel, originID, event -func (_m *Engine) Submit(channel network.Channel, originID flow.Identifier, event interface{}) { +func 
(_m *Engine) Submit(channel channels.Channel, originID flow.Identifier, event interface{}) { _m.Called(channel, originID, event) } diff --git a/network/mocknetwork/message_processor.go b/network/mocknetwork/message_processor.go index bf5f2cfe939..bf95e599e60 100644 --- a/network/mocknetwork/message_processor.go +++ b/network/mocknetwork/message_processor.go @@ -4,9 +4,9 @@ package mocknetwork import ( flow "github.com/onflow/flow-go/model/flow" - mock "github.com/stretchr/testify/mock" + channels "github.com/onflow/flow-go/network/channels" - network "github.com/onflow/flow-go/network" + mock "github.com/stretchr/testify/mock" ) // MessageProcessor is an autogenerated mock type for the MessageProcessor type @@ -15,11 +15,11 @@ type MessageProcessor struct { } // Process provides a mock function with given fields: channel, originID, message -func (_m *MessageProcessor) Process(channel network.Channel, originID flow.Identifier, message interface{}) error { +func (_m *MessageProcessor) Process(channel channels.Channel, originID flow.Identifier, message interface{}) error { ret := _m.Called(channel, originID, message) var r0 error - if rf, ok := ret.Get(0).(func(network.Channel, flow.Identifier, interface{}) error); ok { + if rf, ok := ret.Get(0).(func(channels.Channel, flow.Identifier, interface{}) error); ok { r0 = rf(channel, originID, message) } else { r0 = ret.Error(0) diff --git a/network/mocknetwork/middleware.go b/network/mocknetwork/middleware.go index 376ee2b1ee8..4b5464286c0 100644 --- a/network/mocknetwork/middleware.go +++ b/network/mocknetwork/middleware.go @@ -4,6 +4,8 @@ package mocknetwork import ( datastore "github.com/ipfs/go-datastore" + channels "github.com/onflow/flow-go/network/channels" + flow "github.com/onflow/flow-go/model/flow" irrecoverable "github.com/onflow/flow-go/module/irrecoverable" @@ -60,7 +62,7 @@ func (_m *Middleware) IsConnected(nodeID flow.Identifier) (bool, error) { } // NewBlobService provides a mock function with given 
fields: channel, store, opts -func (_m *Middleware) NewBlobService(channel network.Channel, store datastore.Batching, opts ...network.BlobServiceOption) network.BlobService { +func (_m *Middleware) NewBlobService(channel channels.Channel, store datastore.Batching, opts ...network.BlobServiceOption) network.BlobService { _va := make([]interface{}, len(opts)) for _i := range opts { _va[_i] = opts[_i] @@ -71,7 +73,7 @@ func (_m *Middleware) NewBlobService(channel network.Channel, store datastore.Ba ret := _m.Called(_ca...) var r0 network.BlobService - if rf, ok := ret.Get(0).(func(network.Channel, datastore.Batching, ...network.BlobServiceOption) network.BlobService); ok { + if rf, ok := ret.Get(0).(func(channels.Channel, datastore.Batching, ...network.BlobServiceOption) network.BlobService); ok { r0 = rf(channel, store, opts...) } else { if ret.Get(0) != nil { @@ -99,11 +101,11 @@ func (_m *Middleware) NewPingService(pingProtocol protocol.ID, provider network. } // Publish provides a mock function with given fields: msg, channel -func (_m *Middleware) Publish(msg *message.Message, channel network.Channel) error { +func (_m *Middleware) Publish(msg *message.Message, channel channels.Channel) error { ret := _m.Called(msg, channel) var r0 error - if rf, ok := ret.Get(0).(func(*message.Message, network.Channel) error); ok { + if rf, ok := ret.Get(0).(func(*message.Message, channels.Channel) error); ok { r0 = rf(msg, channel) } else { r0 = ret.Error(0) @@ -153,11 +155,11 @@ func (_m *Middleware) Start(_a0 irrecoverable.SignalerContext) { } // Subscribe provides a mock function with given fields: channel -func (_m *Middleware) Subscribe(channel network.Channel) error { +func (_m *Middleware) Subscribe(channel channels.Channel) error { ret := _m.Called(channel) var r0 error - if rf, ok := ret.Get(0).(func(network.Channel) error); ok { + if rf, ok := ret.Get(0).(func(channels.Channel) error); ok { r0 = rf(channel) } else { r0 = ret.Error(0) @@ -167,11 +169,11 @@ func (_m 
*Middleware) Subscribe(channel network.Channel) error { } // Unsubscribe provides a mock function with given fields: channel -func (_m *Middleware) Unsubscribe(channel network.Channel) error { +func (_m *Middleware) Unsubscribe(channel channels.Channel) error { ret := _m.Called(channel) var r0 error - if rf, ok := ret.Get(0).(func(network.Channel) error); ok { + if rf, ok := ret.Get(0).(func(channels.Channel) error); ok { r0 = rf(channel) } else { r0 = ret.Error(0) diff --git a/network/mocknetwork/mock_network.go b/network/mocknetwork/mock_network.go index 11ae75bcf60..a8232be8762 100644 --- a/network/mocknetwork/mock_network.go +++ b/network/mocknetwork/mock_network.go @@ -10,6 +10,7 @@ import ( protocol "github.com/libp2p/go-libp2p-core/protocol" irrecoverable "github.com/onflow/flow-go/module/irrecoverable" network "github.com/onflow/flow-go/network" + channels "github.com/onflow/flow-go/network/channels" reflect "reflect" ) @@ -65,7 +66,7 @@ func (mr *MockNetworkMockRecorder) Ready() *gomock.Call { } // Register mocks base method -func (m *MockNetwork) Register(arg0 network.Channel, arg1 network.MessageProcessor) (network.Conduit, error) { +func (m *MockNetwork) Register(arg0 channels.Channel, arg1 network.MessageProcessor) (network.Conduit, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Register", arg0, arg1) ret0, _ := ret[0].(network.Conduit) @@ -80,7 +81,7 @@ func (mr *MockNetworkMockRecorder) Register(arg0, arg1 interface{}) *gomock.Call } // RegisterBlobService mocks base method -func (m *MockNetwork) RegisterBlobService(arg0 network.Channel, arg1 go_datastore.Batching, arg2 ...network.BlobServiceOption) (network.BlobService, error) { +func (m *MockNetwork) RegisterBlobService(arg0 channels.Channel, arg1 go_datastore.Batching, arg2 ...network.BlobServiceOption) (network.BlobService, error) { m.ctrl.T.Helper() varargs := []interface{}{arg0, arg1} for _, a := range arg2 { diff --git a/network/mocknetwork/network.go b/network/mocknetwork/network.go index 
6dd759cfb4d..b46d7e6f2e0 100644 --- a/network/mocknetwork/network.go +++ b/network/mocknetwork/network.go @@ -4,7 +4,10 @@ package mocknetwork import ( datastore "github.com/ipfs/go-datastore" + channels "github.com/onflow/flow-go/network/channels" + irrecoverable "github.com/onflow/flow-go/module/irrecoverable" + mock "github.com/stretchr/testify/mock" network "github.com/onflow/flow-go/network" @@ -50,11 +53,11 @@ func (_m *Network) Ready() <-chan struct{} { } // Register provides a mock function with given fields: channel, messageProcessor -func (_m *Network) Register(channel network.Channel, messageProcessor network.MessageProcessor) (network.Conduit, error) { +func (_m *Network) Register(channel channels.Channel, messageProcessor network.MessageProcessor) (network.Conduit, error) { ret := _m.Called(channel, messageProcessor) var r0 network.Conduit - if rf, ok := ret.Get(0).(func(network.Channel, network.MessageProcessor) network.Conduit); ok { + if rf, ok := ret.Get(0).(func(channels.Channel, network.MessageProcessor) network.Conduit); ok { r0 = rf(channel, messageProcessor) } else { if ret.Get(0) != nil { @@ -63,7 +66,7 @@ func (_m *Network) Register(channel network.Channel, messageProcessor network.Me } var r1 error - if rf, ok := ret.Get(1).(func(network.Channel, network.MessageProcessor) error); ok { + if rf, ok := ret.Get(1).(func(channels.Channel, network.MessageProcessor) error); ok { r1 = rf(channel, messageProcessor) } else { r1 = ret.Error(1) @@ -73,7 +76,7 @@ func (_m *Network) Register(channel network.Channel, messageProcessor network.Me } // RegisterBlobService provides a mock function with given fields: channel, store, opts -func (_m *Network) RegisterBlobService(channel network.Channel, store datastore.Batching, opts ...network.BlobServiceOption) (network.BlobService, error) { +func (_m *Network) RegisterBlobService(channel channels.Channel, store datastore.Batching, opts ...network.BlobServiceOption) (network.BlobService, error) { _va := 
make([]interface{}, len(opts)) for _i := range opts { _va[_i] = opts[_i] @@ -84,7 +87,7 @@ func (_m *Network) RegisterBlobService(channel network.Channel, store datastore. ret := _m.Called(_ca...) var r0 network.BlobService - if rf, ok := ret.Get(0).(func(network.Channel, datastore.Batching, ...network.BlobServiceOption) network.BlobService); ok { + if rf, ok := ret.Get(0).(func(channels.Channel, datastore.Batching, ...network.BlobServiceOption) network.BlobService); ok { r0 = rf(channel, store, opts...) } else { if ret.Get(0) != nil { @@ -93,7 +96,7 @@ func (_m *Network) RegisterBlobService(channel network.Channel, store datastore. } var r1 error - if rf, ok := ret.Get(1).(func(network.Channel, datastore.Batching, ...network.BlobServiceOption) error); ok { + if rf, ok := ret.Get(1).(func(channels.Channel, datastore.Batching, ...network.BlobServiceOption) error); ok { r1 = rf(channel, store, opts...) } else { r1 = ret.Error(1) diff --git a/network/mocknetwork/subscription_manager.go b/network/mocknetwork/subscription_manager.go index 6a0e55042b4..50cb7315fb4 100644 --- a/network/mocknetwork/subscription_manager.go +++ b/network/mocknetwork/subscription_manager.go @@ -3,8 +3,10 @@ package mocknetwork import ( - network "github.com/onflow/flow-go/network" + channels "github.com/onflow/flow-go/network/channels" mock "github.com/stretchr/testify/mock" + + network "github.com/onflow/flow-go/network" ) // SubscriptionManager is an autogenerated mock type for the SubscriptionManager type @@ -13,15 +15,15 @@ type SubscriptionManager struct { } // Channels provides a mock function with given fields: -func (_m *SubscriptionManager) Channels() network.ChannelList { +func (_m *SubscriptionManager) Channels() channels.ChannelList { ret := _m.Called() - var r0 network.ChannelList - if rf, ok := ret.Get(0).(func() network.ChannelList); ok { + var r0 channels.ChannelList + if rf, ok := ret.Get(0).(func() channels.ChannelList); ok { r0 = rf() } else { if ret.Get(0) != nil { - r0 = 
ret.Get(0).(network.ChannelList) + r0 = ret.Get(0).(channels.ChannelList) } } @@ -29,11 +31,11 @@ func (_m *SubscriptionManager) Channels() network.ChannelList { } // GetEngine provides a mock function with given fields: channel -func (_m *SubscriptionManager) GetEngine(channel network.Channel) (network.MessageProcessor, error) { +func (_m *SubscriptionManager) GetEngine(channel channels.Channel) (network.MessageProcessor, error) { ret := _m.Called(channel) var r0 network.MessageProcessor - if rf, ok := ret.Get(0).(func(network.Channel) network.MessageProcessor); ok { + if rf, ok := ret.Get(0).(func(channels.Channel) network.MessageProcessor); ok { r0 = rf(channel) } else { if ret.Get(0) != nil { @@ -42,7 +44,7 @@ func (_m *SubscriptionManager) GetEngine(channel network.Channel) (network.Messa } var r1 error - if rf, ok := ret.Get(1).(func(network.Channel) error); ok { + if rf, ok := ret.Get(1).(func(channels.Channel) error); ok { r1 = rf(channel) } else { r1 = ret.Error(1) @@ -52,11 +54,11 @@ func (_m *SubscriptionManager) GetEngine(channel network.Channel) (network.Messa } // Register provides a mock function with given fields: channel, engine -func (_m *SubscriptionManager) Register(channel network.Channel, engine network.MessageProcessor) error { +func (_m *SubscriptionManager) Register(channel channels.Channel, engine network.MessageProcessor) error { ret := _m.Called(channel, engine) var r0 error - if rf, ok := ret.Get(0).(func(network.Channel, network.MessageProcessor) error); ok { + if rf, ok := ret.Get(0).(func(channels.Channel, network.MessageProcessor) error); ok { r0 = rf(channel, engine) } else { r0 = ret.Error(0) @@ -66,11 +68,11 @@ func (_m *SubscriptionManager) Register(channel network.Channel, engine network. 
} // Unregister provides a mock function with given fields: channel -func (_m *SubscriptionManager) Unregister(channel network.Channel) error { +func (_m *SubscriptionManager) Unregister(channel channels.Channel) error { ret := _m.Called(channel) var r0 error - if rf, ok := ret.Get(0).(func(network.Channel) error); ok { + if rf, ok := ret.Get(0).(func(channels.Channel) error); ok { r0 = rf(channel) } else { r0 = ret.Error(0) diff --git a/network/mocknetwork/topology.go b/network/mocknetwork/topology.go index 7d3d5b53a64..3e4adab665f 100644 --- a/network/mocknetwork/topology.go +++ b/network/mocknetwork/topology.go @@ -4,9 +4,9 @@ package mocknetwork import ( flow "github.com/onflow/flow-go/model/flow" - mock "github.com/stretchr/testify/mock" + channels "github.com/onflow/flow-go/network/channels" - network "github.com/onflow/flow-go/network" + mock "github.com/stretchr/testify/mock" ) // Topology is an autogenerated mock type for the Topology type @@ -14,13 +14,13 @@ type Topology struct { mock.Mock } -// GenerateFanout provides a mock function with given fields: ids, channels -func (_m *Topology) GenerateFanout(ids flow.IdentityList, channels network.ChannelList) (flow.IdentityList, error) { - ret := _m.Called(ids, channels) +// GenerateFanout provides a mock function with given fields: ids, _a1 +func (_m *Topology) GenerateFanout(ids flow.IdentityList, _a1 channels.ChannelList) (flow.IdentityList, error) { + ret := _m.Called(ids, _a1) var r0 flow.IdentityList - if rf, ok := ret.Get(0).(func(flow.IdentityList, network.ChannelList) flow.IdentityList); ok { - r0 = rf(ids, channels) + if rf, ok := ret.Get(0).(func(flow.IdentityList, channels.ChannelList) flow.IdentityList); ok { + r0 = rf(ids, _a1) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(flow.IdentityList) @@ -28,8 +28,8 @@ func (_m *Topology) GenerateFanout(ids flow.IdentityList, channels network.Chann } var r1 error - if rf, ok := ret.Get(1).(func(flow.IdentityList, network.ChannelList) error); ok { - r1 
= rf(ids, channels) + if rf, ok := ret.Get(1).(func(flow.IdentityList, channels.ChannelList) error); ok { + r1 = rf(ids, _a1) } else { r1 = ret.Error(1) } diff --git a/network/network.go b/network/network.go index f60c3ddbe91..57c8f02576f 100644 --- a/network/network.go +++ b/network/network.go @@ -3,6 +3,7 @@ package network import ( "github.com/ipfs/go-datastore" "github.com/libp2p/go-libp2p-core/protocol" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/component" @@ -18,12 +19,12 @@ type Network interface { // the engine will be notified with incoming messages on the channel. // The returned Conduit can be used to send messages to engines on other nodes subscribed to the same channel // On a single node, only one engine can be subscribed to a channel at any given time. - Register(channel Channel, messageProcessor MessageProcessor) (Conduit, error) + Register(channel channels.Channel, messageProcessor MessageProcessor) (Conduit, error) // RegisterBlobService registers a BlobService on the given channel, using the given datastore to retrieve values. // The returned BlobService can be used to request blocks from the network. // TODO: We should return a function that can be called to unregister / close the BlobService - RegisterBlobService(channel Channel, store datastore.Batching, opts ...BlobServiceOption) (BlobService, error) + RegisterBlobService(channel channels.Channel, store datastore.Batching, opts ...BlobServiceOption) (BlobService, error) // RegisterPingService registers a ping protocol handler for the given protocol ID RegisterPingService(pingProtocolID protocol.ID, pingInfoProvider PingInfoProvider) (PingService, error) @@ -34,16 +35,16 @@ type Network interface { // delivered to the remote targets. type Adapter interface { // UnicastOnChannel sends the message in a reliable way to the given recipient. 
- UnicastOnChannel(Channel, interface{}, flow.Identifier) error + UnicastOnChannel(channels.Channel, interface{}, flow.Identifier) error // PublishOnChannel sends the message in an unreliable way to all the given recipients. - PublishOnChannel(Channel, interface{}, ...flow.Identifier) error + PublishOnChannel(channels.Channel, interface{}, ...flow.Identifier) error // MulticastOnChannel unreliably sends the specified event over the channel to randomly selected number of recipients // selected from the specified targetIDs. - MulticastOnChannel(Channel, interface{}, uint, ...flow.Identifier) error + MulticastOnChannel(channels.Channel, interface{}, uint, ...flow.Identifier) error // UnRegisterChannel unregisters the engine for the specified channel. The engine will no longer be able to send or // receive messages from that channel. - UnRegisterChannel(channel Channel) error + UnRegisterChannel(channel channels.Channel) error } diff --git a/network/p2p/conduit/conduit.go b/network/p2p/conduit/conduit.go index 7f77cd52012..353e67c29fc 100644 --- a/network/p2p/conduit/conduit.go +++ b/network/p2p/conduit/conduit.go @@ -8,6 +8,7 @@ import ( "github.com/onflow/flow-go/module/component" "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" ) // DefaultConduitFactory is a wrapper around the network Adapter. @@ -48,7 +49,7 @@ func (d *DefaultConduitFactory) RegisterAdapter(adapter network.Adapter) error { // NewConduit creates a conduit on the specified channel. // Prior to creating any conduit, the factory requires an Adapter to be registered with it. 
-func (d *DefaultConduitFactory) NewConduit(ctx context.Context, channel network.Channel) (network.Conduit, error) { +func (d *DefaultConduitFactory) NewConduit(ctx context.Context, channel channels.Channel) (network.Conduit, error) { if d.adapter == nil { return nil, fmt.Errorf("could not create a new conduit, missing a registered network adapter") } @@ -69,7 +70,7 @@ func (d *DefaultConduitFactory) NewConduit(ctx context.Context, channel network. type Conduit struct { ctx context.Context cancel context.CancelFunc - channel network.Channel + channel channels.Channel adapter network.Adapter } diff --git a/network/p2p/dht_test.go b/network/p2p/dht_test.go index 1ec92d11015..58e0bd4f2a5 100644 --- a/network/p2p/dht_test.go +++ b/network/p2p/dht_test.go @@ -9,11 +9,11 @@ import ( "github.com/libp2p/go-libp2p-core/network" "github.com/libp2p/go-libp2p-core/peer" pubsub "github.com/libp2p/go-libp2p-pubsub" + "github.com/onflow/flow-go/network/channels" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" libp2pmsg "github.com/onflow/flow-go/model/libp2p/message" - flownet "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/message" "github.com/onflow/flow-go/network/p2p" "github.com/onflow/flow-go/utils/unittest" @@ -89,7 +89,7 @@ func TestPubSubWithDHTDiscovery(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - topic := flownet.Topic("/flow/" + unittest.IdentifierFixture().String()) + topic := channels.Topic("/flow/" + unittest.IdentifierFixture().String()) count := 5 golog.SetAllLoggers(golog.LevelFatal) // change this to Debug if libp2p logs are needed diff --git a/network/p2p/fixture_test.go b/network/p2p/fixture_test.go index a3e6077ad31..f1324345083 100644 --- a/network/p2p/fixture_test.go +++ b/network/p2p/fixture_test.go @@ -2,6 +2,7 @@ package p2p_test import ( "context" + "fmt" "net" "testing" "time" @@ -28,7 +29,7 @@ import ( ) // Workaround for 
https://github.com/stretchr/testify/pull/808 -const ticksForAssertEventually = 100 * time.Millisecond +const ticksForAssertEventually = 10 * time.Millisecond // Creating a node fixture with defaultAddress lets libp2p runs it on an // allocated port by OS. So after fixture created, its address would be @@ -149,10 +150,10 @@ func nodeFixture( err = n.WithDefaultUnicastProtocol(parameters.handlerFunc, parameters.unicasts) require.NoError(t, err) - //require.Eventuallyf(t, func() bool { - // ip, p, err := n.GetIPPort() - // return err == nil && ip != "" && p != "" - //}, 3*time.Second, ticksForAssertEventually, fmt.Sprintf("could not start node %s", identity.NodeID.String())) + require.Eventuallyf(t, func() bool { + ip, p, err := n.GetIPPort() + return err == nil && ip != "" && p != "" + }, 3*time.Second, ticksForAssertEventually, fmt.Sprintf("could not start node %s", identity.NodeID)) // get the actual IP and port that have been assigned by the subsystem ip, port, err := n.GetIPPort() diff --git a/network/p2p/libp2pNode.go b/network/p2p/libp2pNode.go index d28b6481e92..4154e4915cd 100644 --- a/network/p2p/libp2pNode.go +++ b/network/p2p/libp2pNode.go @@ -16,6 +16,7 @@ import ( dht "github.com/libp2p/go-libp2p-kad-dht" kbucket "github.com/libp2p/go-libp2p-kbucket" pubsub "github.com/libp2p/go-libp2p-pubsub" + "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" flownet "github.com/onflow/flow-go/network" @@ -36,11 +37,11 @@ const ( type Node struct { sync.Mutex unicastManager *unicast.Manager - host host.Host // reference to the libp2p host (https://godoc.org/github.com/libp2p/go-libp2p-core/host) - pubSub *pubsub.PubSub // reference to the libp2p PubSub component - logger zerolog.Logger // used to provide logging - topics map[flownet.Topic]*pubsub.Topic // map of a topic string to an actual topic instance - subs map[flownet.Topic]*pubsub.Subscription // map of a topic string to an actual subscription + host host.Host // reference to the libp2p host 
(https://godoc.org/github.com/libp2p/go-libp2p-core/host) + pubSub *pubsub.PubSub // reference to the libp2p PubSub component + logger zerolog.Logger // used to provide logging + topics map[channels.Topic]*pubsub.Topic // map of a topic string to an actual topic instance + subs map[channels.Topic]*pubsub.Subscription // map of a topic string to an actual subscription routing routing.Routing pCache *protocolPeerCache } @@ -172,7 +173,7 @@ func (n *Node) ListPeers(topic string) []peer.ID { // Subscribe subscribes the node to the given topic and returns the subscription // Currently only one subscriber is allowed per topic. // NOTE: A node will receive its own published messages. -func (n *Node) Subscribe(topic flownet.Topic, codec flownet.Codec, peerFilter peerFilterFunc, validators ...validator.MessageValidator) (*pubsub.Subscription, error) { +func (n *Node) Subscribe(topic channels.Topic, codec flownet.Codec, peerFilter peerFilterFunc, validators ...validator.MessageValidator) (*pubsub.Subscription, error) { n.Lock() defer n.Unlock() @@ -217,7 +218,7 @@ func (n *Node) Subscribe(topic flownet.Topic, codec flownet.Codec, peerFilter pe } // UnSubscribe cancels the subscriber and closes the topic. 
-func (n *Node) UnSubscribe(topic flownet.Topic) error { +func (n *Node) UnSubscribe(topic channels.Topic) error { n.Lock() defer n.Unlock() // Remove the Subscriber from the cache @@ -253,7 +254,7 @@ func (n *Node) UnSubscribe(topic flownet.Topic) error { } // Publish publishes the given payload on the topic -func (n *Node) Publish(ctx context.Context, topic flownet.Topic, data []byte) error { +func (n *Node) Publish(ctx context.Context, topic channels.Topic, data []byte) error { ps, found := n.topics[topic] if !found { return fmt.Errorf("could not find topic (%s)", topic) diff --git a/network/p2p/libp2pNodeBuilder.go b/network/p2p/libp2pNodeBuilder.go index 6a7c660a66b..04bc4a89e32 100644 --- a/network/p2p/libp2pNodeBuilder.go +++ b/network/p2p/libp2pNodeBuilder.go @@ -19,6 +19,7 @@ import ( "github.com/libp2p/go-tcp-transport" "github.com/multiformats/go-multiaddr" madns "github.com/multiformats/go-multiaddr-dns" + "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" fcrypto "github.com/onflow/flow-go/crypto" @@ -26,7 +27,6 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/id" - flownet "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/p2p/keyutils" "github.com/onflow/flow-go/network/p2p/unicast" ) @@ -218,8 +218,8 @@ func (builder *LibP2PNodeBuilder) Build(ctx context.Context) (*Node, error) { } node := &Node{ - topics: make(map[flownet.Topic]*pubsub.Topic), - subs: make(map[flownet.Topic]*pubsub.Subscription), + topics: make(map[channels.Topic]*pubsub.Topic), + subs: make(map[channels.Topic]*pubsub.Subscription), logger: builder.logger, routing: rsys, host: host, diff --git a/network/p2p/middleware.go b/network/p2p/middleware.go index fbc9fa3ed9d..96dbf28f72d 100644 --- a/network/p2p/middleware.go +++ b/network/p2p/middleware.go @@ -17,6 +17,7 @@ import ( "github.com/libp2p/go-libp2p-core/peer" "github.com/libp2p/go-libp2p-core/peerstore" 
"github.com/libp2p/go-libp2p-core/protocol" + "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/onflow/flow-go/model/flow" @@ -201,7 +202,7 @@ func (m *Middleware) isStakedPeerFilter() peerFilterFunc { return f } -func (m *Middleware) NewBlobService(channel network.Channel, ds datastore.Batching, opts ...network.BlobServiceOption) network.BlobService { +func (m *Middleware) NewBlobService(channel channels.Channel, ds datastore.Batching, opts ...network.BlobServiceOption) network.BlobService { return NewBlobService(m.libP2PNode.Host(), m.libP2PNode.routing, channel.String(), ds, opts...) } @@ -525,13 +526,13 @@ func (m *Middleware) handleIncomingStream(s libp2pnetwork.Stream) { } // Subscribe subscribes the middleware to a channel. -func (m *Middleware) Subscribe(channel network.Channel) error { +func (m *Middleware) Subscribe(channel channels.Channel) error { - topic := network.TopicFromChannel(channel, m.rootBlockID) + topic := channels.TopicFromChannel(channel, m.rootBlockID) var peerFilter peerFilterFunc var validators []psValidator.MessageValidator - if network.PublicChannels().Contains(channel) { + if channels.PublicChannels().Contains(channel) { // NOTE: for public channels the callback used to check if a node is staked will // return true for every node. peerFilter = allowAll @@ -565,8 +566,8 @@ func (m *Middleware) Subscribe(channel network.Channel) error { } // Unsubscribe unsubscribes the middleware from a channel. 
-func (m *Middleware) Unsubscribe(channel network.Channel) error { - topic := network.TopicFromChannel(channel, m.rootBlockID) +func (m *Middleware) Unsubscribe(channel channels.Channel) error { + topic := channels.TopicFromChannel(channel, m.rootBlockID) err := m.libP2PNode.UnSubscribe(topic) if err != nil { return fmt.Errorf("failed to unsubscribe from channel %s: %w", channel, err) @@ -622,7 +623,7 @@ func (m *Middleware) processMessage(msg *message.Message, decodedMsgPayload inte // Publish publishes a message on the channel. It models a distributed broadcast where the message is meant for all or // a many nodes subscribing to the channel. It does not guarantee the delivery though, and operates on a best // effort. -func (m *Middleware) Publish(msg *message.Message, channel network.Channel) error { +func (m *Middleware) Publish(msg *message.Message, channel channels.Channel) error { m.log.Debug().Str("channel", channel.String()).Interface("msg", msg).Msg("publishing new message") // convert the message to bytes to be put on the wire. 
@@ -640,7 +641,7 @@ func (m *Middleware) Publish(msg *message.Message, channel network.Channel) erro return fmt.Errorf("message size %d exceeds configured max message size %d", msgSize, DefaultMaxPubSubMsgSize) } - topic := network.TopicFromChannel(channel, m.rootBlockID) + topic := channels.TopicFromChannel(channel, m.rootBlockID) // publish the bytes on the topic err = m.libP2PNode.Publish(m.ctx, topic, data) diff --git a/network/p2p/network.go b/network/p2p/network.go index 75089898cdd..d756cd19704 100644 --- a/network/p2p/network.go +++ b/network/p2p/network.go @@ -10,6 +10,7 @@ import ( "github.com/ipfs/go-datastore" "github.com/libp2p/go-libp2p-core/peer" "github.com/libp2p/go-libp2p-core/protocol" + "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/onflow/flow-go/crypto/hash" @@ -75,7 +76,7 @@ type Network struct { var _ network.Network = (*Network)(nil) type registerEngineRequest struct { - channel network.Channel + channel channels.Channel messageProcessor network.MessageProcessor respChan chan *registerEngineResp } @@ -86,8 +87,8 @@ type registerEngineResp struct { } type registerBlobServiceRequest struct { - channel network.Channel - ds datastore.Batching + channel channels.Channel + ds datastore.Batching opts []network.BlobServiceOption respChan chan *registerBlobServiceResp } @@ -218,8 +219,8 @@ func (n *Network) runMiddleware(ctx irrecoverable.SignalerContext, ready compone <-n.mw.Done() } -func (n *Network) handleRegisterEngineRequest(parent irrecoverable.SignalerContext, channel network.Channel, engine network.MessageProcessor) (network.Conduit, error) { - if !network.ChannelExists(channel) { +func (n *Network) handleRegisterEngineRequest(parent irrecoverable.SignalerContext, channel channels.Channel, engine network.MessageProcessor) (network.Conduit, error) { + if !channels.ChannelExists(channel) { return nil, fmt.Errorf("unknown channel: %s, should be registered in topic map", channel) } @@ -241,7 +242,7 @@ func (n 
*Network) handleRegisterEngineRequest(parent irrecoverable.SignalerConte return conduit, nil } -func (n *Network) handleRegisterBlobServiceRequest(parent irrecoverable.SignalerContext, channel network.Channel, ds datastore.Batching, opts []network.BlobServiceOption) (network.BlobService, error) { +func (n *Network) handleRegisterBlobServiceRequest(parent irrecoverable.SignalerContext, channel channels.Channel, ds datastore.Batching, opts []network.BlobServiceOption) (network.BlobService, error) { bs := n.mw.NewBlobService(channel, ds, opts...) // start the blob service using the network's context @@ -253,7 +254,7 @@ func (n *Network) handleRegisterBlobServiceRequest(parent irrecoverable.Signaler // Register will register the given engine with the given unique engine engineID, // returning a conduit to directly submit messages to the message bus of the // engine. -func (n *Network) Register(channel network.Channel, messageProcessor network.MessageProcessor) (network.Conduit, error) { +func (n *Network) Register(channel channels.Channel, messageProcessor network.MessageProcessor) (network.Conduit, error) { respChan := make(chan *registerEngineResp) select { @@ -284,7 +285,7 @@ func (n *Network) RegisterPingService(pingProtocol protocol.ID, provider network // RegisterBlobService registers a BlobService on the given channel. // The returned BlobService can be used to request blobs from the network. -func (n *Network) RegisterBlobService(channel network.Channel, ds datastore.Batching, opts ...network.BlobServiceOption) (network.BlobService, error) { +func (n *Network) RegisterBlobService(channel channels.Channel, ds datastore.Batching, opts ...network.BlobServiceOption) (network.BlobService, error) { respChan := make(chan *registerBlobServiceResp) select { @@ -307,7 +308,7 @@ func (n *Network) RegisterBlobService(channel network.Channel, ds datastore.Batc // UnRegisterChannel unregisters the engine for the specified channel. 
The engine will no longer be able to send or // receive messages from that channel. -func (n *Network) UnRegisterChannel(channel network.Channel) error { +func (n *Network) UnRegisterChannel(channel channels.Channel) error { err := n.subscriptionManager.Unregister(channel) if err != nil { return fmt.Errorf("failed to unregister engine for channel %s: %w", channel, err) @@ -368,7 +369,7 @@ func (n *Network) processNetworkMessage(senderID flow.Identifier, message *messa qm := queue.QMessage{ Payload: decodedMsgPayload, Size: message.Size(), - Target: network.Channel(message.ChannelID), + Target: channels.Channel(message.ChannelID), SenderID: senderID, } @@ -382,7 +383,7 @@ func (n *Network) processNetworkMessage(senderID flow.Identifier, message *messa } // genNetworkMessage uses the codec to encode an event into a NetworkMessage -func (n *Network) genNetworkMessage(channel network.Channel, event interface{}, targetIDs ...flow.Identifier) (*message.Message, error) { +func (n *Network) genNetworkMessage(channel channels.Channel, event interface{}, targetIDs ...flow.Identifier) (*message.Message, error) { // encode the payload using the configured codec payload, err := n.codec.Encode(event) if err != nil { @@ -426,7 +427,7 @@ func (n *Network) genNetworkMessage(channel network.Channel, event interface{}, // UnicastOnChannel sends the message in a reliable way to the given recipient. // It uses 1-1 direct messaging over the underlying network to deliver the message. // It returns an error if unicasting fails. 
-func (n *Network) UnicastOnChannel(channel network.Channel, message interface{}, targetID flow.Identifier) error { +func (n *Network) UnicastOnChannel(channel channels.Channel, message interface{}, targetID flow.Identifier) error { if targetID == n.me.NodeID() { n.logger.Debug().Msg("network skips self unicasting") return nil @@ -450,7 +451,7 @@ func (n *Network) UnicastOnChannel(channel network.Channel, message interface{}, // In this context, unreliable means that the message is published over a libp2p pub-sub // channel and can be read by any node subscribed to that channel. // The selector could be used to optimize or restrict delivery. -func (n *Network) PublishOnChannel(channel network.Channel, message interface{}, targetIDs ...flow.Identifier) error { +func (n *Network) PublishOnChannel(channel channels.Channel, message interface{}, targetIDs ...flow.Identifier) error { filteredIDs := flow.IdentifierList(targetIDs).Filter(n.removeSelfFilter()) if len(filteredIDs) == 0 { @@ -468,7 +469,7 @@ func (n *Network) PublishOnChannel(channel network.Channel, message interface{}, // MulticastOnChannel unreliably sends the specified event over the channel to randomly selected 'num' number of recipients // selected from the specified targetIDs. -func (n *Network) MulticastOnChannel(channel network.Channel, message interface{}, num uint, targetIDs ...flow.Identifier) error { +func (n *Network) MulticastOnChannel(channel channels.Channel, message interface{}, num uint, targetIDs ...flow.Identifier) error { selectedIDs := flow.IdentifierList(targetIDs).Filter(n.removeSelfFilter()).Sample(num) if len(selectedIDs) == 0 { @@ -493,7 +494,7 @@ func (n *Network) removeSelfFilter() flow.IdentifierFilter { } // sendOnChannel sends the message on channel to targets. 
-func (n *Network) sendOnChannel(channel network.Channel, message interface{}, targetIDs []flow.Identifier) error { +func (n *Network) sendOnChannel(channel channels.Channel, message interface{}, targetIDs []flow.Identifier) error { n.logger.Debug(). Interface("message", message). Str("channel", channel.String()). @@ -548,7 +549,7 @@ func (n *Network) queueSubmitFunc(message interface{}) { n.metrics.MessageProcessingFinished(qm.Target.String(), time.Since(startTimestamp)) } -func EventId(channel network.Channel, payload []byte) (hash.Hash, error) { +func EventId(channel channels.Channel, payload []byte) (hash.Hash, error) { // use a hash with an engine-specific salt to get the payload hash h := hash.NewSHA3_384() _, err := h.Write([]byte(eventIDPackingPrefix + channel)) diff --git a/network/p2p/sporking_test.go b/network/p2p/sporking_test.go index 95cb146e5f0..590794062dd 100644 --- a/network/p2p/sporking_test.go +++ b/network/p2p/sporking_test.go @@ -8,12 +8,12 @@ import ( "github.com/libp2p/go-libp2p-core/peer" "github.com/libp2p/go-libp2p-core/peerstore" pubsub "github.com/libp2p/go-libp2p-pubsub" + "github.com/onflow/flow-go/network/channels" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" libp2pmessage "github.com/onflow/flow-go/model/libp2p/message" - "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/message" "github.com/onflow/flow-go/network/p2p" "github.com/onflow/flow-go/utils/unittest" @@ -162,7 +162,7 @@ func TestOneToKCrosstalkPrevention(t *testing.T) { require.NoError(t, err) // spork topic is derived by suffixing the channel with the root block ID - topicBeforeSpork := network.TopicFromChannel(network.TestNetworkChannel, previousSporkId) + topicBeforeSpork := channels.TopicFromChannel(channels.TestNetworkChannel, previousSporkId) // both nodes are initially on the same spork and subscribed to the same topic _, err = node1.Subscribe(topicBeforeSpork, unittest.NetworkCodec(), 
unittest.AllowAllPeerFilter()) @@ -184,7 +184,7 @@ func TestOneToKCrosstalkPrevention(t *testing.T) { rootIDAfterSpork := unittest.IdentifierFixture() // topic after the spork - topicAfterSpork := network.TopicFromChannel(network.TestNetworkChannel, rootIDAfterSpork) + topicAfterSpork := channels.TopicFromChannel(channels.TestNetworkChannel, rootIDAfterSpork) // mimic that node1 now is now part of the new spork while node2 remains on the old spork // by unsubscribing node1 from 'topicBeforeSpork' and subscribing it to 'topicAfterSpork' @@ -221,7 +221,7 @@ func testOneToKMessagingSucceeds(ctx context.Context, t *testing.T, sourceNode *p2p.Node, dstnSub *pubsub.Subscription, - topic network.Topic) { + topic channels.Topic) { payload := createTestMessage(t) @@ -243,7 +243,7 @@ func testOneToKMessagingFails(ctx context.Context, t *testing.T, sourceNode *p2p.Node, dstnSub *pubsub.Subscription, - topic network.Topic) { + topic channels.Topic) { payload := createTestMessage(t) diff --git a/network/p2p/subscriptionManager.go b/network/p2p/subscriptionManager.go index ee2f25a3b16..238d4c5995e 100644 --- a/network/p2p/subscriptionManager.go +++ b/network/p2p/subscriptionManager.go @@ -5,25 +5,26 @@ import ( "sync" "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" ) // ChannelSubscriptionManager manages subscriptions of engines running on the node to channels. // Each channel should be taken by at most a single engine. type ChannelSubscriptionManager struct { mu sync.RWMutex - engines map[network.Channel]network.MessageProcessor + engines map[channels.Channel]network.MessageProcessor mw network.Middleware } func NewChannelSubscriptionManager(mw network.Middleware) *ChannelSubscriptionManager { return &ChannelSubscriptionManager{ - engines: make(map[network.Channel]network.MessageProcessor), + engines: make(map[channels.Channel]network.MessageProcessor), mw: mw, } } // Register registers an engine on the channel into the subscription manager. 
-func (sm *ChannelSubscriptionManager) Register(channel network.Channel, engine network.MessageProcessor) error { +func (sm *ChannelSubscriptionManager) Register(channel channels.Channel, engine network.MessageProcessor) error { sm.mu.Lock() defer sm.mu.Unlock() @@ -46,7 +47,7 @@ func (sm *ChannelSubscriptionManager) Register(channel network.Channel, engine n } // Unregister removes the engine associated with a channel. -func (sm *ChannelSubscriptionManager) Unregister(channel network.Channel) error { +func (sm *ChannelSubscriptionManager) Unregister(channel channels.Channel) error { sm.mu.Lock() defer sm.mu.Unlock() @@ -68,7 +69,7 @@ func (sm *ChannelSubscriptionManager) Unregister(channel network.Channel) error } // GetEngine returns engine associated with a channel. -func (sm *ChannelSubscriptionManager) GetEngine(channel network.Channel) (network.MessageProcessor, error) { +func (sm *ChannelSubscriptionManager) GetEngine(channel channels.Channel) (network.MessageProcessor, error) { sm.mu.RLock() defer sm.mu.RUnlock() @@ -80,11 +81,11 @@ func (sm *ChannelSubscriptionManager) GetEngine(channel network.Channel) (networ } // Channels returns all the channels registered in this subscription manager. 
-func (sm *ChannelSubscriptionManager) Channels() network.ChannelList { +func (sm *ChannelSubscriptionManager) Channels() channels.ChannelList { sm.mu.RLock() defer sm.mu.RUnlock() - channels := make(network.ChannelList, 0) + channels := make(channels.ChannelList, 0) for channel := range sm.engines { channels = append(channels, channel) } diff --git a/network/p2p/subscription_filter.go b/network/p2p/subscription_filter.go index 378b86246d3..aea7fa6c8cc 100644 --- a/network/p2p/subscription_filter.go +++ b/network/p2p/subscription_filter.go @@ -3,10 +3,10 @@ package p2p import ( "github.com/libp2p/go-libp2p-core/peer" pb "github.com/libp2p/go-libp2p-pubsub/pb" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/id" - "github.com/onflow/flow-go/network" ) // RoleBasedFilter implements a subscription filter that filters subscriptions based on a node's role. @@ -35,7 +35,7 @@ func (f *RoleBasedFilter) getRole(pid peer.ID) flow.Role { } func (f *RoleBasedFilter) allowed(role flow.Role, topic string) bool { - channel, ok := network.ChannelFromTopic(network.Topic(topic)) + channel, ok := channels.ChannelFromTopic(channels.Topic(topic)) if !ok { return false } @@ -44,9 +44,9 @@ func (f *RoleBasedFilter) allowed(role flow.Role, topic string) bool { // TODO: eventually we should have block proposals relayed on a separate // channel on the public network. For now, we need to make sure that // full observer nodes can subscribe to the block proposal channel. 
- return append(network.PublicChannels(), network.ReceiveBlocks).Contains(channel) + return append(channels.PublicChannels(), channels.ReceiveBlocks).Contains(channel) } else { - if roles, ok := network.RolesByChannel(channel); ok { + if roles, ok := channels.RolesByChannel(channel); ok { return roles.Contains(role) } diff --git a/network/p2p/subscription_filter_test.go b/network/p2p/subscription_filter_test.go index fd29d9f6d8d..d9032cc3adc 100644 --- a/network/p2p/subscription_filter_test.go +++ b/network/p2p/subscription_filter_test.go @@ -8,12 +8,12 @@ import ( "github.com/libp2p/go-libp2p-core/host" pubsub "github.com/libp2p/go-libp2p-pubsub" + "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/stretchr/testify/require" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/id" - "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/p2p" "github.com/onflow/flow-go/utils/unittest" ) @@ -39,7 +39,7 @@ func TestFilterSubscribe(t *testing.T) { require.NoError(t, node1.AddPeer(context.TODO(), *host.InfoFromHost(node2.Host()))) require.NoError(t, node1.AddPeer(context.TODO(), *host.InfoFromHost(unstakedNode.Host()))) - badTopic := network.TopicFromChannel(network.SyncCommittee, sporkId) + badTopic := channels.TopicFromChannel(channels.SyncCommittee, sporkId) sub1, err := node1.Subscribe(badTopic, unittest.NetworkCodec(), unittest.AllowAllPeerFilter()) require.NoError(t, err) @@ -111,25 +111,25 @@ func TestCanSubscribe(t *testing.T) { unittest.RequireCloseBefore(t, done, 1*time.Second, "could not stop collection node on time") }() - goodTopic := network.TopicFromChannel(network.ProvideCollections, sporkId) + goodTopic := channels.TopicFromChannel(channels.ProvideCollections, sporkId) _, err := collectionNode.Subscribe(goodTopic, unittest.NetworkCodec(), unittest.AllowAllPeerFilter()) require.NoError(t, err) - var badTopic network.Topic - allowedChannels := make(map[network.Channel]struct{}) - for _, ch 
:= range network.ChannelsByRole(flow.RoleCollection) { + var badTopic channels.Topic + allowedChannels := make(map[channels.Channel]struct{}) + for _, ch := range channels.ChannelsByRole(flow.RoleCollection) { allowedChannels[ch] = struct{}{} } - for _, ch := range network.Channels() { + for _, ch := range channels.Channels() { if _, ok := allowedChannels[ch]; !ok { - badTopic = network.TopicFromChannel(ch, sporkId) + badTopic = channels.TopicFromChannel(ch, sporkId) break } } _, err = collectionNode.Subscribe(badTopic, unittest.NetworkCodec(), unittest.AllowAllPeerFilter()) require.Error(t, err) - clusterTopic := network.TopicFromChannel(network.ChannelSyncCluster(flow.Emulator), sporkId) + clusterTopic := channels.TopicFromChannel(channels.ChannelSyncCluster(flow.Emulator), sporkId) _, err = collectionNode.Subscribe(clusterTopic, unittest.NetworkCodec(), unittest.AllowAllPeerFilter()) require.NoError(t, err) } diff --git a/network/p2p/topic_validator_test.go b/network/p2p/topic_validator_test.go index e4139ec7345..0791fb8dd8d 100644 --- a/network/p2p/topic_validator_test.go +++ b/network/p2p/topic_validator_test.go @@ -11,12 +11,12 @@ import ( "github.com/libp2p/go-libp2p-core/host" "github.com/libp2p/go-libp2p-core/peer" pubsub "github.com/libp2p/go-libp2p-pubsub" + "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/stretchr/testify/require" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/messages" - "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/message" "github.com/onflow/flow-go/network/p2p" validator "github.com/onflow/flow-go/network/validator/pubsub" @@ -39,8 +39,8 @@ func TestTopicValidator_Unstaked(t *testing.T) { sn1, identity1 := nodeFixture(t, context.Background(), sporkId, "TestAuthorizedSenderValidator_Unauthorized", withRole(flow.RoleConsensus), withLogger(logger)) sn2, _ := nodeFixture(t, context.Background(), sporkId, "TestAuthorizedSenderValidator_Unauthorized", 
withRole(flow.RoleConsensus), withLogger(logger)) - channel := network.ConsensusCommittee - topic := network.TopicFromChannel(channel, sporkId) + channel := channels.ConsensusCommittee + topic := channels.TopicFromChannel(channel, sporkId) //NOTE: identity2 is not in the ids list simulating an un-staked node ids := flow.IdentityList{&identity1} @@ -116,8 +116,8 @@ func TestTopicValidator_PublicChannel(t *testing.T) { sn2 := createNode(t, identity2.NodeID, privateKey2, sporkId, zerolog.Nop()) // unauthenticated messages should not be dropped on public channels - channel := network.PublicSyncCommittee - topic := network.TopicFromChannel(channel, sporkId) + channel := channels.PublicSyncCommittee + topic := channels.TopicFromChannel(channel, sporkId) // node1 is connected to node2 // sn1 <-> sn2 @@ -178,8 +178,8 @@ func TestAuthorizedSenderValidator_Unauthorized(t *testing.T) { sn2, identity2 := nodeFixture(t, context.Background(), sporkId, "TestAuthorizedSenderValidator_InvalidMsg", withRole(flow.RoleConsensus)) an1, identity3 := nodeFixture(t, context.Background(), sporkId, "TestAuthorizedSenderValidator_InvalidMsg", withRole(flow.RoleAccess)) - channel := network.ConsensusCommittee - topic := network.TopicFromChannel(channel, sporkId) + channel := channels.ConsensusCommittee + topic := channels.TopicFromChannel(channel, sporkId) ids := flow.IdentityList{&identity1, &identity2, &identity3} @@ -274,8 +274,8 @@ func TestAuthorizedSenderValidator_InvalidMsg(t *testing.T) { sn2, identity2 := nodeFixture(t, context.Background(), sporkId, "consensus_2", withRole(flow.RoleConsensus)) // try to publish BlockProposal on invalid SyncCommittee channel - channel := network.SyncCommittee - topic := network.TopicFromChannel(channel, sporkId) + channel := channels.SyncCommittee + topic := channels.TopicFromChannel(channel, sporkId) ids := flow.IdentityList{&identity1, &identity2} translator, err := p2p.NewFixedTableIdentityTranslator(ids) @@ -345,8 +345,8 @@ func 
TestAuthorizedSenderValidator_Ejected(t *testing.T) { sn2, identity2 := nodeFixture(t, context.Background(), sporkId, "consensus_2", withRole(flow.RoleConsensus)) an1, identity3 := nodeFixture(t, context.Background(), sporkId, "access_1", withRole(flow.RoleAccess)) - channel := network.ConsensusCommittee - topic := network.TopicFromChannel(channel, sporkId) + channel := channels.ConsensusCommittee + topic := channels.TopicFromChannel(channel, sporkId) ids := flow.IdentityList{&identity1, &identity2, &identity3} translator, err := p2p.NewFixedTableIdentityTranslator(ids) @@ -438,8 +438,8 @@ func TestAuthorizedSenderValidator_ClusterChannel(t *testing.T) { ln2, identity2 := nodeFixture(t, context.Background(), sporkId, "collection_2", withRole(flow.RoleCollection)) ln3, identity3 := nodeFixture(t, context.Background(), sporkId, "collection_3", withRole(flow.RoleCollection)) - channel := network.ChannelSyncCluster(flow.Testnet) - topic := network.TopicFromChannel(channel, sporkId) + channel := channels.ChannelSyncCluster(flow.Testnet) + topic := channels.TopicFromChannel(channel, sporkId) ids := flow.IdentityList{&identity1, &identity2, &identity3} translator, err := p2p.NewFixedTableIdentityTranslator(ids) diff --git a/network/proxy/network.go b/network/proxy/network.go index 38f3c685215..57ce6d2f965 100644 --- a/network/proxy/network.go +++ b/network/proxy/network.go @@ -3,6 +3,7 @@ package proxy import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" ) type ProxyNetwork struct { @@ -20,7 +21,7 @@ func NewProxyNetwork(net network.Network, targetNodeID flow.Identifier) *ProxyNe } // Register registers an engine with the proxy network. 
-func (n *ProxyNetwork) Register(channel network.Channel, engine network.Engine) (network.Conduit, error) { +func (n *ProxyNetwork) Register(channel channels.Channel, engine network.Engine) (network.Conduit, error) { con, err := n.Network.Register(channel, engine) if err != nil { diff --git a/network/proxy/network_test.go b/network/proxy/network_test.go index fe12417f1e3..095ceb89bd8 100644 --- a/network/proxy/network_test.go +++ b/network/proxy/network_test.go @@ -3,6 +3,7 @@ package proxy_test import ( "testing" + "github.com/onflow/flow-go/network/channels" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/suite" @@ -48,7 +49,7 @@ func (suite *Suite) SetupTest() { // TestUnicast tests that the Unicast method is translated to a unicast to the target node // on the underlying network instance. func (suite *Suite) TestUnicast() { - channel := network.Channel("test-channel") + channel := channels.Channel("test-channel") targetID := unittest.IdentifierFixture() event := getEvent() @@ -67,7 +68,7 @@ func (suite *Suite) TestUnicast() { // TestPublish tests that the Publish method is translated to a publish to the target node // on the underlying network instance. func (suite *Suite) TestPublish() { - channel := network.Channel("test-channel") + channel := channels.Channel("test-channel") targetIDs := make([]flow.Identifier, 10) for i := 0; i < 10; i++ { @@ -91,7 +92,7 @@ func (suite *Suite) TestPublish() { // TestUnicast tests that the Multicast method is translated to a multicast to the target node // on the underlying network instance. func (suite *Suite) TestMulticast() { - channel := network.Channel("test-channel") + channel := channels.Channel("test-channel") targetIDs := make([]flow.Identifier, 10) for i := 0; i < 10; i++ { @@ -114,7 +115,7 @@ func (suite *Suite) TestMulticast() { // TestClose tests that closing the proxy conduit closes the wrapped conduit. 
func (suite *Suite) TestClose() { - channel := network.Channel("test-channel") + channel := channels.Channel("test-channel") con, err := suite.proxyNet.Register(channel, suite.engine) suite.Assert().NoError(err) diff --git a/network/queue/eventPriority.go b/network/queue/eventPriority.go index cf2c7ff4028..f6e043afb08 100644 --- a/network/queue/eventPriority.go +++ b/network/queue/eventPriority.go @@ -7,7 +7,7 @@ import ( "github.com/onflow/flow-go/model/flow" libp2pmessage "github.com/onflow/flow-go/model/libp2p/message" "github.com/onflow/flow-go/model/messages" - "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" ) const ( @@ -18,10 +18,10 @@ const ( // QMessage is the message that is enqueued for each incoming message type QMessage struct { - Payload interface{} // the decoded message - Size int // the size of the message in bytes - Target network.Channel // the target channel to lookup the engine - SenderID flow.Identifier // senderID for logging + Payload interface{} // the decoded message + Size int // the size of the message in bytes + Target channels.Channel // the target channel to lookup the engine + SenderID flow.Identifier // senderID for logging } // GetEventPriority returns the priority of the flow event message. 
diff --git a/network/relay/network.go b/network/relay/network.go index 96a62cc3959..0ea4367593a 100644 --- a/network/relay/network.go +++ b/network/relay/network.go @@ -5,6 +5,7 @@ import ( "github.com/ipfs/go-datastore" "github.com/libp2p/go-libp2p-core/protocol" + "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/onflow/flow-go/module/irrecoverable" @@ -15,8 +16,8 @@ import ( type RelayNetwork struct { originNet network.Network destinationNet network.Network - logger zerolog.Logger - channels network.ChannelList + logger zerolog.Logger + channels channels.ChannelList } var _ network.Network = (*RelayNetwork)(nil) @@ -25,7 +26,7 @@ func NewRelayNetwork( originNetwork network.Network, destinationNetwork network.Network, logger zerolog.Logger, - channels []network.Channel, + channels []channels.Channel, ) *RelayNetwork { return &RelayNetwork{ originNet: originNetwork, @@ -35,7 +36,7 @@ func NewRelayNetwork( } } -func (r *RelayNetwork) Register(channel network.Channel, messageProcessor network.MessageProcessor) (network.Conduit, error) { +func (r *RelayNetwork) Register(channel channels.Channel, messageProcessor network.MessageProcessor) (network.Conduit, error) { if !r.channels.Contains(channel) { return r.originNet.Register(channel, messageProcessor) } @@ -69,7 +70,7 @@ func (r *RelayNetwork) Done() <-chan struct{} { return util.AllDone(r.originNet, r.destinationNet) } -func (r *RelayNetwork) RegisterBlobService(channel network.Channel, store datastore.Batching, opts ...network.BlobServiceOption) (network.BlobService, error) { +func (r *RelayNetwork) RegisterBlobService(channel channels.Channel, store datastore.Batching, opts ...network.BlobServiceOption) (network.BlobService, error) { return r.originNet.RegisterBlobService(channel, store, opts...) 
} diff --git a/network/relay/relayer.go b/network/relay/relayer.go index cef7e2803c2..6d676e0287c 100644 --- a/network/relay/relayer.go +++ b/network/relay/relayer.go @@ -3,6 +3,7 @@ package relay import ( "fmt" + "github.com/onflow/flow-go/network/channels" "golang.org/x/sync/errgroup" "github.com/onflow/flow-go/model/flow" @@ -18,13 +19,13 @@ type Relayer struct { // ignored. If a usecase arises, we should implement a mechanism to forward these messages to a handler. type noopProcessor struct{} -func (n *noopProcessor) Process(channel network.Channel, originID flow.Identifier, event interface{}) error { +func (n *noopProcessor) Process(channel channels.Channel, originID flow.Identifier, event interface{}) error { return nil } var _ network.MessageProcessor = (*Relayer)(nil) -func NewRelayer(destinationNetwork network.Network, channel network.Channel, processor network.MessageProcessor) (*Relayer, error) { +func NewRelayer(destinationNetwork network.Network, channel channels.Channel, processor network.MessageProcessor) (*Relayer, error) { conduit, err := destinationNetwork.Register(channel, &noopProcessor{}) if err != nil { @@ -38,7 +39,7 @@ func NewRelayer(destinationNetwork network.Network, channel network.Channel, pro } -func (r *Relayer) Process(channel network.Channel, originID flow.Identifier, event interface{}) error { +func (r *Relayer) Process(channel channels.Channel, originID flow.Identifier, event interface{}) error { g := new(errgroup.Group) g.Go(func() error { diff --git a/network/stub/buffer.go b/network/stub/buffer.go index 98b4905df8a..8f290e929fd 100644 --- a/network/stub/buffer.go +++ b/network/stub/buffer.go @@ -4,14 +4,14 @@ import ( "sync" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" ) // PendingMessage is a pending message to be sent type PendingMessage struct { // The sender node id From flow.Identifier - Channel network.Channel + Channel channels.Channel Event 
interface{} // The id of the receiver nodes TargetIDs []flow.Identifier diff --git a/network/stub/hash.go b/network/stub/hash.go index e9490d482cc..fd13f0906f8 100644 --- a/network/stub/hash.go +++ b/network/stub/hash.go @@ -7,11 +7,11 @@ import ( "github.com/onflow/flow-go/crypto/hash" "github.com/onflow/flow-go/model/encoding/json" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" ) // eventKey generates a unique fingerprint for the tuple of (sender, event, type of event, channel) -func eventKey(from flow.Identifier, channel network.Channel, event interface{}) (string, error) { +func eventKey(from flow.Identifier, channel channels.Channel, event interface{}) (string, error) { marshaler := json.NewMarshaler() tag, err := marshaler.Marshal([]byte(fmt.Sprintf("testthenetwork %s %T", channel, event))) diff --git a/network/stub/network.go b/network/stub/network.go index fe68649870d..c976cdc9eeb 100644 --- a/network/stub/network.go +++ b/network/stub/network.go @@ -7,6 +7,7 @@ import ( "testing" "time" + "github.com/onflow/flow-go/network/channels" "github.com/pkg/errors" "github.com/stretchr/testify/require" @@ -25,11 +26,11 @@ type Network struct { mocknetwork.Network ctx context.Context sync.Mutex - myId flow.Identifier // used to represent information of the attached node. - hub *Hub // used to attach Network layers of nodes together. - engines map[network.Channel]network.MessageProcessor // used to keep track of attached engines of the node. - seenEventIDs sync.Map // used to keep track of event IDs seen by attached engines. - qCD chan struct{} // used to stop continuous delivery mode of the Network. + myId flow.Identifier // used to represent information of the attached node. + hub *Hub // used to attach Network layers of nodes together. + engines map[channels.Channel]network.MessageProcessor // used to keep track of attached engines of the node. 
+ seenEventIDs sync.Map // used to keep track of event IDs seen by attached engines. + qCD chan struct{} // used to stop continuous delivery mode of the Network. conduitFactory network.ConduitFactory } @@ -47,7 +48,7 @@ func NewNetwork(t testing.TB, myId flow.Identifier, hub *Hub, opts ...func(*Netw ctx: context.Background(), myId: myId, hub: hub, - engines: make(map[network.Channel]network.MessageProcessor), + engines: make(map[channels.Channel]network.MessageProcessor), qCD: make(chan struct{}), conduitFactory: conduit.NewDefaultConduitFactory(), } @@ -70,7 +71,7 @@ func (n *Network) GetID() flow.Identifier { // Register registers an Engine of the attached node to the channel via a Conduit, and returns the // Conduit instance. -func (n *Network) Register(channel network.Channel, engine network.MessageProcessor) (network.Conduit, error) { +func (n *Network) Register(channel channels.Channel, engine network.MessageProcessor) (network.Conduit, error) { n.Lock() defer n.Unlock() _, ok := n.engines[channel] @@ -88,7 +89,7 @@ func (n *Network) Register(channel network.Channel, engine network.MessageProces return c, nil } -func (n *Network) UnRegisterChannel(channel network.Channel) error { +func (n *Network) UnRegisterChannel(channel channels.Channel) error { n.Lock() defer n.Unlock() delete(n.engines, channel) @@ -97,7 +98,7 @@ func (n *Network) UnRegisterChannel(channel network.Channel) error { // submit is called when the attached Engine to the channel is sending an event to an // Engine attached to the same channel on another node or nodes. -func (n *Network) submit(channel network.Channel, event interface{}, targetIDs ...flow.Identifier) error { +func (n *Network) submit(channel channels.Channel, event interface{}, targetIDs ...flow.Identifier) error { m := &PendingMessage{ From: n.GetID(), Channel: channel, @@ -112,7 +113,7 @@ func (n *Network) submit(channel network.Channel, event interface{}, targetIDs . 
// unicast is called when the attached Engine to the channel is sending an event to a single target // Engine attached to the same channel on another node. -func (n *Network) UnicastOnChannel(channel network.Channel, event interface{}, targetID flow.Identifier) error { +func (n *Network) UnicastOnChannel(channel channels.Channel, event interface{}, targetID flow.Identifier) error { m := &PendingMessage{ From: n.GetID(), Channel: channel, @@ -127,7 +128,7 @@ func (n *Network) UnicastOnChannel(channel network.Channel, event interface{}, t // publish is called when the attached Engine is sending an event to a group of Engines attached to the // same channel on other nodes based on selector. // In this test helper implementation, publish uses submit method under the hood. -func (n *Network) PublishOnChannel(channel network.Channel, event interface{}, targetIDs ...flow.Identifier) error { +func (n *Network) PublishOnChannel(channel channels.Channel, event interface{}, targetIDs ...flow.Identifier) error { if len(targetIDs) == 0 { return fmt.Errorf("publish found empty target ID list for the message") @@ -139,7 +140,7 @@ func (n *Network) PublishOnChannel(channel network.Channel, event interface{}, t // multicast is called when an engine attached to the channel is sending an event to a number of randomly chosen // Engines attached to the same channel on other nodes. The targeted nodes are selected based on the selector. // In this test helper implementation, multicast uses submit method under the hood. -func (n *Network) MulticastOnChannel(channel network.Channel, event interface{}, num uint, targetIDs ...flow.Identifier) error { +func (n *Network) MulticastOnChannel(channel channels.Channel, event interface{}, num uint, targetIDs ...flow.Identifier) error { targetIDs = flow.Sample(num, targetIDs...) return n.submit(channel, event, targetIDs...) 
} diff --git a/network/subscription.go b/network/subscription.go index d472e4c8148..9a3a4d21eaf 100644 --- a/network/subscription.go +++ b/network/subscription.go @@ -1,15 +1,17 @@ package network +import "github.com/onflow/flow-go/network/channels" + type SubscriptionManager interface { // Register registers an engine on the channel into the subscription manager. - Register(channel Channel, engine MessageProcessor) error + Register(channel channels.Channel, engine MessageProcessor) error // Unregister removes the engine associated with a channel. - Unregister(channel Channel) error + Unregister(channel channels.Channel) error // GetEngine returns engine associated with a channel. - GetEngine(channel Channel) (MessageProcessor, error) + GetEngine(channel channels.Channel) (MessageProcessor, error) // Channels returns all the channels registered in this subscription manager. - Channels() ChannelList + Channels() channels.ChannelList } diff --git a/network/test/blob_service_test.go b/network/test/blob_service_test.go index 18a6f408ee0..db02f4206db 100644 --- a/network/test/blob_service_test.go +++ b/network/test/blob_service_test.go @@ -11,6 +11,7 @@ import ( "github.com/ipfs/go-datastore" "github.com/ipfs/go-datastore/sync" blockstore "github.com/ipfs/go-ipfs-blockstore" + "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/stretchr/testify/suite" "go.uber.org/atomic" @@ -34,7 +35,7 @@ type conditionalTopology struct { var _ network.Topology = (*conditionalTopology)(nil) -func (t *conditionalTopology) GenerateFanout(ids flow.IdentityList, channels network.ChannelList) (flow.IdentityList, error) { +func (t *conditionalTopology) GenerateFanout(ids flow.IdentityList, channels channels.ChannelList) (flow.IdentityList, error) { if t.condition() { return t.top.GenerateFanout(ids, channels) } else { @@ -94,7 +95,7 @@ func (suite *BlobServiceTestSuite) SetupTest() { ) suite.networks = networks - blobExchangeChannel := 
network.Channel("blob-exchange") + blobExchangeChannel := channels.Channel("blob-exchange") for i, net := range networks { ds := sync.MutexWrap(datastore.NewMapDatastore()) diff --git a/network/test/echoengine.go b/network/test/echoengine.go index a4e460f2662..fc24b689989 100644 --- a/network/test/echoengine.go +++ b/network/test/echoengine.go @@ -6,6 +6,7 @@ import ( "sync" "testing" + "github.com/onflow/flow-go/network/channels" "github.com/stretchr/testify/require" "github.com/onflow/flow-go/model/flow" @@ -23,7 +24,7 @@ type EchoEngine struct { con network.Conduit // used to directly communicate with the network originID flow.Identifier // used to keep track of the id of the sender of the messages event chan interface{} // used to keep track of the events that the node receives - channel chan network.Channel // used to keep track of the channels that events are received on + channel chan channels.Channel // used to keep track of the channels that events are received on received chan struct{} // used as an indicator on reception of messages for testing echomsg string // used as a fix string to be included in the reply echos seen map[string]int // used to track the seen events @@ -32,12 +33,12 @@ type EchoEngine struct { mockcomponent.Component } -func NewEchoEngine(t *testing.T, net network.Network, cap int, channel network.Channel, echo bool, send ConduitSendWrapperFunc) *EchoEngine { +func NewEchoEngine(t *testing.T, net network.Network, cap int, channel channels.Channel, echo bool, send ConduitSendWrapperFunc) *EchoEngine { te := &EchoEngine{ t: t, echomsg: "this is an echo", event: make(chan interface{}, cap), - channel: make(chan network.Channel, cap), + channel: make(chan channels.Channel, cap), received: make(chan struct{}, cap), seen: make(map[string]int), echo: echo, @@ -59,7 +60,7 @@ func (te *EchoEngine) SubmitLocal(event interface{}) { // Submit is implemented for a valid type assertion to Engine // any call to it fails the test -func (te 
*EchoEngine) Submit(channel network.Channel, originID flow.Identifier, event interface{}) { +func (te *EchoEngine) Submit(channel channels.Channel, originID flow.Identifier, event interface{}) { go func() { err := te.Process(channel, originID, event) if err != nil { @@ -78,7 +79,7 @@ func (te *EchoEngine) ProcessLocal(event interface{}) error { // Process receives an originID and an event and casts them into the corresponding fields of the // EchoEngine. It then flags the received channel on reception of an event. // It also sends back an echo of the message to the origin ID -func (te *EchoEngine) Process(channel network.Channel, originID flow.Identifier, event interface{}) error { +func (te *EchoEngine) Process(channel channels.Channel, originID flow.Identifier, event interface{}) error { te.Lock() defer te.Unlock() te.originID = originID diff --git a/network/test/echoengine_test.go b/network/test/echoengine_test.go index d4451681ade..bc3c215c9ed 100644 --- a/network/test/echoengine_test.go +++ b/network/test/echoengine_test.go @@ -10,6 +10,7 @@ import ( "time" "github.com/ipfs/go-log" + "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -66,16 +67,16 @@ func (suite *EchoEngineTestSuite) TearDownTest() { // TestUnknownChannel evaluates that registering an engine with an unknown channel returns an error. // All channels should be registered as topics in engine.topicMap. func (suite *EchoEngineTestSuite) TestUnknownChannel() { - e := NewEchoEngine(suite.T(), suite.nets[0], 1, network.TestNetworkChannel, false, suite.Unicast) + e := NewEchoEngine(suite.T(), suite.nets[0], 1, channels.TestNetworkChannel, false, suite.Unicast) _, err := suite.nets[0].Register("unknown-channel-id", e) require.Error(suite.T(), err) } // TestClusterChannel evaluates that registering a cluster channel is done without any error. 
func (suite *EchoEngineTestSuite) TestClusterChannel() { - e := NewEchoEngine(suite.T(), suite.nets[0], 1, network.TestNetworkChannel, false, suite.Unicast) + e := NewEchoEngine(suite.T(), suite.nets[0], 1, channels.TestNetworkChannel, false, suite.Unicast) // creates a cluster channel - clusterChannel := network.ChannelSyncCluster(flow.Testnet) + clusterChannel := channels.ChannelSyncCluster(flow.Testnet) // registers engine with cluster channel _, err := suite.nets[0].Register(clusterChannel, e) // registering cluster channel should not cause an error @@ -85,11 +86,11 @@ func (suite *EchoEngineTestSuite) TestClusterChannel() { // TestDuplicateChannel evaluates that registering an engine with duplicate channel returns an error. func (suite *EchoEngineTestSuite) TestDuplicateChannel() { // creates an echo engine, which registers it on test network channel - e := NewEchoEngine(suite.T(), suite.nets[0], 1, network.TestNetworkChannel, false, suite.Unicast) + e := NewEchoEngine(suite.T(), suite.nets[0], 1, channels.TestNetworkChannel, false, suite.Unicast) // attempts to register the same engine again on test network channel which // should cause an error - _, err := suite.nets[0].Register(network.TestNetworkChannel, e) + _, err := suite.nets[0].Register(channels.TestNetworkChannel, e) require.Error(suite.T(), err) } @@ -196,10 +197,10 @@ func (suite *EchoEngineTestSuite) duplicateMessageSequential(send ConduitSendWra rcvID := 1 // registers engines in the network // sender's engine - sender := NewEchoEngine(suite.Suite.T(), suite.nets[sndID], 10, network.TestNetworkChannel, false, send) + sender := NewEchoEngine(suite.Suite.T(), suite.nets[sndID], 10, channels.TestNetworkChannel, false, send) // receiver's engine - receiver := NewEchoEngine(suite.Suite.T(), suite.nets[rcvID], 10, network.TestNetworkChannel, false, send) + receiver := NewEchoEngine(suite.Suite.T(), suite.nets[rcvID], 10, channels.TestNetworkChannel, false, send) // allow nodes to heartbeat and discover 
each other if using PubSub optionalSleep(send) @@ -231,10 +232,10 @@ func (suite *EchoEngineTestSuite) duplicateMessageParallel(send ConduitSendWrapp rcvID := 1 // registers engines in the network // sender's engine - sender := NewEchoEngine(suite.Suite.T(), suite.nets[sndID], 10, network.TestNetworkChannel, false, send) + sender := NewEchoEngine(suite.Suite.T(), suite.nets[sndID], 10, channels.TestNetworkChannel, false, send) // receiver's engine - receiver := NewEchoEngine(suite.Suite.T(), suite.nets[rcvID], 10, network.TestNetworkChannel, false, send) + receiver := NewEchoEngine(suite.Suite.T(), suite.nets[rcvID], 10, channels.TestNetworkChannel, false, send) // allow nodes to heartbeat and discover each other optionalSleep(send) @@ -272,8 +273,8 @@ func (suite *EchoEngineTestSuite) duplicateMessageDifferentChan(send ConduitSend rcvNode ) const ( - channel1 = network.TestNetworkChannel - channel2 = network.TestMetricsChannel + channel1 = channels.TestNetworkChannel + channel2 = channels.TestMetricsChannel ) // registers engines in the network // first type @@ -337,10 +338,10 @@ func (suite *EchoEngineTestSuite) singleMessage(echo bool, send ConduitSendWrapp // registers engines in the network // sender's engine - sender := NewEchoEngine(suite.Suite.T(), suite.nets[sndID], 10, network.TestNetworkChannel, echo, send) + sender := NewEchoEngine(suite.Suite.T(), suite.nets[sndID], 10, channels.TestNetworkChannel, echo, send) // receiver's engine - receiver := NewEchoEngine(suite.Suite.T(), suite.nets[rcvID], 10, network.TestNetworkChannel, echo, send) + receiver := NewEchoEngine(suite.Suite.T(), suite.nets[rcvID], 10, channels.TestNetworkChannel, echo, send) // allow nodes to heartbeat and discover each other optionalSleep(send) @@ -362,7 +363,7 @@ func (suite *EchoEngineTestSuite) singleMessage(echo bool, send ConduitSendWrapp assert.Equal(suite.Suite.T(), suite.ids[sndID].NodeID, receiver.originID) receiver.RUnlock() - assertMessageReceived(suite.T(), receiver, 
event, network.TestNetworkChannel) + assertMessageReceived(suite.T(), receiver, event, channels.TestNetworkChannel) case <-time.After(10 * time.Second): assert.Fail(suite.Suite.T(), "sender failed to send a message to receiver") @@ -384,7 +385,7 @@ func (suite *EchoEngineTestSuite) singleMessage(echo bool, send ConduitSendWrapp echoEvent := &message.TestMessage{ Text: fmt.Sprintf("%s: %s", receiver.echomsg, event.Text), } - assertMessageReceived(suite.T(), sender, echoEvent, network.TestNetworkChannel) + assertMessageReceived(suite.T(), sender, echoEvent, channels.TestNetworkChannel) case <-time.After(10 * time.Second): assert.Fail(suite.Suite.T(), "receiver failed to send an echo message back to sender") @@ -402,10 +403,10 @@ func (suite *EchoEngineTestSuite) multiMessageSync(echo bool, count int, send Co rcvID := 1 // registers engines in the network // sender's engine - sender := NewEchoEngine(suite.Suite.T(), suite.nets[sndID], 10, network.TestNetworkChannel, echo, send) + sender := NewEchoEngine(suite.Suite.T(), suite.nets[sndID], 10, channels.TestNetworkChannel, echo, send) // receiver's engine - receiver := NewEchoEngine(suite.Suite.T(), suite.nets[rcvID], 10, network.TestNetworkChannel, echo, send) + receiver := NewEchoEngine(suite.Suite.T(), suite.nets[rcvID], 10, channels.TestNetworkChannel, echo, send) // allow nodes to heartbeat and discover each other optionalSleep(send) @@ -428,7 +429,7 @@ func (suite *EchoEngineTestSuite) multiMessageSync(echo bool, count int, send Co assert.Equal(suite.Suite.T(), suite.ids[sndID].NodeID, receiver.originID) receiver.RUnlock() - assertMessageReceived(suite.T(), receiver, event, network.TestNetworkChannel) + assertMessageReceived(suite.T(), receiver, event, channels.TestNetworkChannel) case <-time.After(2 * time.Second): assert.Fail(suite.Suite.T(), "sender failed to send a message to receiver") @@ -450,7 +451,7 @@ func (suite *EchoEngineTestSuite) multiMessageSync(echo bool, count int, send Co echoEvent := 
&message.TestMessage{ Text: fmt.Sprintf("%s: %s", receiver.echomsg, event.Text), } - assertMessageReceived(suite.T(), sender, echoEvent, network.TestNetworkChannel) + assertMessageReceived(suite.T(), sender, echoEvent, channels.TestNetworkChannel) receiver.RUnlock() sender.RUnlock() @@ -473,10 +474,10 @@ func (suite *EchoEngineTestSuite) multiMessageAsync(echo bool, count int, send C // registers engines in the network // sender's engine - sender := NewEchoEngine(suite.Suite.T(), suite.nets[sndID], 10, network.TestNetworkChannel, echo, send) + sender := NewEchoEngine(suite.Suite.T(), suite.nets[sndID], 10, channels.TestNetworkChannel, echo, send) // receiver's engine - receiver := NewEchoEngine(suite.Suite.T(), suite.nets[rcvID], 10, network.TestNetworkChannel, echo, send) + receiver := NewEchoEngine(suite.Suite.T(), suite.nets[rcvID], 10, channels.TestNetworkChannel, echo, send) // allow nodes to heartbeat and discover each other optionalSleep(send) @@ -522,7 +523,7 @@ func (suite *EchoEngineTestSuite) multiMessageAsync(echo bool, count int, send C received[rcvEvent.Text] = struct{}{} // evaluates channel that message was received on - assert.Equal(suite.T(), network.TestNetworkChannel, <-receiver.channel) + assert.Equal(suite.T(), channels.TestNetworkChannel, <-receiver.channel) }, 100*time.Millisecond) case <-time.After(2 * time.Second): @@ -561,7 +562,7 @@ func (suite *EchoEngineTestSuite) multiMessageAsync(echo bool, count int, send C received[rcvEvent.Text] = struct{}{} // evaluates channel that message was received on - assert.Equal(suite.T(), network.TestNetworkChannel, <-sender.channel) + assert.Equal(suite.T(), channels.TestNetworkChannel, <-sender.channel) }, 100*time.Millisecond) case <-time.After(10 * time.Second): @@ -573,7 +574,7 @@ func (suite *EchoEngineTestSuite) multiMessageAsync(echo bool, count int, send C // assertMessageReceived asserts that the given message was received on the given channel // for the given engine -func 
assertMessageReceived(t *testing.T, e *EchoEngine, m *message.TestMessage, c network.Channel) { +func assertMessageReceived(t *testing.T, e *EchoEngine, m *message.TestMessage, c channels.Channel) { // wrap blocking channel reads with a timeout unittest.AssertReturnsBefore(t, func() { // evaluates proper reception of event diff --git a/network/test/meshengine.go b/network/test/meshengine.go index 56bb954a5cb..109a72c54af 100644 --- a/network/test/meshengine.go +++ b/network/test/meshengine.go @@ -5,6 +5,7 @@ import ( "sync" "testing" + "github.com/onflow/flow-go/network/channels" "github.com/stretchr/testify/require" "github.com/onflow/flow-go/model/flow" @@ -17,19 +18,19 @@ import ( type MeshEngine struct { sync.Mutex t *testing.T - con network.Conduit // used to directly communicate with the network - originID flow.Identifier // used to keep track of the id of the sender of the messages - event chan interface{} // used to keep track of the events that the node receives - channel chan network.Channel // used to keep track of the channels that events are received on - received chan struct{} // used as an indicator on reception of messages for testing + con network.Conduit // used to directly communicate with the network + originID flow.Identifier // used to keep track of the id of the sender of the messages + event chan interface{} // used to keep track of the events that the node receives + channel chan channels.Channel // used to keep track of the channels that events are received on + received chan struct{} // used as an indicator on reception of messages for testing mockcomponent.Component } -func NewMeshEngine(t *testing.T, net network.Network, cap int, channel network.Channel) *MeshEngine { +func NewMeshEngine(t *testing.T, net network.Network, cap int, channel channels.Channel) *MeshEngine { te := &MeshEngine{ t: t, event: make(chan interface{}, cap), - channel: make(chan network.Channel, cap), + channel: make(chan channels.Channel, cap), received: make(chan 
struct{}, cap), } @@ -48,7 +49,7 @@ func (e *MeshEngine) SubmitLocal(event interface{}) { // Submit is implemented for a valid type assertion to Engine // any call to it fails the test -func (e *MeshEngine) Submit(channel network.Channel, originID flow.Identifier, event interface{}) { +func (e *MeshEngine) Submit(channel channels.Channel, originID flow.Identifier, event interface{}) { go func() { err := e.Process(channel, originID, event) if err != nil { @@ -66,7 +67,7 @@ func (e *MeshEngine) ProcessLocal(event interface{}) error { // Process receives an originID and an event and casts them into the corresponding fields of the // MeshEngine. It then flags the received channel on reception of an event. -func (e *MeshEngine) Process(channel network.Channel, originID flow.Identifier, event interface{}) error { +func (e *MeshEngine) Process(channel channels.Channel, originID flow.Identifier, event interface{}) error { e.Lock() defer e.Unlock() diff --git a/network/test/meshengine_test.go b/network/test/meshengine_test.go index 8eb7de169ed..e560e1670ae 100644 --- a/network/test/meshengine_test.go +++ b/network/test/meshengine_test.go @@ -13,6 +13,7 @@ import ( "github.com/ipfs/go-log" pubsub "github.com/libp2p/go-libp2p-pubsub" + "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -172,7 +173,7 @@ func (suite *MeshEngineTestSuite) allToAllScenario(send ConduitSendWrapperFunc) // logs[i][j] keeps the message that node i sends to node j logs := make(map[int][]string) for i := range suite.nets { - eng := NewMeshEngine(suite.Suite.T(), suite.nets[i], count-1, network.TestNetworkChannel) + eng := NewMeshEngine(suite.Suite.T(), suite.nets[i], count-1, channels.TestNetworkChannel) engs = append(engs, eng) logs[i] = make([]string, 0) } @@ -220,7 +221,7 @@ func (suite *MeshEngineTestSuite) allToAllScenario(send ConduitSendWrapperFunc) } for i := 0; i < count-1; i++ { - 
assertChannelReceived(suite.T(), e, network.TestNetworkChannel) + assertChannelReceived(suite.T(), e, channels.TestNetworkChannel) } // extracts failed messages @@ -252,7 +253,7 @@ func (suite *MeshEngineTestSuite) targetValidatorScenario(send ConduitSendWrappe wg := sync.WaitGroup{} for i := range suite.nets { - eng := NewMeshEngine(suite.Suite.T(), suite.nets[i], count-1, network.TestNetworkChannel) + eng := NewMeshEngine(suite.Suite.T(), suite.nets[i], count-1, channels.TestNetworkChannel) engs = append(engs, eng) } @@ -295,7 +296,7 @@ func (suite *MeshEngineTestSuite) targetValidatorScenario(send ConduitSendWrappe for index, e := range engs { if index < len(engs)/2 { assert.Len(suite.Suite.T(), e.event, 1, fmt.Sprintf("message not received %v", index)) - assertChannelReceived(suite.T(), e, network.TestNetworkChannel) + assertChannelReceived(suite.T(), e, channels.TestNetworkChannel) } else { assert.Len(suite.Suite.T(), e.event, 0, fmt.Sprintf("message received when none was expected %v", index)) } @@ -312,7 +313,7 @@ func (suite *MeshEngineTestSuite) messageSizeScenario(send ConduitSendWrapperFun wg := sync.WaitGroup{} for i := range suite.nets { - eng := NewMeshEngine(suite.Suite.T(), suite.nets[i], count-1, network.TestNetworkChannel) + eng := NewMeshEngine(suite.Suite.T(), suite.nets[i], count-1, channels.TestNetworkChannel) engs = append(engs, eng) } @@ -350,7 +351,7 @@ func (suite *MeshEngineTestSuite) messageSizeScenario(send ConduitSendWrapperFun // evaluates that all messages are received for index, e := range engs[1:] { assert.Len(suite.Suite.T(), e.event, 1, "message not received by engine %d", index+1) - assertChannelReceived(suite.T(), e, network.TestNetworkChannel) + assertChannelReceived(suite.T(), e, channels.TestNetworkChannel) } } @@ -365,7 +366,7 @@ func (suite *MeshEngineTestSuite) conduitCloseScenario(send ConduitSendWrapperFu wg := sync.WaitGroup{} for i := range suite.nets { - eng := NewMeshEngine(suite.Suite.T(), suite.nets[i], count-1, 
network.TestNetworkChannel) + eng := NewMeshEngine(suite.Suite.T(), suite.nets[i], count-1, channels.TestNetworkChannel) engs = append(engs, eng) } @@ -431,7 +432,7 @@ func (suite *MeshEngineTestSuite) conduitCloseScenario(send ConduitSendWrapperFu } // assertChannelReceived asserts that the given channel was received on the given engine -func assertChannelReceived(t *testing.T, e *MeshEngine, channel network.Channel) { +func assertChannelReceived(t *testing.T, e *MeshEngine, channel channels.Channel) { unittest.AssertReturnsBefore(t, func() { assert.Equal(t, channel, <-e.channel) }, 100*time.Millisecond) diff --git a/network/test/middleware_test.go b/network/test/middleware_test.go index 8de2020e686..79cfc829f89 100644 --- a/network/test/middleware_test.go +++ b/network/test/middleware_test.go @@ -10,6 +10,7 @@ import ( "github.com/ipfs/go-log" swarm "github.com/libp2p/go-libp2p-swarm" + "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" @@ -32,7 +33,7 @@ import ( "github.com/onflow/flow-go/utils/unittest" ) -const testChannel = network.PublicSyncCommittee +const testChannel = channels.PublicSyncCommittee // libp2p emits a call to `Protect` with a topic-specific tag upon establishing each peering connection in a GossipSUb mesh, see: // https://github.com/libp2p/go-libp2p-pubsub/blob/master/tag_tracer.go diff --git a/network/test/testUtil.go b/network/test/testUtil.go index e0d10db758c..e35a6375409 100644 --- a/network/test/testUtil.go +++ b/network/test/testUtil.go @@ -16,6 +16,7 @@ import ( "github.com/libp2p/go-libp2p-core/routing" dht "github.com/libp2p/go-libp2p-kad-dht" pubsub "github.com/libp2p/go-libp2p-pubsub" + "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/stretchr/testify/require" @@ -319,7 +320,7 @@ func GenerateEngines(t *testing.T, nets []network.Network) []*MeshEngine { count := len(nets) engs := make([]*MeshEngine, count) for 
i, n := range nets { - eng := NewMeshEngine(t, n, 100, network.TestNetworkChannel) + eng := NewMeshEngine(t, n, 100, channels.TestNetworkChannel) engs[i] = eng } return engs diff --git a/network/topology.go b/network/topology.go index 1d746c70896..319742d187e 100644 --- a/network/topology.go +++ b/network/topology.go @@ -2,6 +2,7 @@ package network import ( "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/network/channels" ) // Topology provides a subset of nodes which a given node should directly connect to for 1-k messaging. @@ -19,5 +20,5 @@ type Topology interface { // // GenerateFanout is not concurrency safe. It is responsibility of caller to lock for it. // with the channels argument, it allows the returned topology to be cached, which is necessary for randomized topology. - GenerateFanout(ids flow.IdentityList, channels ChannelList) (flow.IdentityList, error) + GenerateFanout(ids flow.IdentityList, channels channels.ChannelList) (flow.IdentityList, error) } diff --git a/network/topology/cache.go b/network/topology/cache.go index c305140abed..1d0938dff23 100644 --- a/network/topology/cache.go +++ b/network/topology/cache.go @@ -1,6 +1,7 @@ package topology import ( + "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/onflow/flow-go/model/flow" @@ -47,7 +48,7 @@ func NewCache(log zerolog.Logger, top network.Topology) *Cache { // // Note that this implementation of GenerateFanout preserves same output as long as input is the same. This // should not be assumed as a 1-1 mapping between input and output. 
-func (c *Cache) GenerateFanout(ids flow.IdentityList, channels network.ChannelList) (flow.IdentityList, error) { +func (c *Cache) GenerateFanout(ids flow.IdentityList, channels channels.ChannelList) (flow.IdentityList, error) { inputIdsFP := ids.Fingerprint() inputChansFP := channels.ID() diff --git a/network/topology/cache_test.go b/network/topology/cache_test.go index 72122a359da..edfbbf03e73 100644 --- a/network/topology/cache_test.go +++ b/network/topology/cache_test.go @@ -5,13 +5,13 @@ import ( "os" "testing" + "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" - "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/mocknetwork" mockprotocol "github.com/onflow/flow-go/state/protocol/mock" "github.com/onflow/flow-go/utils/unittest" @@ -27,7 +27,7 @@ func TestCache_GenerateFanout_HappyPath(t *testing.T) { log := zerolog.New(os.Stderr).Level(zerolog.DebugLevel) ids := unittest.IdentityListFixture(100) fanout := ids.Sample(10) - channels := network.ChannelList{"Channel1", "Channel2", "Channel3"} + channels := channels.ChannelList{"Channel1", "Channel2", "Channel3"} top.On("GenerateFanout", ids, channels).Return(fanout, nil).Once() cache := NewCache(log, top) @@ -58,7 +58,7 @@ func TestCache_GenerateFanout_Error(t *testing.T) { // returning error on fanout generation should invalidate cache // same error should be returned. 
- fanout, err := cache.GenerateFanout(ids, network.ChannelList{}) + fanout, err := cache.GenerateFanout(ids, channels.ChannelList{}) require.Error(t, err) require.Nil(t, fanout) require.Equal(t, cache.idsFP, flow.Identifier{}) @@ -76,7 +76,7 @@ func TestCache_InputChange_IDs(t *testing.T) { top := &mocknetwork.Topology{} ids := unittest.IdentityListFixture(100) fanout := ids.Sample(10) - channels := network.ChannelList{"channel1", "channel2"} + channels := channels.ChannelList{"channel1", "channel2"} top.On("GenerateFanout", mock.Anything, channels).Return(fanout, nil).Once() // assumes cache holding some fanout for the same @@ -109,7 +109,7 @@ func TestCache_InputChange_Channels(t *testing.T) { top := &mocknetwork.Topology{} ids := unittest.IdentityListFixture(100) fanout := ids.Sample(10) - channels := network.ChannelList{"channel1", "channel2"} + channels := channels.ChannelList{"channel1", "channel2"} top.On("GenerateFanout", ids, mock.Anything).Return(fanout, nil).Once() // assumes cache holding some fanout for the same @@ -188,7 +188,7 @@ func TestCache_TopicBased(t *testing.T) { // requireDeterministicBehavior evaluates that consecutive invocations of cache on fanout generation with the same input (i.e., ids and channels), // results in the same output fanout as the one passed to the method. 
-func requireDeterministicBehavior(t *testing.T, cache *Cache, fanout flow.IdentityList, ids flow.IdentityList, channels network.ChannelList) { +func requireDeterministicBehavior(t *testing.T, cache *Cache, fanout flow.IdentityList, ids flow.IdentityList, channels channels.ChannelList) { // Testing deterministic // // Over consecutive invocations of cache with the same (new) input, the same diff --git a/network/topology/fixedListTopology.go b/network/topology/fixedListTopology.go index b0259ad9b41..7baeabd47a9 100644 --- a/network/topology/fixedListTopology.go +++ b/network/topology/fixedListTopology.go @@ -1,6 +1,7 @@ package topology import ( + "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/onflow/flow-go/model/flow" @@ -28,7 +29,7 @@ func NewFixedListTopology(nodeID flow.Identifier) FixedListTopology { } } -func (r FixedListTopology) GenerateFanout(ids flow.IdentityList, _ network.ChannelList) (flow.IdentityList, error) { +func (r FixedListTopology) GenerateFanout(ids flow.IdentityList, _ channels.ChannelList) (flow.IdentityList, error) { return ids.Filter(filter.HasNodeID(r.fixedNodeID)), nil } @@ -36,6 +37,6 @@ func (r FixedListTopology) GenerateFanout(ids flow.IdentityList, _ network.Chann type EmptyListTopology struct { } -func (r EmptyListTopology) GenerateFanout(_ flow.IdentityList, _ network.ChannelList) (flow.IdentityList, error) { +func (r EmptyListTopology) GenerateFanout(_ flow.IdentityList, _ channels.ChannelList) (flow.IdentityList, error) { return flow.IdentityList{}, nil } diff --git a/network/topology/fullyConnectedTopology.go b/network/topology/fullyConnectedTopology.go index 876e15f4929..d8dba83af3a 100644 --- a/network/topology/fullyConnectedTopology.go +++ b/network/topology/fullyConnectedTopology.go @@ -1,6 +1,7 @@ package topology import ( + "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/onflow/flow-go/model/flow" @@ -23,6 +24,6 @@ func NewFullyConnectedTopology() 
network.Topology { return &FullyConnectedTopology{} } -func (f *FullyConnectedTopology) GenerateFanout(ids flow.IdentityList, _ network.ChannelList) (flow.IdentityList, error) { +func (f *FullyConnectedTopology) GenerateFanout(ids flow.IdentityList, _ channels.ChannelList) (flow.IdentityList, error) { return ids, nil } diff --git a/network/topology/helper.go b/network/topology/helper.go index e3aaa9c779e..fd4e8276cfd 100644 --- a/network/topology/helper.go +++ b/network/topology/helper.go @@ -5,6 +5,7 @@ import ( "math" "testing" + "github.com/onflow/flow-go/network/channels" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -38,8 +39,8 @@ func MockStateForCollectionNodes(t *testing.T, collectorIds flow.IdentityList, c } // connectednessByChannel verifies that the subgraph of nodes subscribed to a channel is connected. -func connectednessByChannel(t *testing.T, adjMap map[flow.Identifier]flow.IdentityList, ids flow.IdentityList, channel network.Channel) { - roles, ok := network.RolesByChannel(channel) +func connectednessByChannel(t *testing.T, adjMap map[flow.Identifier]flow.IdentityList, ids flow.IdentityList, channel channels.Channel) { + roles, ok := channels.RolesByChannel(channel) require.True(t, ok) Connected(t, adjMap, ids, filter.HasRole(roles...)) } @@ -81,7 +82,7 @@ func MockSubscriptionManager(t *testing.T, ids flow.IdentityList) []network.Subs sm.On("Register", mock.Anything, mock.Anything).Return(err) sm.On("Unregister", mock.Anything).Return(err) sm.On("GetEngine", mock.Anything).Return(err) - sm.On("Channels").Return(network.ChannelsByRole(id.Role)) + sm.On("Channels").Return(channels.ChannelsByRole(id.Role)) sms[i] = sm } diff --git a/network/topology/randomizedTopology.go b/network/topology/randomizedTopology.go index 182ab394e51..3e5a89d1626 100644 --- a/network/topology/randomizedTopology.go +++ b/network/topology/randomizedTopology.go @@ -3,6 +3,7 @@ package topology import ( "fmt" + 
"github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/onflow/flow-go/crypto/random" @@ -70,8 +71,8 @@ func NewRandomizedTopology(nodeID flow.Identifier, logger zerolog.Logger, edgePr // Independent invocations of GenerateFanout on different nodes collaboratively must construct a cohesive // connected graph of nodes that enables them talking to each other. This should be done with a very high probability // in randomized topology. -func (r RandomizedTopology) GenerateFanout(ids flow.IdentityList, channels network.ChannelList) (flow.IdentityList, error) { - myUniqueChannels := network.UniqueChannels(channels) +func (r RandomizedTopology) GenerateFanout(ids flow.IdentityList, channels channels.ChannelList) (flow.IdentityList, error) { + myUniqueChannels := channels.UniqueChannels(channels) if len(myUniqueChannels) == 0 { // no subscribed channel, hence skip topology creation // we do not return an error at this state as invocation of MakeTopology may happen before @@ -103,12 +104,12 @@ func (r RandomizedTopology) GenerateFanout(ids flow.IdentityList, channels netwo // subsetChannel returns a random subset of the identity list that is passed. // Returned identities should all subscribed to the specified `channel`. // Note: this method should not include identity of its executor. -func (r RandomizedTopology) subsetChannel(ids flow.IdentityList, channel network.Channel) (flow.IdentityList, error) { +func (r RandomizedTopology) subsetChannel(ids flow.IdentityList, channel channels.Channel) (flow.IdentityList, error) { // excludes node itself sampleSpace := ids.Filter(filter.Not(filter.HasNodeID(r.myNodeID))) // samples a random graph based on whether channel is cluster-based or not. 
- if network.IsClusterChannel(channel) { + if channels.IsClusterChannel(channel) { return r.clusterChannelHandler(sampleSpace) } return r.nonClusterChannelHandler(sampleSpace, channel) @@ -158,13 +159,13 @@ func (r RandomizedTopology) clusterChannelHandler(ids flow.IdentityList) (flow.I } // clusterChannelHandler returns a connected graph fanout of peers from `ids` that subscribed to `channel`. -func (r RandomizedTopology) nonClusterChannelHandler(ids flow.IdentityList, channel network.Channel) (flow.IdentityList, error) { - if network.IsClusterChannel(channel) { +func (r RandomizedTopology) nonClusterChannelHandler(ids flow.IdentityList, channel channels.Channel) (flow.IdentityList, error) { + if channels.IsClusterChannel(channel) { return nil, fmt.Errorf("could not handle cluster channel: %s", channel) } // extracts flow roles subscribed to topic. - roles, ok := network.RolesByChannel(channel) + roles, ok := channels.RolesByChannel(channel) if !ok { return nil, fmt.Errorf("unknown topic with no subscribed roles: %s", channel) } diff --git a/network/topology/randomizedTopology_test.go b/network/topology/randomizedTopology_test.go index 88626947fd2..1a9d1e13a4d 100644 --- a/network/topology/randomizedTopology_test.go +++ b/network/topology/randomizedTopology_test.go @@ -5,13 +5,13 @@ import ( "sort" "testing" + "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" - "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/utils/unittest" ) @@ -89,7 +89,7 @@ func (suite *RandomizedTopologyTestSuite) TestUnhappyInitialization() { func (suite *RandomizedTopologyTestSuite) TestUniqueness() { var previous, current []string - topics := network.ChannelsByRole(flow.RoleConsensus) + topics := channels.ChannelsByRole(flow.RoleConsensus) 
require.Greater(suite.T(), len(topics), 1) for _, identity := range suite.all { @@ -126,7 +126,7 @@ func (suite *RandomizedTopologyTestSuite) TestUniqueness() { // TestConnectedness_NonClusterChannel checks whether graph components corresponding to a // non-cluster channel are individually connected. func (suite *RandomizedTopologyTestSuite) TestConnectedness_NonClusterChannel() { - channel := network.TestNetworkChannel + channel := channels.TestNetworkChannel // adjacency map keeps graph component of a single channel channelAdjMap := make(map[flow.Identifier]flow.IdentityList) @@ -151,7 +151,7 @@ func (suite *RandomizedTopologyTestSuite) TestConnectedness_NonClusterChannel() // cluster channel are individually connected. func (suite *RandomizedTopologyTestSuite) TestConnectedness_ClusterChannel() { // picks one cluster channel as sample - channel := network.ChannelSyncCluster(flow.Emulator) + channel := channels.ChannelSyncCluster(flow.Emulator) // adjacency map keeps graph component of a single channel channelAdjMap := make(map[flow.Identifier]flow.IdentityList) diff --git a/network/topology/topicBasedTopology.go b/network/topology/topicBasedTopology.go index 3ac2d754b8c..d585f8019d1 100644 --- a/network/topology/topicBasedTopology.go +++ b/network/topology/topicBasedTopology.go @@ -3,6 +3,7 @@ package topology import ( "fmt" + "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/onflow/flow-go/model/flow" @@ -50,8 +51,8 @@ func NewTopicBasedTopology(nodeID flow.Identifier, logger zerolog.Logger, state // of the messages (i.e., publish and multicast). // Independent invocations of GenerateFanout on different nodes collaboratively must construct a cohesive // connected graph of nodes that enables them talking to each other. 
-func (t TopicBasedTopology) GenerateFanout(ids flow.IdentityList, channels network.ChannelList) (flow.IdentityList, error) { - myUniqueChannels := network.UniqueChannels(channels) +func (t TopicBasedTopology) GenerateFanout(ids flow.IdentityList, channels channels.ChannelList) (flow.IdentityList, error) { + myUniqueChannels := channels.UniqueChannels(channels) if len(myUniqueChannels) == 0 { // no subscribed channel, hence skip topology creation // we do not return an error at this state as invocation of MakeTopology may happen before @@ -63,7 +64,7 @@ func (t TopicBasedTopology) GenerateFanout(ids flow.IdentityList, channels netwo // finds all interacting roles with this node myInteractingRoles := flow.RoleList{} for _, myChannel := range myUniqueChannels { - roles, ok := network.RolesByChannel(myChannel) + roles, ok := channels.RolesByChannel(myChannel) if !ok { return nil, fmt.Errorf("could not extract roles for channel: %s", myChannel) } @@ -109,8 +110,8 @@ func (t TopicBasedTopology) GenerateFanout(ids flow.IdentityList, channels netwo // identities that should be included in the returned subset. // Returned identities should all subscribed to the specified `channel`. // Note: this method should not include identity of its executor. -func (t *TopicBasedTopology) subsetChannel(ids flow.IdentityList, shouldHave flow.IdentityList, channel network.Channel) (flow.IdentityList, error) { - if network.IsClusterChannel(channel) { +func (t *TopicBasedTopology) subsetChannel(ids flow.IdentityList, shouldHave flow.IdentityList, channel channels.Channel) (flow.IdentityList, error) { + if channels.IsClusterChannel(channel) { return t.clusterChannelHandler(ids, shouldHave) } return t.nonClusterChannelHandler(ids, shouldHave, channel) @@ -204,13 +205,13 @@ func (t TopicBasedTopology) clusterChannelHandler(ids, shouldHave flow.IdentityL // nonClusterChannelHandler returns a connected graph fanout of peers from `ids` that subscribed to `channel`. 
// The returned sample contains `shouldHave` ones that also subscribed to `channel`. -func (t TopicBasedTopology) nonClusterChannelHandler(ids, shouldHave flow.IdentityList, channel network.Channel) (flow.IdentityList, error) { - if network.IsClusterChannel(channel) { +func (t TopicBasedTopology) nonClusterChannelHandler(ids, shouldHave flow.IdentityList, channel channels.Channel) (flow.IdentityList, error) { + if channels.IsClusterChannel(channel) { return nil, fmt.Errorf("could not handle cluster channel: %s", channel) } // extracts flow roles subscribed to topic. - roles, ok := network.RolesByChannel(channel) + roles, ok := channels.RolesByChannel(channel) if !ok { return nil, fmt.Errorf("unknown topic with no subscribed roles: %s", channel) } diff --git a/network/topology/topicBasedTopology_test.go b/network/topology/topicBasedTopology_test.go index 450871555e4..b18402d1acb 100644 --- a/network/topology/topicBasedTopology_test.go +++ b/network/topology/topicBasedTopology_test.go @@ -5,13 +5,13 @@ import ( "sort" "testing" + "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" - "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/utils/unittest" ) @@ -59,12 +59,12 @@ func (suite *TopicAwareTopologyTestSuite) TestTopologySize_Topic() { top, err := NewTopicBasedTopology(suite.all[0].NodeID, suite.logger, suite.state) require.NoError(suite.T(), err) - topics := network.ChannelsByRole(suite.all[0].Role) + topics := channels.ChannelsByRole(suite.all[0].Role) require.Greater(suite.T(), len(topics), 1) for _, topic := range topics { // extracts total number of nodes subscribed to topic - roles, ok := network.RolesByChannel(topic) + roles, ok := channels.RolesByChannel(topic) require.True(suite.T(), ok) ids, err := top.subsetChannel(suite.all, 
nil, topic) @@ -86,7 +86,7 @@ func (suite *TopicAwareTopologyTestSuite) TestDeteministicity() { top, err := NewTopicBasedTopology(suite.all[0].NodeID, suite.logger, suite.state) require.NoError(suite.T(), err) - topics := network.ChannelsByRole(suite.all[0].Role) + topics := channels.ChannelsByRole(suite.all[0].Role) require.Greater(suite.T(), len(topics), 1) // for each topic samples 100 topologies @@ -135,7 +135,7 @@ func (suite *TopicAwareTopologyTestSuite) TestUniqueness() { // for each topic samples 100 topologies // all topologies for a topic should be the same - topics := network.ChannelsByRole(flow.RoleConsensus) + topics := channels.ChannelsByRole(flow.RoleConsensus) require.Greater(suite.T(), len(topics), 1) for _, identity := range suite.all { @@ -173,7 +173,7 @@ func (suite *TopicAwareTopologyTestSuite) TestUniqueness() { // TestConnectedness_NonClusterTopics checks whether graph components corresponding to a // non-cluster channel are individually connected. func (suite *TopicAwareTopologyTestSuite) TestConnectedness_NonClusterChannel() { - channel := network.TestNetworkChannel + channel := channels.TestNetworkChannel // adjacency map keeps graph component of a single channel channelAdjMap := make(map[flow.Identifier]flow.IdentityList) @@ -196,7 +196,7 @@ func (suite *TopicAwareTopologyTestSuite) TestConnectedness_NonClusterChannel() // cluster channel are individually connected. 
func (suite *TopicAwareTopologyTestSuite) TestConnectedness_ClusterChannel() { // picks one cluster channel as sample - channel := network.ChannelSyncCluster(flow.Emulator) + channel := channels.ChannelSyncCluster(flow.Emulator) // adjacency map keeps graph component of a single channel channelAdjMap := make(map[flow.Identifier]flow.IdentityList) diff --git a/network/validator/pubsub/authorized_sender_validator.go b/network/validator/pubsub/authorized_sender_validator.go index 0e8550c13df..aad1051908a 100644 --- a/network/validator/pubsub/authorized_sender_validator.go +++ b/network/validator/pubsub/authorized_sender_validator.go @@ -9,6 +9,8 @@ import ( "github.com/libp2p/go-libp2p-core/peer" pubsub "github.com/libp2p/go-libp2p-pubsub" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/network/message" "github.com/rs/zerolog" "github.com/onflow/flow-go/network" @@ -27,7 +29,7 @@ var ( // The MessageValidator returned will use the getIdentity to get the flow identity for the sender, asserting that the sender is a staked node. // If the sender is an unstaked node the message is rejected. IsAuthorizedSender is used to perform further message validation. If validation // fails the message is rejected, if the validation error is an expected error slashing data is collected before the message is rejected. -func AuthorizedSenderValidator(log zerolog.Logger, channel network.Channel, getIdentity func(peer.ID) (*flow.Identity, bool)) MessageValidator { +func AuthorizedSenderValidator(log zerolog.Logger, channel channels.Channel, getIdentity func(peer.ID) (*flow.Identity, bool)) MessageValidator { log = log.With(). Str("component", "authorized_sender_validator"). Str("network_channel", channel.String()). 
@@ -78,25 +80,25 @@ func AuthorizedSenderValidator(log zerolog.Logger, channel network.Channel, getI // * ErrSenderEjected: if identity of sender is ejected // * ErrUnknownMessageType: if retrieving the message auth config for msg fails // * ErrUnauthorizedSender: if the message auth config validation for msg fails -func IsAuthorizedSender(identity *flow.Identity, channel network.Channel, msg interface{}) (string, error) { +func IsAuthorizedSender(identity *flow.Identity, channel channels.Channel, msg interface{}) (string, error) { if identity.Ejected { return "", ErrSenderEjected } // get message auth config - conf, err := network.GetMessageAuthConfig(msg) + conf, err := message.GetMessageAuthConfig(msg) if err != nil { return "", fmt.Errorf("%s: %w", err, ErrUnknownMessageType) } // handle special case for cluster prefixed channels - if prefix, ok := network.ClusterChannelPrefix(channel); ok { - channel = network.Channel(prefix) + if prefix, ok := channels.ClusterChannelPrefix(channel); ok { + channel = channels.Channel(prefix) } if err := conf.IsAuthorized(identity.Role, channel); err != nil { - return conf.String, fmt.Errorf("%s: %w", err, ErrUnauthorizedSender) + return conf.Name, fmt.Errorf("%s: %w", err, ErrUnauthorizedSender) } - return conf.String, nil + return conf.Name, nil } diff --git a/network/validator/pubsub/authorized_sender_validator_test.go b/network/validator/pubsub/authorized_sender_validator_test.go index 3bf60a1f489..94d227120e4 100644 --- a/network/validator/pubsub/authorized_sender_validator_test.go +++ b/network/validator/pubsub/authorized_sender_validator_test.go @@ -4,19 +4,20 @@ import ( "fmt" "testing" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/network/message" "github.com/stretchr/testify/suite" "github.com/onflow/flow-go/model/messages" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/utils/unittest" ) type TestCase struct { - Identity 
*flow.Identity - Channel network.Channel - Message interface{} + Identity *flow.Identity + Channel channels.Channel + Message interface{} MessageStr string } @@ -65,14 +66,14 @@ func (s *TestIsAuthorizedSenderSuite) TestIsAuthorizedSender_ValidationFailure() s.Run("sender is ejected", func() { identity := unittest.IdentityFixture() identity.Ejected = true - msgType, err := IsAuthorizedSender(identity, network.Channel(""), nil) + msgType, err := IsAuthorizedSender(identity, channels.Channel(""), nil) s.Require().ErrorIs(err, ErrSenderEjected) s.Require().Equal("", msgType) }) s.Run("unknown message type", func() { identity := unittest.IdentityFixture() - msgType, err := IsAuthorizedSender(identity, network.Channel(""), nil) + msgType, err := IsAuthorizedSender(identity, channels.Channel(""), nil) s.Require().ErrorIs(err, ErrUnknownMessageType) s.Require().Equal("", msgType) }) @@ -88,7 +89,7 @@ func (s *TestIsAuthorizedSenderSuite) TestIsAuthorizedSender_ValidationFailure() Payload: nil, }} - msgType, err := IsAuthorizedSender(identity, network.ConsensusCommittee, m) + msgType, err := IsAuthorizedSender(identity, channels.ConsensusCommittee, m) s.Require().ErrorIs(err, ErrUnknownMessageType) s.Require().Equal("", msgType) }) @@ -96,15 +97,15 @@ func (s *TestIsAuthorizedSenderSuite) TestIsAuthorizedSender_ValidationFailure() // initializeTestCases initializes happy and sad path test cases for checking authorized and unauthorized role message combinations. 
func (s *TestIsAuthorizedSenderSuite) initializeTestCases() { - for _, c := range network.MessageAuthConfigs { + for _, c := range message.AuthorizationConfigs { for channel, authorizedRoles := range c.Config { for _, role := range flow.Roles() { identity := unittest.IdentityFixture(unittest.WithRole(role)) tc := TestCase{ Identity: identity, Channel: channel, - Message: c.Interface(), - MessageStr: c.String, + Message: c.Type(), + MessageStr: c.Name, } if authorizedRoles.Contains(role) { diff --git a/utils/unittest/network/network.go b/utils/unittest/network/network.go index f0b90763b75..100ea9cea46 100644 --- a/utils/unittest/network/network.go +++ b/utils/unittest/network/network.go @@ -3,6 +3,7 @@ package network import ( "fmt" + "github.com/onflow/flow-go/network/channels" "github.com/stretchr/testify/mock" "github.com/onflow/flow-go/model/flow" @@ -10,14 +11,14 @@ import ( "github.com/onflow/flow-go/network/mocknetwork" ) -type EngineProcessFunc func(network.Channel, flow.Identifier, interface{}) error -type NetworkPublishFunc func(network.Channel, interface{}, ...flow.Identifier) error +type EngineProcessFunc func(channels.Channel, flow.Identifier, interface{}) error +type NetworkPublishFunc func(channels.Channel, interface{}, ...flow.Identifier) error // Conduit represents a mock conduit. type Conduit struct { mocknetwork.Conduit net *Network - channel network.Channel + channel channels.Channel } // Publish sends a message on this mock network, invoking any callback that has @@ -32,8 +33,8 @@ func (c *Conduit) Publish(event interface{}, targetIDs ...flow.Identifier) error // Network represents a mock network. The implementation is not concurrency-safe. 
type Network struct { mocknetwork.Network - conduits map[network.Channel]*Conduit - engines map[network.Channel]network.MessageProcessor + conduits map[channels.Channel]*Conduit + engines map[channels.Channel]network.MessageProcessor publishFunc NetworkPublishFunc } @@ -41,14 +42,14 @@ type Network struct { func NewNetwork() *Network { return &Network{ Network: mocknetwork.Network{}, - conduits: make(map[network.Channel]*Conduit), - engines: make(map[network.Channel]network.MessageProcessor), + conduits: make(map[channels.Channel]*Conduit), + engines: make(map[channels.Channel]network.MessageProcessor), } } // Register registers an engine with this mock network. If an engine is already registered on the // given channel, this will return an error. -func (n *Network) Register(channel network.Channel, engine network.MessageProcessor) (network.Conduit, error) { +func (n *Network) Register(channel channels.Channel, engine network.MessageProcessor) (network.Conduit, error) { _, ok := n.engines[channel] if ok { return nil, fmt.Errorf("channel already registered: %s", channel) @@ -63,7 +64,7 @@ func (n *Network) Register(channel network.Channel, engine network.MessageProces // Send sends a message to the engine registered to the given channel on this mock network and returns // an error if one occurs. If no engine is registered, this is a noop. -func (n *Network) Send(channel network.Channel, originID flow.Identifier, event interface{}) error { +func (n *Network) Send(channel channels.Channel, originID flow.Identifier, event interface{}) error { if eng, ok := n.engines[channel]; ok { return eng.Process(channel, originID, event) } @@ -92,7 +93,7 @@ func NewEngine() *Engine { // OnProcess specifies the callback that should be executed when `Process` is called on this mock engine. func (e *Engine) OnProcess(processFunc EngineProcessFunc) *Engine { e.On("Process", mock.AnythingOfType("network.Channel"), mock.AnythingOfType("flow.Identifier"), mock.Anything). 
- Return((func(network.Channel, flow.Identifier, interface{}) error)(processFunc)) + Return((func(channels.Channel, flow.Identifier, interface{}) error)(processFunc)) return e } From 13174eda59de4e5434db73770c76befc4f903a15 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Tue, 28 Jun 2022 09:17:55 -0400 Subject: [PATCH 035/223] fix imports --- consensus/integration/integration_test.go | 3 ++- consensus/integration/network_test.go | 1 + engine/access/access_test.go | 3 ++- engine/access/ingestion/engine.go | 3 ++- engine/access/ingestion/engine_test.go | 3 ++- engine/collection/compliance/engine.go | 3 ++- engine/collection/compliance/engine_test.go | 3 ++- engine/collection/ingest/engine.go | 3 ++- engine/collection/pusher/engine.go | 3 ++- engine/collection/pusher/engine_test.go | 3 ++- engine/collection/synchronization/engine.go | 3 ++- engine/collection/synchronization/engine_test.go | 3 ++- .../collection/synchronization/request_handler.go | 3 ++- engine/collection/test/cluster_switchover_test.go | 3 ++- engine/common/follower/engine.go | 3 ++- engine/common/follower/engine_test.go | 3 ++- engine/common/provider/engine.go | 3 ++- engine/common/requester/engine.go | 3 ++- engine/common/splitter/engine.go | 3 ++- engine/common/splitter/engine_test.go | 3 ++- engine/common/splitter/network/example_test.go | 3 ++- engine/common/splitter/network/network.go | 3 ++- engine/common/splitter/network/network_test.go | 3 ++- engine/common/synchronization/engine.go | 3 ++- engine/common/synchronization/engine_test.go | 3 ++- engine/common/synchronization/request_handler.go | 3 ++- .../synchronization/request_handler_engine.go | 3 ++- engine/consensus/compliance/core_test.go | 3 ++- engine/consensus/compliance/engine.go | 3 ++- engine/consensus/compliance/engine_test.go | 3 ++- engine/consensus/dkg/messaging_engine.go | 3 ++- engine/consensus/dkg/messaging_engine_test.go | 3 ++- engine/consensus/ingestion/engine.go | 3 ++- engine/consensus/ingestion/engine_test.go | 3 ++- 
engine/consensus/matching/engine.go | 3 ++- engine/consensus/matching/engine_test.go | 3 ++- engine/consensus/provider/engine.go | 3 ++- engine/consensus/sealing/engine.go | 3 ++- engine/consensus/sealing/engine_test.go | 3 ++- engine/execution/execution_test.go | 3 ++- engine/execution/ingestion/engine.go | 3 ++- engine/execution/ingestion/engine_test.go | 3 ++- engine/execution/provider/engine.go | 3 ++- engine/ghost/client/ghost_client.go | 3 ++- engine/ghost/engine/handler.go | 3 ++- engine/ghost/engine/rpc.go | 13 +++++++------ engine/testutil/nodes.go | 3 ++- engine/verification/requester/requester.go | 3 ++- engine/verification/requester/requester_test.go | 3 ++- engine/verification/utils/unittest/helper.go | 3 ++- engine/verification/verifier/engine.go | 3 ++- engine/verification/verifier/engine_test.go | 3 ++- insecure/attacknetwork/attackNetwork.go | 3 ++- insecure/corruptible/conduit_test.go | 3 ++- insecure/corruptible/factory.go | 3 ++- insecure/corruptible/factory_test.go | 3 ++- insecure/fixtures.go | 3 ++- insecure/integration/test/composability_test.go | 3 ++- insecure/wintermute/attackOrchestrator.go | 3 ++- insecure/wintermute/attackOrchestrator_test.go | 3 ++- insecure/wintermute/helpers.go | 3 ++- module/metrics/example/collection/main.go | 3 ++- module/metrics/example/consensus/main.go | 3 ++- network/cache/rcvcache_test.go | 3 ++- network/channels/channel.go | 2 +- network/middleware.go | 1 + network/network.go | 1 + network/p2p/dht_test.go | 3 ++- network/p2p/libp2pNode.go | 3 ++- network/p2p/libp2pNodeBuilder.go | 3 ++- network/p2p/middleware.go | 3 ++- network/p2p/network.go | 7 ++++--- network/p2p/sporking_test.go | 3 ++- network/p2p/subscription_filter.go | 1 + network/p2p/subscription_filter_test.go | 3 ++- network/p2p/topic_validator_test.go | 3 ++- network/proxy/network_test.go | 3 ++- network/relay/network.go | 7 ++++--- network/relay/relayer.go | 3 ++- network/stub/network.go | 3 ++- 
.../validator/pubsub/authorized_sender_validator.go | 3 ++- .../pubsub/authorized_sender_validator_test.go | 9 +++++---- utils/unittest/network/network.go | 3 ++- 83 files changed, 173 insertions(+), 91 deletions(-) diff --git a/consensus/integration/integration_test.go b/consensus/integration/integration_test.go index c6034904f03..6c24c383bf7 100644 --- a/consensus/integration/integration_test.go +++ b/consensus/integration/integration_test.go @@ -6,9 +6,10 @@ import ( "testing" "time" - "github.com/onflow/flow-go/network/channels" "github.com/stretchr/testify/require" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/util" diff --git a/consensus/integration/network_test.go b/consensus/integration/network_test.go index 037de473f91..ff160c687cc 100644 --- a/consensus/integration/network_test.go +++ b/consensus/integration/network_test.go @@ -6,6 +6,7 @@ import ( "time" "github.com/hashicorp/go-multierror" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/model/flow" diff --git a/engine/access/access_test.go b/engine/access/access_test.go index d9042692373..1fa4788860c 100644 --- a/engine/access/access_test.go +++ b/engine/access/access_test.go @@ -7,7 +7,6 @@ import ( "testing" "github.com/dgraph-io/badger/v2" - "github.com/onflow/flow-go/network/channels" accessproto "github.com/onflow/flow/protobuf/go/flow/access" entitiesproto "github.com/onflow/flow/protobuf/go/flow/entities" execproto "github.com/onflow/flow/protobuf/go/flow/execution" @@ -17,6 +16,8 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/access" "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/crypto" diff --git a/engine/access/ingestion/engine.go b/engine/access/ingestion/engine.go index 31fa164de41..16fa596679b 
100644 --- a/engine/access/ingestion/engine.go +++ b/engine/access/ingestion/engine.go @@ -8,9 +8,10 @@ import ( "fmt" "time" - "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/engine/access/rpc" diff --git a/engine/access/ingestion/engine_test.go b/engine/access/ingestion/engine_test.go index 4158ec40e46..a826d130902 100644 --- a/engine/access/ingestion/engine_test.go +++ b/engine/access/ingestion/engine_test.go @@ -9,12 +9,13 @@ import ( "testing" "time" - "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + "github.com/onflow/flow-go/network/channels" + hotmodel "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/engine/access/rpc" "github.com/onflow/flow-go/model/flow" diff --git a/engine/collection/compliance/engine.go b/engine/collection/compliance/engine.go index dbd575c4adb..629d4ad3546 100644 --- a/engine/collection/compliance/engine.go +++ b/engine/collection/compliance/engine.go @@ -6,9 +6,10 @@ import ( "fmt" "time" - "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/engine/common/fifoqueue" diff --git a/engine/collection/compliance/engine_test.go b/engine/collection/compliance/engine_test.go index 8241d5a2d55..e20d7b6e4b5 100644 --- a/engine/collection/compliance/engine_test.go +++ b/engine/collection/compliance/engine_test.go @@ -6,11 +6,12 @@ import ( "testing" "time" - "github.com/onflow/flow-go/network/channels" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + 
"github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/model/cluster" diff --git a/engine/collection/ingest/engine.go b/engine/collection/ingest/engine.go index 1aa13ca91e0..34afdda6555 100644 --- a/engine/collection/ingest/engine.go +++ b/engine/collection/ingest/engine.go @@ -7,9 +7,10 @@ import ( "errors" "fmt" - "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/access" "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/engine/common/fifoqueue" diff --git a/engine/collection/pusher/engine.go b/engine/collection/pusher/engine.go index 177ed2fb450..221333490bd 100644 --- a/engine/collection/pusher/engine.go +++ b/engine/collection/pusher/engine.go @@ -6,9 +6,10 @@ package pusher import ( "fmt" - "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" diff --git a/engine/collection/pusher/engine_test.go b/engine/collection/pusher/engine_test.go index f81a6aff08e..3d098aea681 100644 --- a/engine/collection/pusher/engine_test.go +++ b/engine/collection/pusher/engine_test.go @@ -4,11 +4,12 @@ import ( "io/ioutil" "testing" - "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/suite" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/engine/collection/pusher" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" diff --git a/engine/collection/synchronization/engine.go b/engine/collection/synchronization/engine.go index a738f5c7623..4c8eaf32872 100644 --- a/engine/collection/synchronization/engine.go +++ 
b/engine/collection/synchronization/engine.go @@ -8,9 +8,10 @@ import ( "time" "github.com/hashicorp/go-multierror" - "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/engine/common/fifoqueue" commonsync "github.com/onflow/flow-go/engine/common/synchronization" diff --git a/engine/collection/synchronization/engine_test.go b/engine/collection/synchronization/engine_test.go index 9ae12aab7b5..a88616b4aca 100644 --- a/engine/collection/synchronization/engine_test.go +++ b/engine/collection/synchronization/engine_test.go @@ -6,13 +6,14 @@ import ( "testing" "time" - "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/engine" clustermodel "github.com/onflow/flow-go/model/cluster" "github.com/onflow/flow-go/model/events" diff --git a/engine/collection/synchronization/request_handler.go b/engine/collection/synchronization/request_handler.go index ddb98d1fb6f..6a36d4a189b 100644 --- a/engine/collection/synchronization/request_handler.go +++ b/engine/collection/synchronization/request_handler.go @@ -4,9 +4,10 @@ import ( "errors" "fmt" - "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/engine" commonsync "github.com/onflow/flow-go/engine/common/synchronization" clustermodel "github.com/onflow/flow-go/model/cluster" diff --git a/engine/collection/test/cluster_switchover_test.go b/engine/collection/test/cluster_switchover_test.go index 1f74030a453..ef13b53e890 100644 --- a/engine/collection/test/cluster_switchover_test.go +++ b/engine/collection/test/cluster_switchover_test.go @@ -5,10 +5,11 @@ import ( "testing" "time" 
- "github.com/onflow/flow-go/network/channels" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/cmd/bootstrap/run" "github.com/onflow/flow-go/engine/testutil" testmock "github.com/onflow/flow-go/engine/testutil/mock" diff --git a/engine/common/follower/engine.go b/engine/common/follower/engine.go index 0cb54088bfd..4609049709c 100644 --- a/engine/common/follower/engine.go +++ b/engine/common/follower/engine.go @@ -6,9 +6,10 @@ import ( "fmt" "github.com/hashicorp/go-multierror" - "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/model/events" "github.com/onflow/flow-go/model/flow" diff --git a/engine/common/follower/engine_test.go b/engine/common/follower/engine_test.go index f780b1b8781..fa6c696720d 100644 --- a/engine/common/follower/engine_test.go +++ b/engine/common/follower/engine_test.go @@ -3,13 +3,14 @@ package follower_test import ( "testing" - "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/engine/common/follower" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/compliance" diff --git a/engine/common/provider/engine.go b/engine/common/provider/engine.go index 2be3c068a1e..4622e43723b 100644 --- a/engine/common/provider/engine.go +++ b/engine/common/provider/engine.go @@ -4,10 +4,11 @@ import ( "errors" "fmt" - "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/vmihailenco/msgpack" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/model/flow" 
"github.com/onflow/flow-go/model/flow/filter" diff --git a/engine/common/requester/engine.go b/engine/common/requester/engine.go index 88861de558a..c3f0e34897f 100644 --- a/engine/common/requester/engine.go +++ b/engine/common/requester/engine.go @@ -6,10 +6,11 @@ import ( "math/rand" "time" - "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/vmihailenco/msgpack" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" diff --git a/engine/common/splitter/engine.go b/engine/common/splitter/engine.go index 1a8c0c9681a..a054a497898 100644 --- a/engine/common/splitter/engine.go +++ b/engine/common/splitter/engine.go @@ -5,9 +5,10 @@ import ( "sync" "github.com/hashicorp/go-multierror" - "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/network" diff --git a/engine/common/splitter/engine_test.go b/engine/common/splitter/engine_test.go index c2445dc9231..613c1d0de8a 100644 --- a/engine/common/splitter/engine_test.go +++ b/engine/common/splitter/engine_test.go @@ -6,10 +6,11 @@ import ( "testing" "github.com/hashicorp/go-multierror" - "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/stretchr/testify/suite" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/engine/common/splitter" "github.com/onflow/flow-go/network/mocknetwork" "github.com/onflow/flow-go/utils/unittest" diff --git a/engine/common/splitter/network/example_test.go b/engine/common/splitter/network/example_test.go index b883ecb7bf3..16c0b94881c 100644 --- a/engine/common/splitter/network/example_test.go +++ b/engine/common/splitter/network/example_test.go @@ -4,9 +4,10 @@ import ( "fmt" "math/rand" - "github.com/onflow/flow-go/network/channels" 
"github.com/rs/zerolog" + "github.com/onflow/flow-go/network/channels" + splitterNetwork "github.com/onflow/flow-go/engine/common/splitter/network" "github.com/onflow/flow-go/model/flow" testnet "github.com/onflow/flow-go/utils/unittest/network" diff --git a/engine/common/splitter/network/network.go b/engine/common/splitter/network/network.go index a11a650ee2e..36c60de083a 100644 --- a/engine/common/splitter/network/network.go +++ b/engine/common/splitter/network/network.go @@ -7,9 +7,10 @@ import ( "github.com/ipfs/go-datastore" "github.com/libp2p/go-libp2p-core/protocol" - "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" + "github.com/onflow/flow-go/network/channels" + splitterEngine "github.com/onflow/flow-go/engine/common/splitter" "github.com/onflow/flow-go/module/component" "github.com/onflow/flow-go/module/irrecoverable" diff --git a/engine/common/splitter/network/network_test.go b/engine/common/splitter/network/network_test.go index e6817092af7..1c18613cabe 100644 --- a/engine/common/splitter/network/network_test.go +++ b/engine/common/splitter/network/network_test.go @@ -3,11 +3,12 @@ package network_test import ( "testing" - "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/suite" + "github.com/onflow/flow-go/network/channels" + splitternetwork "github.com/onflow/flow-go/engine/common/splitter/network" "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/mocknetwork" diff --git a/engine/common/synchronization/engine.go b/engine/common/synchronization/engine.go index 5540dc2ef5a..523108ce353 100644 --- a/engine/common/synchronization/engine.go +++ b/engine/common/synchronization/engine.go @@ -8,9 +8,10 @@ import ( "time" "github.com/hashicorp/go-multierror" - "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/engine" 
"github.com/onflow/flow-go/engine/common/fifoqueue" "github.com/onflow/flow-go/model/chainsync" diff --git a/engine/common/synchronization/engine_test.go b/engine/common/synchronization/engine_test.go index 04eb52a90de..1bd0f9146a3 100644 --- a/engine/common/synchronization/engine_test.go +++ b/engine/common/synchronization/engine_test.go @@ -6,13 +6,14 @@ import ( "testing" "time" - "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/consensus/hotstuff/notifications/pubsub" "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/model/events" diff --git a/engine/common/synchronization/request_handler.go b/engine/common/synchronization/request_handler.go index b5dabe4ac64..8f34dfc1b92 100644 --- a/engine/common/synchronization/request_handler.go +++ b/engine/common/synchronization/request_handler.go @@ -4,9 +4,10 @@ import ( "errors" "fmt" - "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/messages" diff --git a/engine/common/synchronization/request_handler_engine.go b/engine/common/synchronization/request_handler_engine.go index 75ca9dd5d17..1f1de64f4ce 100644 --- a/engine/common/synchronization/request_handler_engine.go +++ b/engine/common/synchronization/request_handler_engine.go @@ -3,9 +3,10 @@ package synchronization import ( "fmt" - "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/messages" "github.com/onflow/flow-go/module" diff --git a/engine/consensus/compliance/core_test.go 
b/engine/consensus/compliance/core_test.go index 98d59f4103c..7395d6f8634 100644 --- a/engine/consensus/compliance/core_test.go +++ b/engine/consensus/compliance/core_test.go @@ -6,12 +6,13 @@ import ( "testing" "time" - "github.com/onflow/flow-go/network/channels" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + "github.com/onflow/flow-go/network/channels" + hotstuff "github.com/onflow/flow-go/consensus/hotstuff/mocks" "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/model/flow" diff --git a/engine/consensus/compliance/engine.go b/engine/consensus/compliance/engine.go index 1141f38ee85..4ab022377a2 100644 --- a/engine/consensus/compliance/engine.go +++ b/engine/consensus/compliance/engine.go @@ -6,9 +6,10 @@ import ( "fmt" "time" - "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/engine/common/fifoqueue" diff --git a/engine/consensus/compliance/engine_test.go b/engine/consensus/compliance/engine_test.go index c98d1d5f724..f5bd71a7b17 100644 --- a/engine/consensus/compliance/engine_test.go +++ b/engine/consensus/compliance/engine_test.go @@ -6,11 +6,12 @@ import ( "testing" "time" - "github.com/onflow/flow-go/network/channels" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/model/flow" diff --git a/engine/consensus/dkg/messaging_engine.go b/engine/consensus/dkg/messaging_engine.go index e7a9fb5c8f7..207ed689d62 100644 --- a/engine/consensus/dkg/messaging_engine.go +++ b/engine/consensus/dkg/messaging_engine.go @@ -5,9 +5,10 @@ import ( 
"fmt" "time" - "github.com/onflow/flow-go/network/channels" "github.com/sethvargo/go-retry" + "github.com/onflow/flow-go/network/channels" + "github.com/rs/zerolog" "github.com/onflow/flow-go/engine" diff --git a/engine/consensus/dkg/messaging_engine_test.go b/engine/consensus/dkg/messaging_engine_test.go index 3261680b1f8..576f819435c 100644 --- a/engine/consensus/dkg/messaging_engine_test.go +++ b/engine/consensus/dkg/messaging_engine_test.go @@ -4,11 +4,12 @@ import ( "testing" "time" - "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + "github.com/onflow/flow-go/network/channels" + msg "github.com/onflow/flow-go/model/messages" "github.com/onflow/flow-go/module/dkg" module "github.com/onflow/flow-go/module/mock" diff --git a/engine/consensus/ingestion/engine.go b/engine/consensus/ingestion/engine.go index 3205c242a48..ab1bf76207e 100644 --- a/engine/consensus/ingestion/engine.go +++ b/engine/consensus/ingestion/engine.go @@ -6,9 +6,10 @@ import ( "context" "fmt" - "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/engine/common/fifoqueue" "github.com/onflow/flow-go/model/flow" diff --git a/engine/consensus/ingestion/engine_test.go b/engine/consensus/ingestion/engine_test.go index 64263994ce7..cfff0c972ca 100644 --- a/engine/consensus/ingestion/engine_test.go +++ b/engine/consensus/ingestion/engine_test.go @@ -8,12 +8,13 @@ import ( "testing" "time" - "github.com/onflow/flow-go/network/channels" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" "go.uber.org/atomic" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/metrics" diff --git a/engine/consensus/matching/engine.go 
b/engine/consensus/matching/engine.go index f9208458f50..2f0157f607b 100644 --- a/engine/consensus/matching/engine.go +++ b/engine/consensus/matching/engine.go @@ -3,9 +3,10 @@ package matching import ( "fmt" - "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/engine/common/fifoqueue" diff --git a/engine/consensus/matching/engine_test.go b/engine/consensus/matching/engine_test.go index ee16f16201b..cce9a5eac96 100644 --- a/engine/consensus/matching/engine_test.go +++ b/engine/consensus/matching/engine_test.go @@ -5,11 +5,12 @@ import ( "testing" "time" - "github.com/onflow/flow-go/network/channels" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/engine" mockconsensus "github.com/onflow/flow-go/engine/consensus/mock" diff --git a/engine/consensus/provider/engine.go b/engine/consensus/provider/engine.go index 1d56ef42b8e..b77270029b1 100644 --- a/engine/consensus/provider/engine.go +++ b/engine/consensus/provider/engine.go @@ -5,9 +5,10 @@ package provider import ( "fmt" - "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" diff --git a/engine/consensus/sealing/engine.go b/engine/consensus/sealing/engine.go index cceefdf1264..ea52c271555 100644 --- a/engine/consensus/sealing/engine.go +++ b/engine/consensus/sealing/engine.go @@ -4,9 +4,10 @@ import ( "fmt" "github.com/gammazero/workerpool" - "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" + "github.com/onflow/flow-go/network/channels" + 
"github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/engine" diff --git a/engine/consensus/sealing/engine_test.go b/engine/consensus/sealing/engine_test.go index 047e986e8f2..6f83e1ab243 100644 --- a/engine/consensus/sealing/engine_test.go +++ b/engine/consensus/sealing/engine_test.go @@ -7,10 +7,11 @@ import ( "testing" "time" - "github.com/onflow/flow-go/network/channels" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/engine" mockconsensus "github.com/onflow/flow-go/engine/consensus/mock" diff --git a/engine/execution/execution_test.go b/engine/execution/execution_test.go index 66953f72497..22a9bc1aec3 100644 --- a/engine/execution/execution_test.go +++ b/engine/execution/execution_test.go @@ -6,7 +6,6 @@ import ( "testing" "time" - "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog/log" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" @@ -14,6 +13,8 @@ import ( "github.com/vmihailenco/msgpack" "go.uber.org/atomic" + "github.com/onflow/flow-go/network/channels" + execTestutil "github.com/onflow/flow-go/engine/execution/testutil" "github.com/onflow/flow-go/engine/testutil" testmock "github.com/onflow/flow-go/engine/testutil/mock" diff --git a/engine/execution/ingestion/engine.go b/engine/execution/ingestion/engine.go index 3a706263f06..b566d889549 100644 --- a/engine/execution/ingestion/engine.go +++ b/engine/execution/ingestion/engine.go @@ -8,10 +8,11 @@ import ( "strings" "time" - "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/rs/zerolog/log" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/crypto/hash" "github.com/onflow/flow-go/engine" diff --git a/engine/execution/ingestion/engine_test.go 
b/engine/execution/ingestion/engine_test.go index a0c136bb0a3..8e57296b4bf 100644 --- a/engine/execution/ingestion/engine_test.go +++ b/engine/execution/ingestion/engine_test.go @@ -9,12 +9,13 @@ import ( "time" "github.com/golang/mock/gomock" - engineCommon "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog/log" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + engineCommon "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/engine/execution" computation "github.com/onflow/flow-go/engine/execution/computation/mock" diff --git a/engine/execution/provider/engine.go b/engine/execution/provider/engine.go index 1ffffd31c45..7eddc4f8786 100644 --- a/engine/execution/provider/engine.go +++ b/engine/execution/provider/engine.go @@ -7,9 +7,10 @@ import ( "math/rand" "time" - "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/engine/execution/state" "github.com/onflow/flow-go/model/flow" diff --git a/engine/ghost/client/ghost_client.go b/engine/ghost/client/ghost_client.go index 7cef5f361cd..e93b92f648a 100644 --- a/engine/ghost/client/ghost_client.go +++ b/engine/ghost/client/ghost_client.go @@ -6,9 +6,10 @@ import ( "fmt" "io" - "github.com/onflow/flow-go/network/channels" "google.golang.org/grpc" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/utils/unittest" ghost "github.com/onflow/flow-go/engine/ghost/protobuf" diff --git a/engine/ghost/engine/handler.go b/engine/ghost/engine/handler.go index 66e42ff57bc..99ae48b8241 100644 --- a/engine/ghost/engine/handler.go +++ b/engine/ghost/engine/handler.go @@ -5,11 +5,12 @@ import ( "fmt" "github.com/golang/protobuf/ptypes/empty" - "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "google.golang.org/grpc/codes" 
"google.golang.org/grpc/status" + "github.com/onflow/flow-go/network/channels" + ghost "github.com/onflow/flow-go/engine/ghost/protobuf" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/network" diff --git a/engine/ghost/engine/rpc.go b/engine/ghost/engine/rpc.go index 6d45f7d967a..0a4b647f720 100644 --- a/engine/ghost/engine/rpc.go +++ b/engine/ghost/engine/rpc.go @@ -4,10 +4,11 @@ import ( "fmt" "net" - "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "google.golang.org/grpc" + "github.com/onflow/flow-go/network/channels" + cborcodec "github.com/onflow/flow-go/network/codec/cbor" "github.com/onflow/flow-go/engine" @@ -85,7 +86,7 @@ func New(net network.Network, log zerolog.Logger, me module.Local, state protoco func registerConduits(net network.Network, state protocol.State, eng network.Engine) (map[channels.Channel]network.Conduit, error) { // create a list of all channels that don't change over time - channels := channels.ChannelList{ + channelList := channels.ChannelList{ channels.ConsensusCommittee, channels.SyncCommittee, channels.SyncExecution, @@ -115,17 +116,17 @@ func registerConduits(net network.Network, state protocol.State, eng network.Eng clusterID := cluster.RootBlock().Header.ChainID // add the dynamic channels for the cluster - channels = append( - channels, + channelList = append( + channelList, channels.ChannelConsensusCluster(clusterID), channels.ChannelSyncCluster(clusterID), ) } - conduitMap := make(map[channels.Channel]network.Conduit, len(channels)) + conduitMap := make(map[channels.Channel]network.Conduit, len(channelList)) // Register for ALL channels here and return a map of conduits - for _, e := range channels { + for _, e := range channelList { c, err := net.Register(e, eng) if err != nil { return nil, fmt.Errorf("could not register collection provider engine: %w", err) diff --git a/engine/testutil/nodes.go b/engine/testutil/nodes.go index 0c9cfd88376..f2cec55337b 100644 --- 
a/engine/testutil/nodes.go +++ b/engine/testutil/nodes.go @@ -8,11 +8,12 @@ import ( "testing" "time" - "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/consensus" "github.com/onflow/flow-go/consensus/hotstuff" mockhotstuff "github.com/onflow/flow-go/consensus/hotstuff/mocks" diff --git a/engine/verification/requester/requester.go b/engine/verification/requester/requester.go index 2435aa6deed..75e9100ebcd 100644 --- a/engine/verification/requester/requester.go +++ b/engine/verification/requester/requester.go @@ -5,10 +5,11 @@ import ( "fmt" "time" - "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "golang.org/x/exp/rand" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/engine/verification/fetcher" "github.com/onflow/flow-go/model/flow" diff --git a/engine/verification/requester/requester_test.go b/engine/verification/requester/requester_test.go index debaa452488..a6f5e79e20f 100644 --- a/engine/verification/requester/requester_test.go +++ b/engine/verification/requester/requester_test.go @@ -5,11 +5,12 @@ import ( "testing" "time" - "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" testifymock "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + "github.com/onflow/flow-go/network/channels" + mockfetcher "github.com/onflow/flow-go/engine/verification/fetcher/mock" "github.com/onflow/flow-go/engine/verification/requester" vertestutils "github.com/onflow/flow-go/engine/verification/utils/unittest" diff --git a/engine/verification/utils/unittest/helper.go b/engine/verification/utils/unittest/helper.go index c0ed62c1c80..184a0495a34 100644 --- a/engine/verification/utils/unittest/helper.go +++ b/engine/verification/utils/unittest/helper.go @@ -7,13 +7,14 @@ import ( "testing" "time" 
- "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/rs/zerolog/log" "github.com/stretchr/testify/assert" testifymock "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/engine/testutil" diff --git a/engine/verification/verifier/engine.go b/engine/verification/verifier/engine.go index 0b5442f3d02..4b60e42c94f 100644 --- a/engine/verification/verifier/engine.go +++ b/engine/verification/verifier/engine.go @@ -4,10 +4,11 @@ import ( "context" "fmt" - "github.com/onflow/flow-go/network/channels" "github.com/opentracing/opentracing-go/log" "github.com/rs/zerolog" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/crypto/hash" "github.com/onflow/flow-go/engine" diff --git a/engine/verification/verifier/engine_test.go b/engine/verification/verifier/engine_test.go index d170ae28fe4..858d6776629 100644 --- a/engine/verification/verifier/engine_test.go +++ b/engine/verification/verifier/engine_test.go @@ -6,13 +6,14 @@ import ( "fmt" "testing" - "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/stretchr/testify/mock" testifymock "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/crypto/hash" "github.com/onflow/flow-go/engine/testutil/mocklocal" diff --git a/insecure/attacknetwork/attackNetwork.go b/insecure/attacknetwork/attackNetwork.go index aee2042c171..9790cd93451 100644 --- a/insecure/attacknetwork/attackNetwork.go +++ b/insecure/attacknetwork/attackNetwork.go @@ -8,10 +8,11 @@ import ( "github.com/golang/protobuf/ptypes/empty" "github.com/hashicorp/go-multierror" - 
"github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "google.golang.org/grpc" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/insecure" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/component" diff --git a/insecure/corruptible/conduit_test.go b/insecure/corruptible/conduit_test.go index f641aefd0a9..f9b08b92415 100644 --- a/insecure/corruptible/conduit_test.go +++ b/insecure/corruptible/conduit_test.go @@ -6,9 +6,10 @@ import ( "testing" "time" - "github.com/onflow/flow-go/network/channels" "github.com/stretchr/testify/require" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/insecure" mockinsecure "github.com/onflow/flow-go/insecure/mock" "github.com/onflow/flow-go/utils/unittest" diff --git a/insecure/corruptible/factory.go b/insecure/corruptible/factory.go index 8d362ffc331..04ed39bdc3b 100644 --- a/insecure/corruptible/factory.go +++ b/insecure/corruptible/factory.go @@ -9,11 +9,12 @@ import ( "sync" "github.com/golang/protobuf/ptypes/empty" - "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "google.golang.org/grpc" grpcinsecure "google.golang.org/grpc/credentials/insecure" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/crypto/hash" "github.com/onflow/flow-go/engine/execution/ingestion" "github.com/onflow/flow-go/engine/execution/state/delta" diff --git a/insecure/corruptible/factory_test.go b/insecure/corruptible/factory_test.go index 7063f85919a..f2c27ce9128 100644 --- a/insecure/corruptible/factory_test.go +++ b/insecure/corruptible/factory_test.go @@ -9,12 +9,13 @@ import ( "testing" "time" - "github.com/onflow/flow-go/network/channels" testifymock "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "google.golang.org/grpc" grpcinsecure "google.golang.org/grpc/credentials/insecure" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/engine/testutil" 
"github.com/onflow/flow-go/insecure" "github.com/onflow/flow-go/model/flow" diff --git a/insecure/fixtures.go b/insecure/fixtures.go index 3f1df818555..d8bdbddd5cf 100644 --- a/insecure/fixtures.go +++ b/insecure/fixtures.go @@ -5,9 +5,10 @@ import ( "math/rand" "testing" - "github.com/onflow/flow-go/network/channels" "github.com/stretchr/testify/require" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/libp2p/message" "github.com/onflow/flow-go/network" diff --git a/insecure/integration/test/composability_test.go b/insecure/integration/test/composability_test.go index 053ea981134..aa8febe2991 100644 --- a/insecure/integration/test/composability_test.go +++ b/insecure/integration/test/composability_test.go @@ -7,9 +7,10 @@ import ( "testing" "time" - flownet "github.com/onflow/flow-go/network/channels" "github.com/stretchr/testify/require" + flownet "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/engine/testutil" "github.com/onflow/flow-go/insecure" "github.com/onflow/flow-go/insecure/attacknetwork" diff --git a/insecure/wintermute/attackOrchestrator.go b/insecure/wintermute/attackOrchestrator.go index 1e6ea1359d8..01fc07169cd 100644 --- a/insecure/wintermute/attackOrchestrator.go +++ b/insecure/wintermute/attackOrchestrator.go @@ -4,9 +4,10 @@ import ( "fmt" "sync" - "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/insecure" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" diff --git a/insecure/wintermute/attackOrchestrator_test.go b/insecure/wintermute/attackOrchestrator_test.go index 351dc2e2c4a..573fde600a5 100644 --- a/insecure/wintermute/attackOrchestrator_test.go +++ b/insecure/wintermute/attackOrchestrator_test.go @@ -5,10 +5,11 @@ import ( "testing" "time" - "github.com/onflow/flow-go/network/channels" 
"github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/insecure" mockinsecure "github.com/onflow/flow-go/insecure/mock" "github.com/onflow/flow-go/model/flow" diff --git a/insecure/wintermute/helpers.go b/insecure/wintermute/helpers.go index b6e78839c77..896dc2cebe8 100644 --- a/insecure/wintermute/helpers.go +++ b/insecure/wintermute/helpers.go @@ -3,9 +3,10 @@ package wintermute import ( "testing" - "github.com/onflow/flow-go/network/channels" "github.com/stretchr/testify/require" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/engine/testutil" enginemock "github.com/onflow/flow-go/engine/testutil/mock" "github.com/onflow/flow-go/insecure" diff --git a/module/metrics/example/collection/main.go b/module/metrics/example/collection/main.go index d1cafed3f83..c0fb3ab2cde 100644 --- a/module/metrics/example/collection/main.go +++ b/module/metrics/example/collection/main.go @@ -4,9 +4,10 @@ import ( "math/rand" "time" - "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/module/metrics/example" "github.com/onflow/flow-go/module/trace" diff --git a/module/metrics/example/consensus/main.go b/module/metrics/example/consensus/main.go index 511140772b3..8b559fbd187 100644 --- a/module/metrics/example/consensus/main.go +++ b/module/metrics/example/consensus/main.go @@ -5,10 +5,11 @@ import ( "math/rand" "time" - "github.com/onflow/flow-go/network/channels" "github.com/prometheus/client_golang/prometheus" "github.com/rs/zerolog" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/module/metrics/example" diff --git a/network/cache/rcvcache_test.go b/network/cache/rcvcache_test.go index 4f4723af074..7deecb908b7 100644 
--- a/network/cache/rcvcache_test.go +++ b/network/cache/rcvcache_test.go @@ -6,11 +6,12 @@ import ( "testing" "time" - "github.com/onflow/flow-go/network/channels" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/crypto/hash" "github.com/onflow/flow-go/module/metrics" diff --git a/network/channels/channel.go b/network/channels/channel.go index 920fe530522..10a9c92ea9a 100644 --- a/network/channels/channel.go +++ b/network/channels/channel.go @@ -8,7 +8,7 @@ import ( // Channel specifies a virtual and isolated communication medium. // Nodes subscribed to the same channel can disseminate epidemic messages among -// each other, i.e.. multicast and publish. +// each other, i.e: multicast and publish. type Channel string type ChannelList []Channel diff --git a/network/middleware.go b/network/middleware.go index 0eadc5a6c23..0e39dfb2ecb 100644 --- a/network/middleware.go +++ b/network/middleware.go @@ -6,6 +6,7 @@ import ( "github.com/ipfs/go-datastore" "github.com/libp2p/go-libp2p-core/peer" "github.com/libp2p/go-libp2p-core/protocol" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/component" "github.com/onflow/flow-go/network/channels" diff --git a/network/network.go b/network/network.go index 57c8f02576f..c087031c285 100644 --- a/network/network.go +++ b/network/network.go @@ -3,6 +3,7 @@ package network import ( "github.com/ipfs/go-datastore" "github.com/libp2p/go-libp2p-core/protocol" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/model/flow" diff --git a/network/p2p/dht_test.go b/network/p2p/dht_test.go index 58e0bd4f2a5..e7fce6d88d6 100644 --- a/network/p2p/dht_test.go +++ b/network/p2p/dht_test.go @@ -9,10 +9,11 @@ import ( "github.com/libp2p/go-libp2p-core/network" "github.com/libp2p/go-libp2p-core/peer" pubsub "github.com/libp2p/go-libp2p-pubsub" - 
"github.com/onflow/flow-go/network/channels" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/onflow/flow-go/network/channels" + libp2pmsg "github.com/onflow/flow-go/model/libp2p/message" "github.com/onflow/flow-go/network/message" "github.com/onflow/flow-go/network/p2p" diff --git a/network/p2p/libp2pNode.go b/network/p2p/libp2pNode.go index 4154e4915cd..f314bf79b75 100644 --- a/network/p2p/libp2pNode.go +++ b/network/p2p/libp2pNode.go @@ -16,9 +16,10 @@ import ( dht "github.com/libp2p/go-libp2p-kad-dht" kbucket "github.com/libp2p/go-libp2p-kbucket" pubsub "github.com/libp2p/go-libp2p-pubsub" - "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" + "github.com/onflow/flow-go/network/channels" + flownet "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/p2p/unicast" validator "github.com/onflow/flow-go/network/validator/pubsub" diff --git a/network/p2p/libp2pNodeBuilder.go b/network/p2p/libp2pNodeBuilder.go index 04bc4a89e32..ecb6891c69c 100644 --- a/network/p2p/libp2pNodeBuilder.go +++ b/network/p2p/libp2pNodeBuilder.go @@ -19,9 +19,10 @@ import ( "github.com/libp2p/go-tcp-transport" "github.com/multiformats/go-multiaddr" madns "github.com/multiformats/go-multiaddr-dns" - "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" + "github.com/onflow/flow-go/network/channels" + fcrypto "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/crypto/hash" "github.com/onflow/flow-go/model/flow" diff --git a/network/p2p/middleware.go b/network/p2p/middleware.go index 96dbf28f72d..3e21f428b97 100644 --- a/network/p2p/middleware.go +++ b/network/p2p/middleware.go @@ -17,9 +17,10 @@ import ( "github.com/libp2p/go-libp2p-core/peer" "github.com/libp2p/go-libp2p-core/peerstore" "github.com/libp2p/go-libp2p-core/protocol" - "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/model/flow" 
"github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/component" diff --git a/network/p2p/network.go b/network/p2p/network.go index d756cd19704..a212a063c6a 100644 --- a/network/p2p/network.go +++ b/network/p2p/network.go @@ -10,9 +10,10 @@ import ( "github.com/ipfs/go-datastore" "github.com/libp2p/go-libp2p-core/peer" "github.com/libp2p/go-libp2p-core/protocol" - "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/crypto/hash" "github.com/onflow/flow-go/model/flow" @@ -87,8 +88,8 @@ type registerEngineResp struct { } type registerBlobServiceRequest struct { - channel channels.Channel - ds datastore.Batching + channel channels.Channel + ds datastore.Batching opts []network.BlobServiceOption respChan chan *registerBlobServiceResp } diff --git a/network/p2p/sporking_test.go b/network/p2p/sporking_test.go index 590794062dd..a166ace8983 100644 --- a/network/p2p/sporking_test.go +++ b/network/p2p/sporking_test.go @@ -8,10 +8,11 @@ import ( "github.com/libp2p/go-libp2p-core/peer" "github.com/libp2p/go-libp2p-core/peerstore" pubsub "github.com/libp2p/go-libp2p-pubsub" - "github.com/onflow/flow-go/network/channels" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/onflow/flow-go/network/channels" + libp2pmessage "github.com/onflow/flow-go/model/libp2p/message" "github.com/onflow/flow-go/network/message" diff --git a/network/p2p/subscription_filter.go b/network/p2p/subscription_filter.go index aea7fa6c8cc..bba023afbda 100644 --- a/network/p2p/subscription_filter.go +++ b/network/p2p/subscription_filter.go @@ -3,6 +3,7 @@ package p2p import ( "github.com/libp2p/go-libp2p-core/peer" pb "github.com/libp2p/go-libp2p-pubsub/pb" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/model/flow" diff --git a/network/p2p/subscription_filter_test.go b/network/p2p/subscription_filter_test.go index 
d9032cc3adc..ef2919499f1 100644 --- a/network/p2p/subscription_filter_test.go +++ b/network/p2p/subscription_filter_test.go @@ -8,10 +8,11 @@ import ( "github.com/libp2p/go-libp2p-core/host" pubsub "github.com/libp2p/go-libp2p-pubsub" - "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/stretchr/testify/require" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/id" "github.com/onflow/flow-go/network/p2p" diff --git a/network/p2p/topic_validator_test.go b/network/p2p/topic_validator_test.go index 0791fb8dd8d..d72485541df 100644 --- a/network/p2p/topic_validator_test.go +++ b/network/p2p/topic_validator_test.go @@ -11,10 +11,11 @@ import ( "github.com/libp2p/go-libp2p-core/host" "github.com/libp2p/go-libp2p-core/peer" pubsub "github.com/libp2p/go-libp2p-pubsub" - "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/stretchr/testify/require" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/messages" "github.com/onflow/flow-go/network/message" diff --git a/network/proxy/network_test.go b/network/proxy/network_test.go index 095ceb89bd8..40f9cd69469 100644 --- a/network/proxy/network_test.go +++ b/network/proxy/network_test.go @@ -3,10 +3,11 @@ package proxy_test import ( "testing" - "github.com/onflow/flow-go/network/channels" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/suite" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/mocknetwork" diff --git a/network/relay/network.go b/network/relay/network.go index 0ea4367593a..37b1b6f208a 100644 --- a/network/relay/network.go +++ b/network/relay/network.go @@ -5,9 +5,10 @@ import ( "github.com/ipfs/go-datastore" "github.com/libp2p/go-libp2p-core/protocol" - "github.com/onflow/flow-go/network/channels" 
"github.com/rs/zerolog" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/util" "github.com/onflow/flow-go/network" @@ -16,8 +17,8 @@ import ( type RelayNetwork struct { originNet network.Network destinationNet network.Network - logger zerolog.Logger - channels channels.ChannelList + logger zerolog.Logger + channels channels.ChannelList } var _ network.Network = (*RelayNetwork)(nil) diff --git a/network/relay/relayer.go b/network/relay/relayer.go index 6d676e0287c..d64028ffae6 100644 --- a/network/relay/relayer.go +++ b/network/relay/relayer.go @@ -3,9 +3,10 @@ package relay import ( "fmt" - "github.com/onflow/flow-go/network/channels" "golang.org/x/sync/errgroup" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/network" ) diff --git a/network/stub/network.go b/network/stub/network.go index c976cdc9eeb..966ee1f7c1a 100644 --- a/network/stub/network.go +++ b/network/stub/network.go @@ -7,10 +7,11 @@ import ( "testing" "time" - "github.com/onflow/flow-go/network/channels" "github.com/pkg/errors" "github.com/stretchr/testify/require" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/mocknetwork" diff --git a/network/validator/pubsub/authorized_sender_validator.go b/network/validator/pubsub/authorized_sender_validator.go index aad1051908a..aff2567a05c 100644 --- a/network/validator/pubsub/authorized_sender_validator.go +++ b/network/validator/pubsub/authorized_sender_validator.go @@ -9,9 +9,10 @@ import ( "github.com/libp2p/go-libp2p-core/peer" pubsub "github.com/libp2p/go-libp2p-pubsub" + "github.com/rs/zerolog" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/message" - "github.com/rs/zerolog" "github.com/onflow/flow-go/network" diff --git 
a/network/validator/pubsub/authorized_sender_validator_test.go b/network/validator/pubsub/authorized_sender_validator_test.go index 94d227120e4..7b61396fa68 100644 --- a/network/validator/pubsub/authorized_sender_validator_test.go +++ b/network/validator/pubsub/authorized_sender_validator_test.go @@ -4,9 +4,10 @@ import ( "fmt" "testing" + "github.com/stretchr/testify/suite" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/message" - "github.com/stretchr/testify/suite" "github.com/onflow/flow-go/model/messages" @@ -15,9 +16,9 @@ import ( ) type TestCase struct { - Identity *flow.Identity - Channel channels.Channel - Message interface{} + Identity *flow.Identity + Channel channels.Channel + Message interface{} MessageStr string } diff --git a/utils/unittest/network/network.go b/utils/unittest/network/network.go index 100ea9cea46..900139752be 100644 --- a/utils/unittest/network/network.go +++ b/utils/unittest/network/network.go @@ -3,9 +3,10 @@ package network import ( "fmt" - "github.com/onflow/flow-go/network/channels" "github.com/stretchr/testify/mock" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/mocknetwork" From 14c8ceebca0198b807e1ae408ff2e4fa185e941e Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Tue, 28 Jun 2022 09:33:17 -0400 Subject: [PATCH 036/223] move network/slashing_violations_consumer -> network/slashing package - move network/slashing_violations_consumer -> network/slashing package - rename slashing_violations_consumer -> slashing/violations_consumer - add const strings for offenses - log offenses - use Hex format for node ID --- .../violations_consumer.go} | 19 +++++++++++++++---- .../pubsub/authorized_sender_validator.go | 6 +++--- 2 files changed, 18 insertions(+), 7 deletions(-) rename network/{slashing_violations_consumer.go => slashing/violations_consumer.go} (75%) diff --git 
a/network/slashing_violations_consumer.go b/network/slashing/violations_consumer.go similarity index 75% rename from network/slashing_violations_consumer.go rename to network/slashing/violations_consumer.go index db54278afdc..f6a2411ad20 100644 --- a/network/slashing_violations_consumer.go +++ b/network/slashing/violations_consumer.go @@ -1,11 +1,19 @@ -package network +package slashing import ( "github.com/rs/zerolog" + "github.com/onflow/flow-go/utils/logging" + "github.com/onflow/flow-go/model/flow" ) +const ( + unAuthorizedSenderViolation = "unauthorized_sender" + unknownMsgTypeViolation = "unknown_message_type" + senderEjectedViolation = "sender_ejected" +) + // SlashingViolationsConsumer is a struct that logs a message for any slashable offences. // This struct will be updated in the future when slashing is implemented. type SlashingViolationsConsumer struct { @@ -23,8 +31,9 @@ func (c *SlashingViolationsConsumer) OnUnAuthorizedSenderError(identity *flow.Id Err(err). Str("peer_id", peerID). Str("role", identity.Role.String()). - Str("peer_node_id", identity.NodeID.String()). + Hex("sender_id", logging.ID(identity.NodeID)). Str("message_type", msgType). + Str("offense", unAuthorizedSenderViolation). Msg("potential slashable offense") } @@ -34,8 +43,9 @@ func (c *SlashingViolationsConsumer) OnUnknownMsgTypeError(identity *flow.Identi Err(err). Str("peer_id", peerID). Str("role", identity.Role.String()). - Str("peer_node_id", identity.NodeID.String()). + Hex("sender_id", logging.ID(identity.NodeID)). Str("message_type", msgType). + Str("offense", unknownMsgTypeViolation). Msg("potential slashable offense") } @@ -45,7 +55,8 @@ func (c *SlashingViolationsConsumer) OnSenderEjectedError(identity *flow.Identit Err(err). Str("peer_id", peerID). Str("role", identity.Role.String()). - Str("peer_node_id", identity.NodeID.String()). + Hex("sender_id", logging.ID(identity.NodeID)). Str("message_type", msgType). + Str("offense", senderEjectedViolation). 
Msg("potential slashable offense") } diff --git a/network/validator/pubsub/authorized_sender_validator.go b/network/validator/pubsub/authorized_sender_validator.go index aff2567a05c..ba3911905c8 100644 --- a/network/validator/pubsub/authorized_sender_validator.go +++ b/network/validator/pubsub/authorized_sender_validator.go @@ -11,11 +11,11 @@ import ( pubsub "github.com/libp2p/go-libp2p-pubsub" "github.com/rs/zerolog" + "github.com/onflow/flow-go/network/slashing" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/message" - "github.com/onflow/flow-go/network" - "github.com/onflow/flow-go/model/flow" ) @@ -36,7 +36,7 @@ func AuthorizedSenderValidator(log zerolog.Logger, channel channels.Channel, get Str("network_channel", channel.String()). Logger() - slashingViolationsConsumer := network.NewSlashingViolationsConsumer(log) + slashingViolationsConsumer := slashing.NewSlashingViolationsConsumer(log) return func(ctx context.Context, from peer.ID, msg interface{}) pubsub.ValidationResult { identity, ok := getIdentity(from) From 3c64a8c726a8761cdc09eff4cd900293fe7d4193 Mon Sep 17 00:00:00 2001 From: danielholmes839 Date: Tue, 28 Jun 2022 10:38:01 -0400 Subject: [PATCH 037/223] remove time.Sleep --- engine/common/synchronization/engine.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/engine/common/synchronization/engine.go b/engine/common/synchronization/engine.go index 2798d681cda..81114d9cdf4 100644 --- a/engine/common/synchronization/engine.go +++ b/engine/common/synchronization/engine.go @@ -313,7 +313,7 @@ func (e *Engine) onBlockResponse(originID flow.Identifier, res *messages.BlockRe } // tempfix: to help nodes falling far behind to catch up. 
// it avoids the race condition in compliance engine and hotstuff to validate blocks - time.Sleep(150 * time.Millisecond) + // time.Sleep(150 * time.Millisecond) e.comp.SubmitLocal(synced) } } From f848585d4d1623be7953c851c92ad7fb53e19909 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Tue, 28 Jun 2022 12:36:17 -0400 Subject: [PATCH 038/223] test all possible combinations of invalid message on channel returns expected error - rename sadTestCases -> unhappyTestCases - pass correct message in sender ejected test - update unknown message type test input - add test case checks if ErrUnauthorizedMessageOnChannel is returned correctly - remove Channel prefix from SyncCluster and ConsensusCluster funcs - test all possible combinations of invalid message on channel returns expected error --- engine/collection/compliance/engine.go | 2 +- engine/collection/compliance/engine_test.go | 2 +- engine/collection/synchronization/engine.go | 2 +- .../collection/synchronization/engine_test.go | 2 +- engine/ghost/engine/rpc.go | 4 +- network/channels/channels.go | 12 +-- network/channels/channels_test.go | 6 +- network/message/authorization.go | 12 +-- network/p2p/subscription_filter_test.go | 2 +- network/p2p/topic_validator_test.go | 2 +- network/test/echoengine_test.go | 2 +- network/topology/randomizedTopology_test.go | 2 +- network/topology/topicBasedTopology_test.go | 2 +- .../pubsub/authorized_sender_validator.go | 2 +- .../authorized_sender_validator_test.go | 80 ++++++++++++++----- 15 files changed, 84 insertions(+), 50 deletions(-) diff --git a/engine/collection/compliance/engine.go b/engine/collection/compliance/engine.go index 629d4ad3546..35cd5e69964 100644 --- a/engine/collection/compliance/engine.go +++ b/engine/collection/compliance/engine.go @@ -173,7 +173,7 @@ func NewEngine( } // register network conduit - conduit, err := net.Register(channels.ChannelConsensusCluster(chainID), eng) + conduit, err := net.Register(channels.ConsensusCluster(chainID), eng) if err != 
nil { return nil, fmt.Errorf("could not register engine: %w", err) } diff --git a/engine/collection/compliance/engine_test.go b/engine/collection/compliance/engine_test.go index e20d7b6e4b5..f669f947bdd 100644 --- a/engine/collection/compliance/engine_test.go +++ b/engine/collection/compliance/engine_test.go @@ -252,7 +252,7 @@ func (cs *ComplianceSuite) TestSubmittingMultipleEntries() { originID := unittest.IdentifierFixture() voteCount := 15 - channel := channels.ChannelConsensusCluster(cs.clusterID) + channel := channels.ConsensusCluster(cs.clusterID) var wg sync.WaitGroup wg.Add(1) diff --git a/engine/collection/synchronization/engine.go b/engine/collection/synchronization/engine.go index 4c8eaf32872..2c3a1ad1070 100644 --- a/engine/collection/synchronization/engine.go +++ b/engine/collection/synchronization/engine.go @@ -107,7 +107,7 @@ func New( } // register the engine with the network layer and store the conduit - con, err := net.Register(channels.ChannelSyncCluster(chainID), e) + con, err := net.Register(channels.SyncCluster(chainID), e) if err != nil { return nil, fmt.Errorf("could not register engine: %w", err) } diff --git a/engine/collection/synchronization/engine_test.go b/engine/collection/synchronization/engine_test.go index a88616b4aca..5c93e947dd3 100644 --- a/engine/collection/synchronization/engine_test.go +++ b/engine/collection/synchronization/engine_test.go @@ -74,7 +74,7 @@ func (ss *SyncSuite) SetupTest() { // set up the network module mock ss.net = &mocknetwork.Network{} - ss.net.On("Register", channels.ChannelSyncCluster(clusterID), mock.Anything).Return( + ss.net.On("Register", channels.SyncCluster(clusterID), mock.Anything).Return( func(network channels.Channel, engine netint.MessageProcessor) netint.Conduit { return ss.con }, diff --git a/engine/ghost/engine/rpc.go b/engine/ghost/engine/rpc.go index 0a4b647f720..5909c9dba63 100644 --- a/engine/ghost/engine/rpc.go +++ b/engine/ghost/engine/rpc.go @@ -118,8 +118,8 @@ func 
registerConduits(net network.Network, state protocol.State, eng network.Eng // add the dynamic channels for the cluster channelList = append( channelList, - channels.ChannelConsensusCluster(clusterID), - channels.ChannelSyncCluster(clusterID), + channels.ConsensusCluster(clusterID), + channels.SyncCluster(clusterID), ) } diff --git a/network/channels/channels.go b/network/channels/channels.go index 216dcf06c9a..a71cf612ca7 100644 --- a/network/channels/channels.go +++ b/network/channels/channels.go @@ -113,11 +113,11 @@ const ( // Channels for consensus protocols ConsensusCommittee = Channel("consensus-committee") - ConsensusClusterPrefix = "consensus-cluster" // dynamic channel, use ChannelConsensusCluster function + ConsensusClusterPrefix = "consensus-cluster" // dynamic channel, use ConsensusCluster function // Channels for protocols actively synchronizing state across nodes SyncCommittee = Channel("sync-committee") - SyncClusterPrefix = "sync-cluster" // dynamic channel, use ChannelSyncCluster function + SyncClusterPrefix = "sync-cluster" // dynamic channel, use SyncCluster function SyncExecution = Channel("sync-execution") // Channels for dkg communication @@ -262,14 +262,14 @@ func ChannelFromTopic(topic Topic) (Channel, bool) { return "", false } -// ChannelConsensusCluster returns a dynamic cluster consensus channel based on +// ConsensusCluster returns a dynamic cluster consensus channel based on // the chain ID of the cluster in question. -func ChannelConsensusCluster(clusterID flow.ChainID) Channel { +func ConsensusCluster(clusterID flow.ChainID) Channel { return Channel(fmt.Sprintf("%s-%s", ConsensusClusterPrefix, clusterID)) } -// ChannelSyncCluster returns a dynamic cluster sync channel based on the chain +// SyncCluster returns a dynamic cluster sync channel based on the chain // ID of the cluster in question. 
-func ChannelSyncCluster(clusterID flow.ChainID) Channel { +func SyncCluster(clusterID flow.ChainID) Channel { return Channel(fmt.Sprintf("%s-%s", SyncClusterPrefix, clusterID)) } diff --git a/network/channels/channels_test.go b/network/channels/channels_test.go index 19cd56eb64c..36bae3b14e5 100644 --- a/network/channels/channels_test.go +++ b/network/channels/channels_test.go @@ -35,7 +35,7 @@ func TestGetRolesByChannel_NonClusterChannel(t *testing.T) { // operates on top of channelRoleMap, and correctly identifies and strips of the cluster channel. func TestGetRolesByChannel_ClusterChannel(t *testing.T) { // creates a cluster channel. - conClusterChannel := ChannelConsensusCluster("some-consensus-cluster-id") + conClusterChannel := ConsensusCluster("some-consensus-cluster-id") // the roles list should contain collection roles, ok := RolesByChannel(conClusterChannel) @@ -74,12 +74,12 @@ func TestGetChannelByRole(t *testing.T) { // against cluster and non-cluster channel. func TestIsClusterChannel(t *testing.T) { // creates a consensus cluster channel and verifies it - conClusterChannel := ChannelConsensusCluster("some-consensus-cluster-id") + conClusterChannel := ConsensusCluster("some-consensus-cluster-id") ok := IsClusterChannel(conClusterChannel) require.True(t, ok) // creates a sync cluster channel and verifies it - syncClusterChannel := ChannelSyncCluster("some-sync-cluster-id") + syncClusterChannel := SyncCluster("some-sync-cluster-id") ok = IsClusterChannel(syncClusterChannel) require.True(t, ok) diff --git a/network/message/authorization.go b/network/message/authorization.go index a5e3b2f82c8..c5cc7a947a7 100644 --- a/network/message/authorization.go +++ b/network/message/authorization.go @@ -30,7 +30,7 @@ type MsgAuthConfig struct { func (m MsgAuthConfig) IsAuthorized(role flow.Role, channel channels.Channel) error { authorizedRoles, ok := m.Config[channel] if !ok { - return fmt.Errorf("could not get authorization config for message type (%s) on channel 
(%s): %w", m.Name, channel, ErrUnauthorizedMessageOnChannel) + return ErrUnauthorizedMessageOnChannel } if !authorizedRoles.Contains(role) { @@ -201,16 +201,6 @@ func initializeMessageAuthConfigsMap() { }, } - // [deprecated] execution state synchronization - AuthorizationConfigs[ExecutionStateSyncRequest] = MsgAuthConfig{ - Name: ExecutionStateSyncRequest, - Config: nil, - } - AuthorizationConfigs[ExecutionStateDelta] = MsgAuthConfig{ - Name: ExecutionStateDelta, - Config: nil, - } - // data exchange for execution of blocks AuthorizationConfigs[ChunkDataRequest] = MsgAuthConfig{ Name: ChunkDataRequest, diff --git a/network/p2p/subscription_filter_test.go b/network/p2p/subscription_filter_test.go index ef2919499f1..9095634f10f 100644 --- a/network/p2p/subscription_filter_test.go +++ b/network/p2p/subscription_filter_test.go @@ -130,7 +130,7 @@ func TestCanSubscribe(t *testing.T) { _, err = collectionNode.Subscribe(badTopic, unittest.NetworkCodec(), unittest.AllowAllPeerFilter()) require.Error(t, err) - clusterTopic := channels.TopicFromChannel(channels.ChannelSyncCluster(flow.Emulator), sporkId) + clusterTopic := channels.TopicFromChannel(channels.SyncCluster(flow.Emulator), sporkId) _, err = collectionNode.Subscribe(clusterTopic, unittest.NetworkCodec(), unittest.AllowAllPeerFilter()) require.NoError(t, err) } diff --git a/network/p2p/topic_validator_test.go b/network/p2p/topic_validator_test.go index d72485541df..f20542748ed 100644 --- a/network/p2p/topic_validator_test.go +++ b/network/p2p/topic_validator_test.go @@ -439,7 +439,7 @@ func TestAuthorizedSenderValidator_ClusterChannel(t *testing.T) { ln2, identity2 := nodeFixture(t, context.Background(), sporkId, "collection_2", withRole(flow.RoleCollection)) ln3, identity3 := nodeFixture(t, context.Background(), sporkId, "collection_3", withRole(flow.RoleCollection)) - channel := channels.ChannelSyncCluster(flow.Testnet) + channel := channels.SyncCluster(flow.Testnet) topic := channels.TopicFromChannel(channel, 
sporkId) ids := flow.IdentityList{&identity1, &identity2, &identity3} diff --git a/network/test/echoengine_test.go b/network/test/echoengine_test.go index bc3c215c9ed..991509e9c5d 100644 --- a/network/test/echoengine_test.go +++ b/network/test/echoengine_test.go @@ -76,7 +76,7 @@ func (suite *EchoEngineTestSuite) TestUnknownChannel() { func (suite *EchoEngineTestSuite) TestClusterChannel() { e := NewEchoEngine(suite.T(), suite.nets[0], 1, channels.TestNetworkChannel, false, suite.Unicast) // creates a cluster channel - clusterChannel := channels.ChannelSyncCluster(flow.Testnet) + clusterChannel := channels.SyncCluster(flow.Testnet) // registers engine with cluster channel _, err := suite.nets[0].Register(clusterChannel, e) // registering cluster channel should not cause an error diff --git a/network/topology/randomizedTopology_test.go b/network/topology/randomizedTopology_test.go index 1a9d1e13a4d..1a39ff617a3 100644 --- a/network/topology/randomizedTopology_test.go +++ b/network/topology/randomizedTopology_test.go @@ -151,7 +151,7 @@ func (suite *RandomizedTopologyTestSuite) TestConnectedness_NonClusterChannel() // cluster channel are individually connected. func (suite *RandomizedTopologyTestSuite) TestConnectedness_ClusterChannel() { // picks one cluster channel as sample - channel := channels.ChannelSyncCluster(flow.Emulator) + channel := channels.SyncCluster(flow.Emulator) // adjacency map keeps graph component of a single channel channelAdjMap := make(map[flow.Identifier]flow.IdentityList) diff --git a/network/topology/topicBasedTopology_test.go b/network/topology/topicBasedTopology_test.go index b18402d1acb..724be3c5b6e 100644 --- a/network/topology/topicBasedTopology_test.go +++ b/network/topology/topicBasedTopology_test.go @@ -196,7 +196,7 @@ func (suite *TopicAwareTopologyTestSuite) TestConnectedness_NonClusterChannel() // cluster channel are individually connected. 
func (suite *TopicAwareTopologyTestSuite) TestConnectedness_ClusterChannel() { // picks one cluster channel as sample - channel := channels.ChannelSyncCluster(flow.Emulator) + channel := channels.SyncCluster(flow.Emulator) // adjacency map keeps graph component of a single channel channelAdjMap := make(map[flow.Identifier]flow.IdentityList) diff --git a/network/validator/pubsub/authorized_sender_validator.go b/network/validator/pubsub/authorized_sender_validator.go index ba3911905c8..299e202efd5 100644 --- a/network/validator/pubsub/authorized_sender_validator.go +++ b/network/validator/pubsub/authorized_sender_validator.go @@ -98,7 +98,7 @@ func IsAuthorizedSender(identity *flow.Identity, channel channels.Channel, msg i } if err := conf.IsAuthorized(identity.Role, channel); err != nil { - return conf.Name, fmt.Errorf("%s: %w", err, ErrUnauthorizedSender) + return conf.Name, fmt.Errorf("%w: %s", err, ErrUnauthorizedSender) } return conf.Name, nil diff --git a/network/validator/pubsub/authorized_sender_validator_test.go b/network/validator/pubsub/authorized_sender_validator_test.go index 7b61396fa68..d6362bb1f58 100644 --- a/network/validator/pubsub/authorized_sender_validator_test.go +++ b/network/validator/pubsub/authorized_sender_validator_test.go @@ -28,18 +28,20 @@ func TestIsAuthorizedSender(t *testing.T) { type TestIsAuthorizedSenderSuite struct { suite.Suite - happyPathTestCases []TestCase - sadPathTestCases []TestCase + authorizedSenderTestCases []TestCase + unauthorizedSenderTestCases []TestCase + unauthorizedMessageOnChannelTestCases []TestCase } func (s *TestIsAuthorizedSenderSuite) SetupTest() { - s.initializeTestCases() + s.initializeAuthorizationTestCases() + s.initializeInvalidMessageOnChannelTestCases() } // TestIsAuthorizedSender_AuthorizedSender checks that IsAuthorizedSender does not return false positive // validation errors for all possible valid combinations (authorized sender role, message type). 
func (s *TestIsAuthorizedSenderSuite) TestIsAuthorizedSender_AuthorizedSender() { - for _, c := range s.happyPathTestCases { + for _, c := range s.authorizedSenderTestCases { str := fmt.Sprintf("role (%s) should be authorized to send message type (%s) on channel (%s)", c.Identity.Role, c.MessageStr, c.Channel) s.Run(str, func() { msgType, err := IsAuthorizedSender(c.Identity, c.Channel, c.Message) @@ -52,34 +54,50 @@ func (s *TestIsAuthorizedSenderSuite) TestIsAuthorizedSender_AuthorizedSender() // TestIsAuthorizedSender_UnAuthorizedSender checks that IsAuthorizedSender return's ErrUnauthorizedSender // validation error for all possible invalid combinations (unauthorized sender role, message type). func (s *TestIsAuthorizedSenderSuite) TestIsAuthorizedSender_UnAuthorizedSender() { - for _, c := range s.sadPathTestCases { + for _, c := range s.unauthorizedSenderTestCases { str := fmt.Sprintf("role (%s) should not be authorized to send message type (%s) on channel (%s)", c.Identity.Role, c.MessageStr, c.Channel) s.Run(str, func() { msgType, err := IsAuthorizedSender(c.Identity, c.Channel, c.Message) - s.Require().ErrorIs(err, ErrUnauthorizedSender) + s.Require().ErrorIs(err, message.ErrUnauthorizedRole) s.Require().Equal(c.MessageStr, msgType) }) } } +// TestIsAuthorizedSender_UnAuthorizedSender for each invalid combination of message type and channel +// an appropriate error message.ErrUnauthorizedMessageOnChannel is returned. 
+func (s *TestIsAuthorizedSenderSuite) TestIsAuthorizedSender_UnAuthorizedMessageOnChannel() { + for _, c := range s.unauthorizedMessageOnChannelTestCases { + str := fmt.Sprintf("message type (%s) should not be authorized to be sent on channel (%s)", c.MessageStr, c.Channel) + s.Run(str, func() { + msgType, err := IsAuthorizedSender(c.Identity, c.Channel, c.Message) + s.Require().ErrorIs(err, message.ErrUnauthorizedMessageOnChannel) + s.Require().Equal(c.MessageStr, msgType) + }) + } +} + +// TestIsAuthorizedSender_ClusterPrefixedChannels checks that IsAuthorizedSender correctly +// handles cluster prefixed channels during validation. +func (s *TestIsAuthorizedSenderSuite) TestIsAuthorizedSender_ClusterPrefixedChannels() { + identity := unittest.IdentityFixture(unittest.WithRole(flow.RoleCollection)) + clusterID := flow.Localnet + msgType, err := IsAuthorizedSender(identity, channels.ConsensusCluster(clusterID), &messages.ClusterBlockResponse{}) + s.Require().NoError(err) + s.Require().Equal(message.ClusterBlockResponse, msgType) +} + // TestIsAuthorizedSender_ValidationFailure checks that IsAuthorizedSender returns the expected validation error. 
func (s *TestIsAuthorizedSenderSuite) TestIsAuthorizedSender_ValidationFailure() { s.Run("sender is ejected", func() { identity := unittest.IdentityFixture() identity.Ejected = true - msgType, err := IsAuthorizedSender(identity, channels.Channel(""), nil) + msgType, err := IsAuthorizedSender(identity, channels.SyncCommittee, &messages.SyncRequest{}) s.Require().ErrorIs(err, ErrSenderEjected) s.Require().Equal("", msgType) }) s.Run("unknown message type", func() { - identity := unittest.IdentityFixture() - msgType, err := IsAuthorizedSender(identity, channels.Channel(""), nil) - s.Require().ErrorIs(err, ErrUnknownMessageType) - s.Require().Equal("", msgType) - }) - - s.Run("unknown message type with message embedded", func() { identity := unittest.IdentityFixture(unittest.WithRole(flow.RoleConsensus)) type msg struct { *messages.BlockProposal @@ -93,11 +111,15 @@ func (s *TestIsAuthorizedSenderSuite) TestIsAuthorizedSender_ValidationFailure() msgType, err := IsAuthorizedSender(identity, channels.ConsensusCommittee, m) s.Require().ErrorIs(err, ErrUnknownMessageType) s.Require().Equal("", msgType) + + msgType, err = IsAuthorizedSender(identity, channels.ConsensusCommittee, nil) + s.Require().ErrorIs(err, ErrUnknownMessageType) + s.Require().Equal("", msgType) }) } -// initializeTestCases initializes happy and sad path test cases for checking authorized and unauthorized role message combinations. -func (s *TestIsAuthorizedSenderSuite) initializeTestCases() { +// initializeAuthorizationTestCases initializes happy and sad path test cases for checking authorized and unauthorized role message combinations. 
+func (s *TestIsAuthorizedSenderSuite) initializeAuthorizationTestCases() { for _, c := range message.AuthorizationConfigs { for channel, authorizedRoles := range c.Config { for _, role := range flow.Roles() { @@ -111,10 +133,32 @@ func (s *TestIsAuthorizedSenderSuite) initializeTestCases() { if authorizedRoles.Contains(role) { // test cases for validation success happy path - s.happyPathTestCases = append(s.happyPathTestCases, tc) + s.authorizedSenderTestCases = append(s.authorizedSenderTestCases, tc) } else { // test cases for validation unsuccessful sad path - s.sadPathTestCases = append(s.sadPathTestCases, tc) + s.unauthorizedSenderTestCases = append(s.unauthorizedSenderTestCases, tc) + } + } + } + } +} + +// initializeInvalidMessageOnChannelTestCases initializes test cases for all possible combinations of invalid message types on channel. +// NOTE: the role in the test case does not matter since ErrUnauthorizedMessageOnChannel will be returned before the role is checked. +func (s *TestIsAuthorizedSenderSuite) initializeInvalidMessageOnChannelTestCases() { + for _, c := range message.AuthorizationConfigs { + for channel, authorizedRoles := range c.Config { + identity := unittest.IdentityFixture(unittest.WithRole(authorizedRoles[0])) + for _, config := range message.AuthorizationConfigs { + _, ok := config.Config[channel] + if config.Name != c.Name && !ok { + tc := TestCase{ + Identity: identity, + Channel: channel, + Message: config.Type(), + MessageStr: config.Name, + } + s.unauthorizedMessageOnChannelTestCases = append(s.unauthorizedMessageOnChannelTestCases, tc) } } } From dbc9b373222bc61d401e8a29d47ca789f3fbc147 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Tue, 28 Jun 2022 12:50:32 -0400 Subject: [PATCH 039/223] remove misconfigured channels in chunk data req/resp configs --- network/message/authorization.go | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/network/message/authorization.go b/network/message/authorization.go 
index c5cc7a947a7..cbf8e95179a 100644 --- a/network/message/authorization.go +++ b/network/message/authorization.go @@ -208,10 +208,7 @@ func initializeMessageAuthConfigsMap() { return new(messages.ChunkDataRequest) }, Config: map[channels.Channel]flow.RoleList{ - channels.ProvideChunks: {flow.RoleVerification}, // channel alias RequestChunks = ProvideChunks - channels.RequestCollections: {flow.RoleVerification}, - channels.RequestApprovalsByChunk: {flow.RoleVerification}, - channels.RequestReceiptsByBlockID: {flow.RoleVerification}, + channels.ProvideChunks: {flow.RoleVerification}, // channel alias RequestChunks = ProvideChunks }, } AuthorizationConfigs[ChunkDataResponse] = MsgAuthConfig{ @@ -220,10 +217,7 @@ func initializeMessageAuthConfigsMap() { return new(messages.ChunkDataResponse) }, Config: map[channels.Channel]flow.RoleList{ - channels.ProvideChunks: {flow.RoleExecution}, // channel alias RequestChunks = ProvideChunks - channels.RequestCollections: {flow.RoleExecution}, - channels.RequestApprovalsByChunk: {flow.RoleExecution}, - channels.RequestReceiptsByBlockID: {flow.RoleExecution}, + channels.ProvideChunks: {flow.RoleExecution}, // channel alias RequestChunks = ProvideChunks }, } From 2851495d6d54d666367e626132e32d33acd7a109 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Tue, 28 Jun 2022 12:55:12 -0400 Subject: [PATCH 040/223] regenerate mocks --- insecure/mock/attack_network.go | 13 ++++++++----- insecure/mock/attack_orchestrator.go | 13 ++++++++----- insecure/mock/corrupted_node_connection.go | 13 ++++++++----- insecure/mock/corrupted_node_connector.go | 13 ++++++++----- ...ible_conduit_factory__connect_attacker_client.go | 13 ++++++++----- ...ible_conduit_factory__connect_attacker_server.go | 13 ++++++++----- ...duit_factory__process_attacker_message_client.go | 13 ++++++++----- ...duit_factory__process_attacker_message_server.go | 13 ++++++++----- insecure/mock/corruptible_conduit_factory_client.go | 13 ++++++++----- 
insecure/mock/corruptible_conduit_factory_server.go | 13 ++++++++----- 10 files changed, 80 insertions(+), 50 deletions(-) diff --git a/insecure/mock/attack_network.go b/insecure/mock/attack_network.go index f6f799df6bf..f10b649e0fd 100644 --- a/insecure/mock/attack_network.go +++ b/insecure/mock/attack_network.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.12.1. DO NOT EDIT. +// Code generated by mockery v2.13.0. DO NOT EDIT. package mockinsecure @@ -7,8 +7,6 @@ import ( irrecoverable "github.com/onflow/flow-go/module/irrecoverable" mock "github.com/stretchr/testify/mock" - - testing "testing" ) // AttackNetwork is an autogenerated mock type for the AttackNetwork type @@ -72,8 +70,13 @@ func (_m *AttackNetwork) Start(_a0 irrecoverable.SignalerContext) { _m.Called(_a0) } -// NewAttackNetwork creates a new instance of AttackNetwork. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. -func NewAttackNetwork(t testing.TB) *AttackNetwork { +type NewAttackNetworkT interface { + mock.TestingT + Cleanup(func()) +} + +// NewAttackNetwork creates a new instance of AttackNetwork. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewAttackNetwork(t NewAttackNetworkT) *AttackNetwork { mock := &AttackNetwork{} mock.Mock.Test(t) diff --git a/insecure/mock/attack_orchestrator.go b/insecure/mock/attack_orchestrator.go index 9b7cb5f5e36..5d086f44777 100644 --- a/insecure/mock/attack_orchestrator.go +++ b/insecure/mock/attack_orchestrator.go @@ -1,12 +1,10 @@ -// Code generated by mockery v2.12.1. DO NOT EDIT. +// Code generated by mockery v2.13.0. DO NOT EDIT. 
package mockinsecure import ( insecure "github.com/onflow/flow-go/insecure" mock "github.com/stretchr/testify/mock" - - testing "testing" ) // AttackOrchestrator is an autogenerated mock type for the AttackOrchestrator type @@ -33,8 +31,13 @@ func (_m *AttackOrchestrator) WithAttackNetwork(_a0 insecure.AttackNetwork) { _m.Called(_a0) } -// NewAttackOrchestrator creates a new instance of AttackOrchestrator. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. -func NewAttackOrchestrator(t testing.TB) *AttackOrchestrator { +type NewAttackOrchestratorT interface { + mock.TestingT + Cleanup(func()) +} + +// NewAttackOrchestrator creates a new instance of AttackOrchestrator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewAttackOrchestrator(t NewAttackOrchestratorT) *AttackOrchestrator { mock := &AttackOrchestrator{} mock.Mock.Test(t) diff --git a/insecure/mock/corrupted_node_connection.go b/insecure/mock/corrupted_node_connection.go index f5cd6919f82..cdedf081c60 100644 --- a/insecure/mock/corrupted_node_connection.go +++ b/insecure/mock/corrupted_node_connection.go @@ -1,12 +1,10 @@ -// Code generated by mockery v2.12.1. DO NOT EDIT. +// Code generated by mockery v2.13.0. DO NOT EDIT. package mockinsecure import ( insecure "github.com/onflow/flow-go/insecure" mock "github.com/stretchr/testify/mock" - - testing "testing" ) // CorruptedNodeConnection is an autogenerated mock type for the CorruptedNodeConnection type @@ -42,8 +40,13 @@ func (_m *CorruptedNodeConnection) SendMessage(_a0 *insecure.Message) error { return r0 } -// NewCorruptedNodeConnection creates a new instance of CorruptedNodeConnection. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewCorruptedNodeConnection(t testing.TB) *CorruptedNodeConnection { +type NewCorruptedNodeConnectionT interface { + mock.TestingT + Cleanup(func()) +} + +// NewCorruptedNodeConnection creates a new instance of CorruptedNodeConnection. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewCorruptedNodeConnection(t NewCorruptedNodeConnectionT) *CorruptedNodeConnection { mock := &CorruptedNodeConnection{} mock.Mock.Test(t) diff --git a/insecure/mock/corrupted_node_connector.go b/insecure/mock/corrupted_node_connector.go index 3b782636936..0d045ad9134 100644 --- a/insecure/mock/corrupted_node_connector.go +++ b/insecure/mock/corrupted_node_connector.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.12.1. DO NOT EDIT. +// Code generated by mockery v2.13.0. DO NOT EDIT. package mockinsecure @@ -9,8 +9,6 @@ import ( irrecoverable "github.com/onflow/flow-go/module/irrecoverable" mock "github.com/stretchr/testify/mock" - - testing "testing" ) // CorruptedNodeConnector is an autogenerated mock type for the CorruptedNodeConnector type @@ -46,8 +44,13 @@ func (_m *CorruptedNodeConnector) WithIncomingMessageHandler(_a0 func(*insecure. _m.Called(_a0) } -// NewCorruptedNodeConnector creates a new instance of CorruptedNodeConnector. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. -func NewCorruptedNodeConnector(t testing.TB) *CorruptedNodeConnector { +type NewCorruptedNodeConnectorT interface { + mock.TestingT + Cleanup(func()) +} + +// NewCorruptedNodeConnector creates a new instance of CorruptedNodeConnector. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
+func NewCorruptedNodeConnector(t NewCorruptedNodeConnectorT) *CorruptedNodeConnector { mock := &CorruptedNodeConnector{} mock.Mock.Test(t) diff --git a/insecure/mock/corruptible_conduit_factory__connect_attacker_client.go b/insecure/mock/corruptible_conduit_factory__connect_attacker_client.go index eec608971fa..b4d022bb074 100644 --- a/insecure/mock/corruptible_conduit_factory__connect_attacker_client.go +++ b/insecure/mock/corruptible_conduit_factory__connect_attacker_client.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.12.1. DO NOT EDIT. +// Code generated by mockery v2.13.0. DO NOT EDIT. package mockinsecure @@ -9,8 +9,6 @@ import ( metadata "google.golang.org/grpc/metadata" mock "github.com/stretchr/testify/mock" - - testing "testing" ) // CorruptibleConduitFactory_ConnectAttackerClient is an autogenerated mock type for the CorruptibleConduitFactory_ConnectAttackerClient type @@ -138,8 +136,13 @@ func (_m *CorruptibleConduitFactory_ConnectAttackerClient) Trailer() metadata.MD return r0 } -// NewCorruptibleConduitFactory_ConnectAttackerClient creates a new instance of CorruptibleConduitFactory_ConnectAttackerClient. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. -func NewCorruptibleConduitFactory_ConnectAttackerClient(t testing.TB) *CorruptibleConduitFactory_ConnectAttackerClient { +type NewCorruptibleConduitFactory_ConnectAttackerClientT interface { + mock.TestingT + Cleanup(func()) +} + +// NewCorruptibleConduitFactory_ConnectAttackerClient creates a new instance of CorruptibleConduitFactory_ConnectAttackerClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
+func NewCorruptibleConduitFactory_ConnectAttackerClient(t NewCorruptibleConduitFactory_ConnectAttackerClientT) *CorruptibleConduitFactory_ConnectAttackerClient { mock := &CorruptibleConduitFactory_ConnectAttackerClient{} mock.Mock.Test(t) diff --git a/insecure/mock/corruptible_conduit_factory__connect_attacker_server.go b/insecure/mock/corruptible_conduit_factory__connect_attacker_server.go index 8c256ef0b93..5ca2fca4acb 100644 --- a/insecure/mock/corruptible_conduit_factory__connect_attacker_server.go +++ b/insecure/mock/corruptible_conduit_factory__connect_attacker_server.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.12.1. DO NOT EDIT. +// Code generated by mockery v2.13.0. DO NOT EDIT. package mockinsecure @@ -9,8 +9,6 @@ import ( metadata "google.golang.org/grpc/metadata" mock "github.com/stretchr/testify/mock" - - testing "testing" ) // CorruptibleConduitFactory_ConnectAttackerServer is an autogenerated mock type for the CorruptibleConduitFactory_ConnectAttackerServer type @@ -109,8 +107,13 @@ func (_m *CorruptibleConduitFactory_ConnectAttackerServer) SetTrailer(_a0 metada _m.Called(_a0) } -// NewCorruptibleConduitFactory_ConnectAttackerServer creates a new instance of CorruptibleConduitFactory_ConnectAttackerServer. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. -func NewCorruptibleConduitFactory_ConnectAttackerServer(t testing.TB) *CorruptibleConduitFactory_ConnectAttackerServer { +type NewCorruptibleConduitFactory_ConnectAttackerServerT interface { + mock.TestingT + Cleanup(func()) +} + +// NewCorruptibleConduitFactory_ConnectAttackerServer creates a new instance of CorruptibleConduitFactory_ConnectAttackerServer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
+func NewCorruptibleConduitFactory_ConnectAttackerServer(t NewCorruptibleConduitFactory_ConnectAttackerServerT) *CorruptibleConduitFactory_ConnectAttackerServer { mock := &CorruptibleConduitFactory_ConnectAttackerServer{} mock.Mock.Test(t) diff --git a/insecure/mock/corruptible_conduit_factory__process_attacker_message_client.go b/insecure/mock/corruptible_conduit_factory__process_attacker_message_client.go index a20edce5acf..fa2f327e769 100644 --- a/insecure/mock/corruptible_conduit_factory__process_attacker_message_client.go +++ b/insecure/mock/corruptible_conduit_factory__process_attacker_message_client.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.12.1. DO NOT EDIT. +// Code generated by mockery v2.13.0. DO NOT EDIT. package mockinsecure @@ -11,8 +11,6 @@ import ( metadata "google.golang.org/grpc/metadata" mock "github.com/stretchr/testify/mock" - - testing "testing" ) // CorruptibleConduitFactory_ProcessAttackerMessageClient is an autogenerated mock type for the CorruptibleConduitFactory_ProcessAttackerMessageClient type @@ -154,8 +152,13 @@ func (_m *CorruptibleConduitFactory_ProcessAttackerMessageClient) Trailer() meta return r0 } -// NewCorruptibleConduitFactory_ProcessAttackerMessageClient creates a new instance of CorruptibleConduitFactory_ProcessAttackerMessageClient. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. -func NewCorruptibleConduitFactory_ProcessAttackerMessageClient(t testing.TB) *CorruptibleConduitFactory_ProcessAttackerMessageClient { +type NewCorruptibleConduitFactory_ProcessAttackerMessageClientT interface { + mock.TestingT + Cleanup(func()) +} + +// NewCorruptibleConduitFactory_ProcessAttackerMessageClient creates a new instance of CorruptibleConduitFactory_ProcessAttackerMessageClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
+func NewCorruptibleConduitFactory_ProcessAttackerMessageClient(t NewCorruptibleConduitFactory_ProcessAttackerMessageClientT) *CorruptibleConduitFactory_ProcessAttackerMessageClient { mock := &CorruptibleConduitFactory_ProcessAttackerMessageClient{} mock.Mock.Test(t) diff --git a/insecure/mock/corruptible_conduit_factory__process_attacker_message_server.go b/insecure/mock/corruptible_conduit_factory__process_attacker_message_server.go index a14007afac6..8847ec2da9c 100644 --- a/insecure/mock/corruptible_conduit_factory__process_attacker_message_server.go +++ b/insecure/mock/corruptible_conduit_factory__process_attacker_message_server.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.12.1. DO NOT EDIT. +// Code generated by mockery v2.13.0. DO NOT EDIT. package mockinsecure @@ -11,8 +11,6 @@ import ( metadata "google.golang.org/grpc/metadata" mock "github.com/stretchr/testify/mock" - - testing "testing" ) // CorruptibleConduitFactory_ProcessAttackerMessageServer is an autogenerated mock type for the CorruptibleConduitFactory_ProcessAttackerMessageServer type @@ -134,8 +132,13 @@ func (_m *CorruptibleConduitFactory_ProcessAttackerMessageServer) SetTrailer(_a0 _m.Called(_a0) } -// NewCorruptibleConduitFactory_ProcessAttackerMessageServer creates a new instance of CorruptibleConduitFactory_ProcessAttackerMessageServer. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. -func NewCorruptibleConduitFactory_ProcessAttackerMessageServer(t testing.TB) *CorruptibleConduitFactory_ProcessAttackerMessageServer { +type NewCorruptibleConduitFactory_ProcessAttackerMessageServerT interface { + mock.TestingT + Cleanup(func()) +} + +// NewCorruptibleConduitFactory_ProcessAttackerMessageServer creates a new instance of CorruptibleConduitFactory_ProcessAttackerMessageServer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
+func NewCorruptibleConduitFactory_ProcessAttackerMessageServer(t NewCorruptibleConduitFactory_ProcessAttackerMessageServerT) *CorruptibleConduitFactory_ProcessAttackerMessageServer { mock := &CorruptibleConduitFactory_ProcessAttackerMessageServer{} mock.Mock.Test(t) diff --git a/insecure/mock/corruptible_conduit_factory_client.go b/insecure/mock/corruptible_conduit_factory_client.go index b87eb1f0fa6..24686801504 100644 --- a/insecure/mock/corruptible_conduit_factory_client.go +++ b/insecure/mock/corruptible_conduit_factory_client.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.12.1. DO NOT EDIT. +// Code generated by mockery v2.13.0. DO NOT EDIT. package mockinsecure @@ -11,8 +11,6 @@ import ( insecure "github.com/onflow/flow-go/insecure" mock "github.com/stretchr/testify/mock" - - testing "testing" ) // CorruptibleConduitFactoryClient is an autogenerated mock type for the CorruptibleConduitFactoryClient type @@ -80,8 +78,13 @@ func (_m *CorruptibleConduitFactoryClient) ProcessAttackerMessage(ctx context.Co return r0, r1 } -// NewCorruptibleConduitFactoryClient creates a new instance of CorruptibleConduitFactoryClient. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. -func NewCorruptibleConduitFactoryClient(t testing.TB) *CorruptibleConduitFactoryClient { +type NewCorruptibleConduitFactoryClientT interface { + mock.TestingT + Cleanup(func()) +} + +// NewCorruptibleConduitFactoryClient creates a new instance of CorruptibleConduitFactoryClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
+func NewCorruptibleConduitFactoryClient(t NewCorruptibleConduitFactoryClientT) *CorruptibleConduitFactoryClient { mock := &CorruptibleConduitFactoryClient{} mock.Mock.Test(t) diff --git a/insecure/mock/corruptible_conduit_factory_server.go b/insecure/mock/corruptible_conduit_factory_server.go index b0a9b5ad668..4e6766a92ea 100644 --- a/insecure/mock/corruptible_conduit_factory_server.go +++ b/insecure/mock/corruptible_conduit_factory_server.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.12.1. DO NOT EDIT. +// Code generated by mockery v2.13.0. DO NOT EDIT. package mockinsecure @@ -7,8 +7,6 @@ import ( emptypb "google.golang.org/protobuf/types/known/emptypb" mock "github.com/stretchr/testify/mock" - - testing "testing" ) // CorruptibleConduitFactoryServer is an autogenerated mock type for the CorruptibleConduitFactoryServer type @@ -44,8 +42,13 @@ func (_m *CorruptibleConduitFactoryServer) ProcessAttackerMessage(_a0 insecure.C return r0 } -// NewCorruptibleConduitFactoryServer creates a new instance of CorruptibleConduitFactoryServer. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. -func NewCorruptibleConduitFactoryServer(t testing.TB) *CorruptibleConduitFactoryServer { +type NewCorruptibleConduitFactoryServerT interface { + mock.TestingT + Cleanup(func()) +} + +// NewCorruptibleConduitFactoryServer creates a new instance of CorruptibleConduitFactoryServer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
+func NewCorruptibleConduitFactoryServer(t NewCorruptibleConduitFactoryServerT) *CorruptibleConduitFactoryServer { mock := &CorruptibleConduitFactoryServer{} mock.Mock.Test(t) From af1a26c7d7a727133faf3f5abf777a53869d11cc Mon Sep 17 00:00:00 2001 From: danielholmes839 Date: Tue, 28 Jun 2022 13:53:08 -0400 Subject: [PATCH 041/223] add done channel to HotstuffFollower --- consensus/hotstuff/follower_loop.go | 23 ++++++++++++++++++----- engine/common/follower/engine.go | 9 +++++++-- module/hotstuff.go | 2 +- 3 files changed, 26 insertions(+), 8 deletions(-) diff --git a/consensus/hotstuff/follower_loop.go b/consensus/hotstuff/follower_loop.go index 00786d04ece..9ed277ad3bd 100644 --- a/consensus/hotstuff/follower_loop.go +++ b/consensus/hotstuff/follower_loop.go @@ -11,11 +11,16 @@ import ( "github.com/onflow/flow-go/utils/logging" ) +type proposalTask struct { + *model.Proposal + done chan struct{} +} + // FollowerLoop implements interface FollowerLoop type FollowerLoop struct { log zerolog.Logger followerLogic FollowerLogic - proposals chan *model.Proposal + proposals chan proposalTask runner runner.SingleRunner // lock for preventing concurrent state transitions } @@ -25,7 +30,7 @@ func NewFollowerLoop(log zerolog.Logger, followerLogic FollowerLogic) (*Follower return &FollowerLoop{ log: log, followerLogic: followerLogic, - proposals: make(chan *model.Proposal), + proposals: make(chan proposalTask), runner: runner.NewSingleRunner(), }, nil } @@ -35,10 +40,15 @@ func NewFollowerLoop(log zerolog.Logger, followerLogic FollowerLogic) (*Follower // // Block proposals must be submitted in order, i.e. a proposal's parent must // have been previously processed by the FollowerLoop. 
-func (fl *FollowerLoop) SubmitProposal(proposalHeader *flow.Header, parentView uint64) { +func (fl *FollowerLoop) SubmitProposal(proposalHeader *flow.Header, parentView uint64) chan struct{} { received := time.Now() - proposal := model.ProposalFromFlow(proposalHeader, parentView) + proposal := proposalTask{ + Proposal: model.ProposalFromFlow(proposalHeader, parentView), + done: make(chan struct{}), + } + fl.proposals <- proposal + // the busy duration is measured as how long it takes from a block being // received to a block being handled by the event handler. busyDuration := time.Since(received) @@ -46,6 +56,8 @@ func (fl *FollowerLoop) SubmitProposal(proposalHeader *flow.Header, parentView u Uint64("view", proposal.Block.View). Dur("busy_duration", busyDuration). Msg("busy duration to handle a proposal") + + return proposal.done } // loop will synchronously processes all events. @@ -63,7 +75,8 @@ func (fl *FollowerLoop) loop() { select { case p := <-fl.proposals: - err := fl.followerLogic.AddBlock(p) + err := fl.followerLogic.AddBlock(p.Proposal) + defer close(p.done) if err != nil { // all errors are fatal fl.log.Error(). Hex("block_id", logging.ID(p.Block.BlockID)). 
diff --git a/engine/common/follower/engine.go b/engine/common/follower/engine.go index ac6ed021ef5..9377ec12ba6 100644 --- a/engine/common/follower/engine.go +++ b/engine/common/follower/engine.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "time" "github.com/hashicorp/go-multierror" "github.com/rs/zerolog" @@ -368,8 +369,12 @@ func (e *Engine) processBlockAndDescendants(ctx context.Context, proposal *messa log.Info().Msg("forwarding block proposal to hotstuff") // submit the model to follower for processing - e.follower.SubmitProposal(header, parent.View) - + select { + case <-e.follower.SubmitProposal(header, parent.View): + break + case <-time.After(time.Millisecond * 200): + break + } // check for any descendants of the block to process err = e.processPendingChildren(ctx, header) if err != nil { diff --git a/module/hotstuff.go b/module/hotstuff.go index 44161263974..22ac9fdfa13 100644 --- a/module/hotstuff.go +++ b/module/hotstuff.go @@ -47,5 +47,5 @@ type HotStuffFollower interface { // // Block proposals must be submitted in order, i.e. a proposal's parent must // have been previously processed by the HotStuffFollower. - SubmitProposal(proposal *flow.Header, parentView uint64) + SubmitProposal(proposal *flow.Header, parentView uint64) (done chan struct{}) } From fa42d2546661fc78edf2e3e1132132f2e0f06349 Mon Sep 17 00:00:00 2001 From: danielholmes839 Date: Tue, 28 Jun 2022 14:04:22 -0400 Subject: [PATCH 042/223] update HotstuffFollower mock --- module/mock/hotstuff_metrics.go | 17 +---------------- 1 file changed, 1 insertion(+), 16 deletions(-) diff --git a/module/mock/hotstuff_metrics.go b/module/mock/hotstuff_metrics.go index f6dc5c2c1d4..933c674fabe 100644 --- a/module/mock/hotstuff_metrics.go +++ b/module/mock/hotstuff_metrics.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.13.0. DO NOT EDIT. +// Code generated by mockery v1.0.0. DO NOT EDIT. 
package mock @@ -72,18 +72,3 @@ func (_m *HotstuffMetrics) SignerProcessingDuration(duration time.Duration) { func (_m *HotstuffMetrics) ValidatorProcessingDuration(duration time.Duration) { _m.Called(duration) } - -type NewHotstuffMetricsT interface { - mock.TestingT - Cleanup(func()) -} - -// NewHotstuffMetrics creates a new instance of HotstuffMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewHotstuffMetrics(t NewHotstuffMetricsT) *HotstuffMetrics { - mock := &HotstuffMetrics{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} From f75f58f720f418f2f22fb7a8857e2aa9acb13970 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Tue, 28 Jun 2022 14:14:25 -0400 Subject: [PATCH 043/223] fix channels usage conflict --- network/topology/randomizedTopology.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/network/topology/randomizedTopology.go b/network/topology/randomizedTopology.go index 3e5a89d1626..c4df8b5eb07 100644 --- a/network/topology/randomizedTopology.go +++ b/network/topology/randomizedTopology.go @@ -3,13 +3,13 @@ package topology import ( "fmt" - "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/onflow/flow-go/crypto/random" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/state/protocol/seed" ) @@ -71,8 +71,8 @@ func NewRandomizedTopology(nodeID flow.Identifier, logger zerolog.Logger, edgePr // Independent invocations of GenerateFanout on different nodes collaboratively must construct a cohesive // connected graph of nodes that enables them talking to each other. This should be done with a very high probability // in randomized topology. 
-func (r RandomizedTopology) GenerateFanout(ids flow.IdentityList, channels channels.ChannelList) (flow.IdentityList, error) { - myUniqueChannels := channels.UniqueChannels(channels) +func (r RandomizedTopology) GenerateFanout(ids flow.IdentityList, channelList channels.ChannelList) (flow.IdentityList, error) { + myUniqueChannels := channels.UniqueChannels(channelList) if len(myUniqueChannels) == 0 { // no subscribed channel, hence skip topology creation // we do not return an error at this state as invocation of MakeTopology may happen before From 708d35d3e290b0f1208607c7c5fdc1146c4ed552 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Tue, 28 Jun 2022 14:41:12 -0400 Subject: [PATCH 044/223] Update topicBasedTopology.go --- network/topology/topicBasedTopology.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/network/topology/topicBasedTopology.go b/network/topology/topicBasedTopology.go index d585f8019d1..0050d219257 100644 --- a/network/topology/topicBasedTopology.go +++ b/network/topology/topicBasedTopology.go @@ -51,8 +51,8 @@ func NewTopicBasedTopology(nodeID flow.Identifier, logger zerolog.Logger, state // of the messages (i.e., publish and multicast). // Independent invocations of GenerateFanout on different nodes collaboratively must construct a cohesive // connected graph of nodes that enables them talking to each other. 
-func (t TopicBasedTopology) GenerateFanout(ids flow.IdentityList, channels channels.ChannelList) (flow.IdentityList, error) { - myUniqueChannels := channels.UniqueChannels(channels) +func (t TopicBasedTopology) GenerateFanout(ids flow.IdentityList, channelList channels.ChannelList) (flow.IdentityList, error) { + myUniqueChannels := channels.UniqueChannels(channelList) if len(myUniqueChannels) == 0 { // no subscribed channel, hence skip topology creation // we do not return an error at this state as invocation of MakeTopology may happen before From e2a1b2562a7f3e1694d2dc75933ec4d41c6a6ef2 Mon Sep 17 00:00:00 2001 From: danielholmes839 Date: Tue, 28 Jun 2022 14:50:31 -0400 Subject: [PATCH 045/223] fix unit tests --- engine/common/follower/engine_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/engine/common/follower/engine_test.go b/engine/common/follower/engine_test.go index 5b526cd555d..c119384569a 100644 --- a/engine/common/follower/engine_test.go +++ b/engine/common/follower/engine_test.go @@ -141,7 +141,7 @@ func (suite *Suite) TestHandleProposal() { // we do not have any children cached suite.cache.On("ByParentID", block.ID()).Return(nil, false) // the proposal should be forwarded to the follower - suite.follower.On("SubmitProposal", block.Header, parent.Header.View).Once() + suite.follower.On("SubmitProposal", block.Header, parent.Header.View).Once().Return(make(chan struct{})) // submit the block proposal := unittest.ProposalFromBlock(&block) @@ -207,8 +207,8 @@ func (suite *Suite) TestHandleProposalWithPendingChildren() { suite.headers.On("ByBlockID", parent.ID()).Return(parent.Header, nil) suite.headers.On("ByBlockID", block.ID()).Return(block.Header, nil).Once() // should submit to follower - suite.follower.On("SubmitProposal", block.Header, parent.Header.View).Once() - suite.follower.On("SubmitProposal", child.Header, block.Header.View).Once() + suite.follower.On("SubmitProposal", block.Header, 
parent.Header.View).Once().Return(make(chan struct{})) + suite.follower.On("SubmitProposal", child.Header, block.Header.View).Once().Return(make(chan struct{})) // we have one pending child cached pending := []*flow.PendingBlock{ From d705ca75ff02189e6f19c65c8811eb9bd10d9806 Mon Sep 17 00:00:00 2001 From: danielholmes839 Date: Tue, 28 Jun 2022 15:00:13 -0400 Subject: [PATCH 046/223] update HotstuffFollower mock --- module/mock/hot_stuff_follower.go | 28 ++++++++++++---------------- 1 file changed, 12 insertions(+), 16 deletions(-) diff --git a/module/mock/hot_stuff_follower.go b/module/mock/hot_stuff_follower.go index 3ed2118ab1f..02838ee8136 100644 --- a/module/mock/hot_stuff_follower.go +++ b/module/mock/hot_stuff_follower.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.13.0. DO NOT EDIT. +// Code generated by mockery v1.0.0. DO NOT EDIT. package mock @@ -45,21 +45,17 @@ func (_m *HotStuffFollower) Ready() <-chan struct{} { } // SubmitProposal provides a mock function with given fields: proposal, parentView -func (_m *HotStuffFollower) SubmitProposal(proposal *flow.Header, parentView uint64) { - _m.Called(proposal, parentView) -} - -type NewHotStuffFollowerT interface { - mock.TestingT - Cleanup(func()) -} +func (_m *HotStuffFollower) SubmitProposal(proposal *flow.Header, parentView uint64) chan struct{} { + ret := _m.Called(proposal, parentView) -// NewHotStuffFollower creates a new instance of HotStuffFollower. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewHotStuffFollower(t NewHotStuffFollowerT) *HotStuffFollower { - mock := &HotStuffFollower{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) + var r0 chan struct{} + if rf, ok := ret.Get(0).(func(*flow.Header, uint64) chan struct{}); ok { + r0 = rf(proposal, parentView) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(chan struct{}) + } + } - return mock + return r0 } From 450776837905bb8ccd60b2358e90c3d5a4c3d789 Mon Sep 17 00:00:00 2001 From: danielholmes839 Date: Tue, 28 Jun 2022 15:04:17 -0400 Subject: [PATCH 047/223] Revert "update HotstuffFollower mock" This reverts commit fa42d2546661fc78edf2e3e1132132f2e0f06349. --- module/mock/hotstuff_metrics.go | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/module/mock/hotstuff_metrics.go b/module/mock/hotstuff_metrics.go index 933c674fabe..f6dc5c2c1d4 100644 --- a/module/mock/hotstuff_metrics.go +++ b/module/mock/hotstuff_metrics.go @@ -1,4 +1,4 @@ -// Code generated by mockery v1.0.0. DO NOT EDIT. +// Code generated by mockery v2.13.0. DO NOT EDIT. package mock @@ -72,3 +72,18 @@ func (_m *HotstuffMetrics) SignerProcessingDuration(duration time.Duration) { func (_m *HotstuffMetrics) ValidatorProcessingDuration(duration time.Duration) { _m.Called(duration) } + +type NewHotstuffMetricsT interface { + mock.TestingT + Cleanup(func()) +} + +// NewHotstuffMetrics creates a new instance of HotstuffMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
+func NewHotstuffMetrics(t NewHotstuffMetricsT) *HotstuffMetrics { + mock := &HotstuffMetrics{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} From ef1488acbffd0c36ddafe54ffb8fdf556202628f Mon Sep 17 00:00:00 2001 From: danielholmes839 Date: Tue, 28 Jun 2022 15:27:22 -0400 Subject: [PATCH 048/223] close the done channel without defer --- consensus/hotstuff/follower_loop.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/consensus/hotstuff/follower_loop.go b/consensus/hotstuff/follower_loop.go index 9ed277ad3bd..5218f7ec568 100644 --- a/consensus/hotstuff/follower_loop.go +++ b/consensus/hotstuff/follower_loop.go @@ -76,7 +76,8 @@ func (fl *FollowerLoop) loop() { select { case p := <-fl.proposals: err := fl.followerLogic.AddBlock(p.Proposal) - defer close(p.done) + close(p.done) + if err != nil { // all errors are fatal fl.log.Error(). Hex("block_id", logging.ID(p.Block.BlockID)). From 6b89a9d12de6814759541bf9366b83b43f11b601 Mon Sep 17 00:00:00 2001 From: danielholmes839 Date: Tue, 28 Jun 2022 15:30:16 -0400 Subject: [PATCH 049/223] removed tempfix comment --- engine/common/synchronization/engine.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/engine/common/synchronization/engine.go b/engine/common/synchronization/engine.go index 81114d9cdf4..abb66ddea83 100644 --- a/engine/common/synchronization/engine.go +++ b/engine/common/synchronization/engine.go @@ -311,9 +311,7 @@ func (e *Engine) onBlockResponse(originID flow.Identifier, res *messages.BlockRe OriginID: originID, Block: block, } - // tempfix: to help nodes falling far behind to catch up. 
- // it avoids the race condition in compliance engine and hotstuff to validate blocks - // time.Sleep(150 * time.Millisecond) + e.comp.SubmitLocal(synced) } } From 059356c6ff312c86d7044fc4fa69726556c7b432 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Tue, 28 Jun 2022 15:38:27 -0400 Subject: [PATCH 050/223] update test mock network.channel -> channels.channel - fix channels import and var name conflicts - update authorized sender validator topic validator test --- engine/access/relay/engine.go | 4 ++-- engine/common/splitter/network/network_test.go | 2 +- engine/execution/execution_test.go | 12 ++++++------ engine/verification/utils/unittest/helper.go | 4 ++-- network/channels/channels_test.go | 12 ++++++------ network/p2p/topic_validator_test.go | 16 ++++++++-------- network/proxy/network_test.go | 2 +- network/slashing/violations_consumer.go | 6 +++--- utils/unittest/network/network.go | 2 +- 9 files changed, 30 insertions(+), 30 deletions(-) diff --git a/engine/access/relay/engine.go b/engine/access/relay/engine.go index 8abeabd0943..06691501a4b 100644 --- a/engine/access/relay/engine.go +++ b/engine/access/relay/engine.go @@ -20,7 +20,7 @@ type Engine struct { func New( log zerolog.Logger, - channels channels.ChannelList, + channelList channels.ChannelList, net network.Network, unstakedNet network.Network, ) (*Engine, error) { @@ -30,7 +30,7 @@ func New( conduits: make(map[channels.Channel]network.Conduit), } - for _, channel := range channels { + for _, channel := range channelList { _, err := net.Register(channel, e) if err != nil { return nil, fmt.Errorf("could not register relay engine on channel: %w", err) diff --git a/engine/common/splitter/network/network_test.go b/engine/common/splitter/network/network_test.go index 1c18613cabe..74af229810d 100644 --- a/engine/common/splitter/network/network_test.go +++ b/engine/common/splitter/network/network_test.go @@ -36,7 +36,7 @@ func (suite *Suite) SetupTest() { suite.con = new(mocknetwork.Conduit) 
suite.engines = make(map[channels.Channel]network.MessageProcessor) - net.On("Register", mock.AnythingOfType("network.Channel"), mock.Anything).Run(func(args mock.Arguments) { + net.On("Register", mock.AnythingOfType("channels.Channel"), mock.Anything).Run(func(args mock.Arguments) { channel, _ := args.Get(0).(channels.Channel) engine, ok := args.Get(1).(network.MessageProcessor) suite.Assert().True(ok) diff --git a/engine/execution/execution_test.go b/engine/execution/execution_test.go index 22a9bc1aec3..4971ff0aac0 100644 --- a/engine/execution/execution_test.go +++ b/engine/execution/execution_test.go @@ -141,7 +141,7 @@ func TestExecutionFlow(t *testing.T) { // check collection node received the collection request from execution node providerEngine := new(mocknetwork.Engine) provConduit, _ := collectionNode.Net.Register(channels.ProvideCollections, providerEngine) - providerEngine.On("Process", mock.AnythingOfType("network.Channel"), exeID.NodeID, mock.Anything). + providerEngine.On("Process", mock.AnythingOfType("channels.Channel"), exeID.NodeID, mock.Anything). Run(func(args mock.Arguments) { originID := args.Get(1).(flow.Identifier) req := args.Get(2).(*messages.EntityRequest) @@ -178,7 +178,7 @@ func TestExecutionFlow(t *testing.T) { // check the verification engine received the ER from execution node verificationEngine := new(mocknetwork.Engine) _, _ = verificationNode.Net.Register(channels.ReceiveReceipts, verificationEngine) - verificationEngine.On("Process", mock.AnythingOfType("network.Channel"), exeID.NodeID, mock.Anything). + verificationEngine.On("Process", mock.AnythingOfType("channels.Channel"), exeID.NodeID, mock.Anything). 
Run(func(args mock.Arguments) { lock.Lock() defer lock.Unlock() @@ -193,7 +193,7 @@ func TestExecutionFlow(t *testing.T) { // check the consensus engine has received the result from execution node consensusEngine := new(mocknetwork.Engine) _, _ = consensusNode.Net.Register(channels.ReceiveReceipts, consensusEngine) - consensusEngine.On("Process", mock.AnythingOfType("network.Channel"), exeID.NodeID, mock.Anything). + consensusEngine.On("Process", mock.AnythingOfType("channels.Channel"), exeID.NodeID, mock.Anything). Run(func(args mock.Arguments) { lock.Lock() defer lock.Unlock() @@ -404,7 +404,7 @@ func TestFailedTxWillNotChangeStateCommitment(t *testing.T) { consensusEngine := new(mocknetwork.Engine) _, _ = consensusNode.Net.Register(channels.ReceiveReceipts, consensusEngine) - consensusEngine.On("Process", mock.AnythingOfType("network.Channel"), mock.Anything, mock.Anything). + consensusEngine.On("Process", mock.AnythingOfType("channels.Channel"), mock.Anything, mock.Anything). Run(func(args mock.Arguments) { receiptsReceived.Inc() originID := args[1].(flow.Identifier) @@ -474,7 +474,7 @@ func mockCollectionEngineToReturnCollections(t *testing.T, collectionNode *testm blob, _ := msgpack.Marshal(col) colMap[col.ID()] = blob } - collectionEngine.On("Process", mock.AnythingOfType("network.Channel"), mock.Anything, mock.Anything). + collectionEngine.On("Process", mock.AnythingOfType("channels.Channel"), mock.Anything, mock.Anything). Run(func(args mock.Arguments) { originID := args[1].(flow.Identifier) req := args[2].(*messages.EntityRequest) @@ -549,7 +549,7 @@ func TestBroadcastToMultipleVerificationNodes(t *testing.T) { verificationEngine := new(mocknetwork.Engine) _, _ = verification1Node.Net.Register(channels.ReceiveReceipts, verificationEngine) _, _ = verification2Node.Net.Register(channels.ReceiveReceipts, verificationEngine) - verificationEngine.On("Process", mock.AnythingOfType("network.Channel"), exeID.NodeID, mock.Anything). 
+ verificationEngine.On("Process", mock.AnythingOfType("channels.Channel"), exeID.NodeID, mock.Anything). Run(func(args mock.Arguments) { actualCalls.Inc() diff --git a/engine/verification/utils/unittest/helper.go b/engine/verification/utils/unittest/helper.go index 184a0495a34..0f5ed286a16 100644 --- a/engine/verification/utils/unittest/helper.go +++ b/engine/verification/utils/unittest/helper.go @@ -68,7 +68,7 @@ func SetupChunkDataPackProvider(t *testing.T, mu := &sync.Mutex{} // making testify Run thread-safe - exeEngine.On("Process", testifymock.AnythingOfType("network.Channel"), testifymock.Anything, testifymock.Anything). + exeEngine.On("Process", testifymock.AnythingOfType("channels.Channel"), testifymock.Anything, testifymock.Anything). Run(func(args testifymock.Arguments) { mu.Lock() defer mu.Unlock() @@ -183,7 +183,7 @@ func SetupMockConsensusNode(t *testing.T, mu := &sync.Mutex{} // making testify mock thread-safe - conEngine.On("Process", testifymock.AnythingOfType("network.Channel"), testifymock.Anything, testifymock.Anything). + conEngine.On("Process", testifymock.AnythingOfType("channels.Channel"), testifymock.Anything, testifymock.Anything). Run(func(args testifymock.Arguments) { mu.Lock() defer mu.Unlock() diff --git a/network/channels/channels_test.go b/network/channels/channels_test.go index 36bae3b14e5..37b2ca6c811 100644 --- a/network/channels/channels_test.go +++ b/network/channels/channels_test.go @@ -119,13 +119,13 @@ func TestUniqueChannels_Uniqueness(t *testing.T) { // We use the identifier of RoleList to determine their uniqueness. 
func TestUniqueChannels_ClusterChannels(t *testing.T) { channels := ChannelsByRole(flow.RoleCollection) - consensusCluster := channels.ChannelConsensusCluster(flow.Emulator) - syncCluster := channels.ChannelSyncCluster(flow.Emulator) + consensusCluster := ConsensusCluster(flow.Emulator) + syncCluster := SyncCluster(flow.Emulator) channels = append(channels, consensusCluster, syncCluster) - uniques := channels.UniqueChannels(channels) + uniques := UniqueChannels(channels) // collection role has two cluster and one non-cluster channels all with the same RoleList. // Hence all of them should be returned as unique channels. - require.Contains(t, uniques, syncCluster) // cluster channel - require.Contains(t, uniques, consensusCluster) // cluster channel - require.Contains(t, uniques, channels.PushTransactions) // non-cluster channel + require.Contains(t, uniques, syncCluster) // cluster channel + require.Contains(t, uniques, consensusCluster) // cluster channel + require.Contains(t, uniques, PushTransactions) // non-cluster channel } diff --git a/network/p2p/topic_validator_test.go b/network/p2p/topic_validator_test.go index f20542748ed..0702af418ea 100644 --- a/network/p2p/topic_validator_test.go +++ b/network/p2p/topic_validator_test.go @@ -169,11 +169,11 @@ func TestAuthorizedSenderValidator_Unauthorized(t *testing.T) { // setup hooked logger var hookCalls uint64 hook := zerolog.HookFunc(func(e *zerolog.Event, level zerolog.Level, message string) { - if level == zerolog.WarnLevel { + if level == zerolog.ErrorLevel { atomic.AddUint64(&hookCalls, 1) } }) - logger := zerolog.New(os.Stdout).Level(zerolog.WarnLevel).Hook(hook) + logger := zerolog.New(os.Stdout).Level(zerolog.ErrorLevel).Hook(hook) sn1, identity1 := nodeFixture(t, context.Background(), sporkId, "TestAuthorizedSenderValidator_InvalidMsg", withRole(flow.RoleConsensus)) sn2, identity2 := nodeFixture(t, context.Background(), sporkId, "TestAuthorizedSenderValidator_InvalidMsg", withRole(flow.RoleConsensus)) @@ 
-263,7 +263,7 @@ func TestAuthorizedSenderValidator_Unauthorized(t *testing.T) { unittest.RequireReturnsBefore(t, wg.Wait, 5*time.Second, "could not receive message on time") - // expecting 1 warn calls for each rejected message from unauthorized node + // expecting 1 error log for each rejected message from unauthorized node require.Equalf(t, uint64(1), hookCalls, "expected 1 warning to be logged") } @@ -285,11 +285,11 @@ func TestAuthorizedSenderValidator_InvalidMsg(t *testing.T) { // setup hooked logger var hookCalls uint64 hook := zerolog.HookFunc(func(e *zerolog.Event, level zerolog.Level, message string) { - if level == zerolog.WarnLevel { + if level == zerolog.ErrorLevel { atomic.AddUint64(&hookCalls, 1) } }) - logger := zerolog.New(os.Stdout).Level(zerolog.WarnLevel).Hook(hook) + logger := zerolog.New(os.Stdout).Level(zerolog.ErrorLevel).Hook(hook) authorizedSenderValidator := validator.AuthorizedSenderValidator(logger, channel, func(pid peer.ID) (*flow.Identity, bool) { fid, err := translator.GetFlowID(pid) @@ -334,7 +334,7 @@ func TestAuthorizedSenderValidator_InvalidMsg(t *testing.T) { unittest.RequireReturnsBefore(t, wg.Wait, 5*time.Second, "could not receive message on time") - // expecting 1 warn calls for each rejected message from ejected node + // expecting 1 error log for each rejected message from ejected node require.Equalf(t, uint64(1), hookCalls, "expected 1 warning to be logged") } @@ -356,11 +356,11 @@ func TestAuthorizedSenderValidator_Ejected(t *testing.T) { // setup hooked logger var hookCalls uint64 hook := zerolog.HookFunc(func(e *zerolog.Event, level zerolog.Level, message string) { - if level == zerolog.WarnLevel { + if level == zerolog.ErrorLevel { atomic.AddUint64(&hookCalls, 1) } }) - logger := zerolog.New(os.Stdout).Level(zerolog.WarnLevel).Hook(hook) + logger := zerolog.New(os.Stdout).Level(zerolog.ErrorLevel).Hook(hook) authorizedSenderValidator := validator.AuthorizedSenderValidator(logger, channel, func(pid peer.ID) 
(*flow.Identity, bool) { fid, err := translator.GetFlowID(pid) diff --git a/network/proxy/network_test.go b/network/proxy/network_test.go index 40f9cd69469..e5d9046398a 100644 --- a/network/proxy/network_test.go +++ b/network/proxy/network_test.go @@ -44,7 +44,7 @@ func (suite *Suite) SetupTest() { suite.proxyNet = proxy.NewProxyNetwork(suite.net, suite.targetNodeID) suite.engine = new(mocknetwork.Engine) - net.On("Register", mock.AnythingOfType("network.Channel"), mock.Anything).Return(suite.con, nil) + net.On("Register", mock.AnythingOfType("channels.Channel"), mock.Anything).Return(suite.con, nil) } // TestUnicast tests that the Unicast method is translated to a unicast to the target node diff --git a/network/slashing/violations_consumer.go b/network/slashing/violations_consumer.go index f6a2411ad20..8564aec3f11 100644 --- a/network/slashing/violations_consumer.go +++ b/network/slashing/violations_consumer.go @@ -27,7 +27,7 @@ func NewSlashingViolationsConsumer(log zerolog.Logger) *SlashingViolationsConsum // OnUnAuthorizedSenderError logs a warning for unauthorized sender error func (c *SlashingViolationsConsumer) OnUnAuthorizedSenderError(identity *flow.Identity, peerID, msgType string, err error) { - c.log.Warn(). + c.log.Error(). Err(err). Str("peer_id", peerID). Str("role", identity.Role.String()). @@ -39,7 +39,7 @@ func (c *SlashingViolationsConsumer) OnUnAuthorizedSenderError(identity *flow.Id // OnUnknownMsgTypeError logs a warning for unknown message type error func (c *SlashingViolationsConsumer) OnUnknownMsgTypeError(identity *flow.Identity, peerID, msgType string, err error) { - c.log.Warn(). + c.log.Error(). Err(err). Str("peer_id", peerID). Str("role", identity.Role.String()). 
@@ -51,7 +51,7 @@ func (c *SlashingViolationsConsumer) OnUnknownMsgTypeError(identity *flow.Identi // OnSenderEjectedError logs a warning for sender ejected error func (c *SlashingViolationsConsumer) OnSenderEjectedError(identity *flow.Identity, peerID, msgType string, err error) { - c.log.Warn(). + c.log.Error(). Err(err). Str("peer_id", peerID). Str("role", identity.Role.String()). diff --git a/utils/unittest/network/network.go b/utils/unittest/network/network.go index 900139752be..8b8b40eb525 100644 --- a/utils/unittest/network/network.go +++ b/utils/unittest/network/network.go @@ -93,7 +93,7 @@ func NewEngine() *Engine { // OnProcess specifies the callback that should be executed when `Process` is called on this mock engine. func (e *Engine) OnProcess(processFunc EngineProcessFunc) *Engine { - e.On("Process", mock.AnythingOfType("network.Channel"), mock.AnythingOfType("flow.Identifier"), mock.Anything). + e.On("Process", mock.AnythingOfType("channels.Channel"), mock.AnythingOfType("flow.Identifier"), mock.Anything). Return((func(channels.Channel, flow.Identifier, interface{}) error)(processFunc)) return e From ead889168a9afadeaff7c56b88b45ac8eeb59db4 Mon Sep 17 00:00:00 2001 From: danielholmes839 Date: Tue, 28 Jun 2022 16:03:53 -0400 Subject: [PATCH 051/223] fix lint --- consensus/hotstuff/follower_loop.go | 2 +- engine/common/synchronization/engine.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/consensus/hotstuff/follower_loop.go b/consensus/hotstuff/follower_loop.go index 5218f7ec568..ad46de4d11f 100644 --- a/consensus/hotstuff/follower_loop.go +++ b/consensus/hotstuff/follower_loop.go @@ -77,7 +77,7 @@ func (fl *FollowerLoop) loop() { case p := <-fl.proposals: err := fl.followerLogic.AddBlock(p.Proposal) close(p.done) - + if err != nil { // all errors are fatal fl.log.Error(). Hex("block_id", logging.ID(p.Block.BlockID)). 
diff --git a/engine/common/synchronization/engine.go b/engine/common/synchronization/engine.go index abb66ddea83..c3ae630788d 100644 --- a/engine/common/synchronization/engine.go +++ b/engine/common/synchronization/engine.go @@ -311,7 +311,7 @@ func (e *Engine) onBlockResponse(originID flow.Identifier, res *messages.BlockRe OriginID: originID, Block: block, } - + e.comp.SubmitLocal(synced) } } From 28f52cb89e0b25d4726d00043ac52f3e594ad2fe Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Tue, 28 Jun 2022 16:23:46 -0400 Subject: [PATCH 052/223] fix imports , add nolint statement --- apiproxy/access_api_proxy_test.go | 2 +- cmd/access/node_builder/access_node_builder.go | 2 +- cmd/collection/main.go | 2 +- cmd/consensus/main.go | 2 +- engine/access/relay/engine.go | 2 +- engine/access/relay/engine_test.go | 2 +- engine/access/relay/example_test.go | 2 +- follower/follower_builder.go | 3 +-- network/test/echoengine.go | 2 +- network/test/meshengine.go | 2 +- network/topology/cache.go | 2 +- 11 files changed, 11 insertions(+), 12 deletions(-) diff --git a/apiproxy/access_api_proxy_test.go b/apiproxy/access_api_proxy_test.go index 3a2cf28bb17..e8ee18f9258 100644 --- a/apiproxy/access_api_proxy_test.go +++ b/apiproxy/access_api_proxy_test.go @@ -245,7 +245,7 @@ func openFlowLite(address string) error { c, err := grpc.Dial( "unix://"+address, grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(grpcutils.DefaultMaxMsgSize)), - grpc.WithInsecure()) + grpc.WithInsecure()) //nolint:staticcheck if err != nil { return err } diff --git a/cmd/access/node_builder/access_node_builder.go b/cmd/access/node_builder/access_node_builder.go index 56a6ed77b19..737ce5544af 100644 --- a/cmd/access/node_builder/access_node_builder.go +++ b/cmd/access/node_builder/access_node_builder.go @@ -15,7 +15,6 @@ import ( "github.com/libp2p/go-libp2p-core/host" "github.com/libp2p/go-libp2p-core/routing" pubsub "github.com/libp2p/go-libp2p-pubsub" - "github.com/onflow/flow-go/network/channels" 
"github.com/onflow/flow/protobuf/go/flow/access" "github.com/rs/zerolog" "github.com/spf13/pflag" @@ -57,6 +56,7 @@ import ( "github.com/onflow/flow-go/module/synchronization" "github.com/onflow/flow-go/network" netcache "github.com/onflow/flow-go/network/cache" + "github.com/onflow/flow-go/network/channels" cborcodec "github.com/onflow/flow-go/network/codec/cbor" "github.com/onflow/flow-go/network/compressor" "github.com/onflow/flow-go/network/p2p" diff --git a/cmd/collection/main.go b/cmd/collection/main.go index 1a1c14f9e53..fd671fb5454 100644 --- a/cmd/collection/main.go +++ b/cmd/collection/main.go @@ -4,7 +4,6 @@ import ( "fmt" "time" - "github.com/onflow/flow-go/network/channels" "github.com/spf13/pflag" "github.com/onflow/flow-go/cmd/util/cmd/common" @@ -42,6 +41,7 @@ import ( epochpool "github.com/onflow/flow-go/module/mempool/epochs" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/module/synchronization" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/state/protocol" badgerState "github.com/onflow/flow-go/state/protocol/badger" "github.com/onflow/flow-go/state/protocol/blocktimer" diff --git a/cmd/consensus/main.go b/cmd/consensus/main.go index 4968e545aac..6066e37732e 100644 --- a/cmd/consensus/main.go +++ b/cmd/consensus/main.go @@ -10,7 +10,6 @@ import ( "path/filepath" "time" - "github.com/onflow/flow-go/network/channels" "github.com/spf13/pflag" client "github.com/onflow/flow-go-sdk/access/grpc" @@ -59,6 +58,7 @@ import ( "github.com/onflow/flow-go/module/synchronization" "github.com/onflow/flow-go/module/updatable_configs" "github.com/onflow/flow-go/module/validation" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/state/protocol" badgerState "github.com/onflow/flow-go/state/protocol/badger" "github.com/onflow/flow-go/state/protocol/blocktimer" diff --git a/engine/access/relay/engine.go b/engine/access/relay/engine.go index 06691501a4b..d277b102c0f 100644 --- 
a/engine/access/relay/engine.go +++ b/engine/access/relay/engine.go @@ -3,12 +3,12 @@ package relay import ( "fmt" - "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" ) // Relay engine relays all the messages that are received to the given network for the corresponding channel diff --git a/engine/access/relay/engine_test.go b/engine/access/relay/engine_test.go index bae3037f169..3afe442c093 100644 --- a/engine/access/relay/engine_test.go +++ b/engine/access/relay/engine_test.go @@ -3,12 +3,12 @@ package relay import ( "testing" - "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/suite" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/mocknetwork" "github.com/onflow/flow-go/utils/unittest" ) diff --git a/engine/access/relay/example_test.go b/engine/access/relay/example_test.go index d7036a865d6..6574dce4567 100644 --- a/engine/access/relay/example_test.go +++ b/engine/access/relay/example_test.go @@ -4,12 +4,12 @@ import ( "fmt" "math/rand" - "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/onflow/flow-go/engine/access/relay" splitterNetwork "github.com/onflow/flow-go/engine/common/splitter/network" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/network/channels" testnet "github.com/onflow/flow-go/utils/unittest/network" ) diff --git a/follower/follower_builder.go b/follower/follower_builder.go index efc38ef70dc..c9bdc672aa3 100644 --- a/follower/follower_builder.go +++ b/follower/follower_builder.go @@ -12,8 +12,6 @@ import ( "github.com/libp2p/go-libp2p-core/routing" dht "github.com/libp2p/go-libp2p-kad-dht" p2ppubsub "github.com/libp2p/go-libp2p-pubsub" - 
"github.com/onflow/flow-go/network/channels" - followereng "github.com/onflow/flow-go/engine/common/follower" finalizer "github.com/onflow/flow-go/module/finalizer/consensus" cborcodec "github.com/onflow/flow-go/network/codec/cbor" @@ -43,6 +41,7 @@ import ( "github.com/onflow/flow-go/module/upstream" "github.com/onflow/flow-go/network" netcache "github.com/onflow/flow-go/network/cache" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/converter" "github.com/onflow/flow-go/network/p2p" "github.com/onflow/flow-go/network/p2p/keyutils" diff --git a/network/test/echoengine.go b/network/test/echoengine.go index fc24b689989..b5e05cc50a4 100644 --- a/network/test/echoengine.go +++ b/network/test/echoengine.go @@ -6,13 +6,13 @@ import ( "sync" "testing" - "github.com/onflow/flow-go/network/channels" "github.com/stretchr/testify/require" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/libp2p/message" mockcomponent "github.com/onflow/flow-go/module/component/mock" "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" ) // EchoEngine is a simple engine that is used for testing the correctness of diff --git a/network/test/meshengine.go b/network/test/meshengine.go index 109a72c54af..35d94293759 100644 --- a/network/test/meshengine.go +++ b/network/test/meshengine.go @@ -5,12 +5,12 @@ import ( "sync" "testing" - "github.com/onflow/flow-go/network/channels" "github.com/stretchr/testify/require" "github.com/onflow/flow-go/model/flow" mockcomponent "github.com/onflow/flow-go/module/component/mock" "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" ) // MeshEngine is a simple engine that is used for testing the correctness of diff --git a/network/topology/cache.go b/network/topology/cache.go index 1d0938dff23..6b4d904c1e6 100644 --- a/network/topology/cache.go +++ b/network/topology/cache.go @@ -1,11 +1,11 @@ package topology import ( - 
"github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/utils/logging" ) From e2d36a09553822ae92e5461958bf198e5b945907 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Tue, 28 Jun 2022 16:24:38 -0400 Subject: [PATCH 053/223] fix lint --- cmd/execution_builder.go | 2 +- network/topology/fixedListTopology.go | 2 +- network/topology/fullyConnectedTopology.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cmd/execution_builder.go b/cmd/execution_builder.go index 44be0aae37d..92b30d3f36b 100644 --- a/cmd/execution_builder.go +++ b/cmd/execution_builder.go @@ -16,7 +16,6 @@ import ( badger "github.com/ipfs/go-ds-badger2" "github.com/onflow/cadence/runtime" "github.com/onflow/flow-core-contracts/lib/go/templates" - "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/shirou/gopsutil/v3/cpu" "github.com/shirou/gopsutil/v3/host" @@ -65,6 +64,7 @@ import ( "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/module/state_synchronization" chainsync "github.com/onflow/flow-go/module/synchronization" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/compressor" "github.com/onflow/flow-go/network/p2p" "github.com/onflow/flow-go/state/protocol" diff --git a/network/topology/fixedListTopology.go b/network/topology/fixedListTopology.go index 7baeabd47a9..75fcaefdf9d 100644 --- a/network/topology/fixedListTopology.go +++ b/network/topology/fixedListTopology.go @@ -1,12 +1,12 @@ package topology import ( - "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/state/protocol" ) diff --git 
a/network/topology/fullyConnectedTopology.go b/network/topology/fullyConnectedTopology.go index d8dba83af3a..9a46adbc412 100644 --- a/network/topology/fullyConnectedTopology.go +++ b/network/topology/fullyConnectedTopology.go @@ -1,11 +1,11 @@ package topology import ( - "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/state/protocol" ) From d8d1720e804a80a2b9a4ec9417cab8626da9d7e1 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Tue, 28 Jun 2022 16:25:30 -0400 Subject: [PATCH 054/223] fix lint --- cmd/observer/node_builder/observer_builder.go | 1 + follower/follower_builder.go | 1 + network/test/blob_service_test.go | 3 ++- network/test/echoengine_test.go | 3 ++- network/test/meshengine_test.go | 3 ++- network/test/middleware_test.go | 3 ++- network/test/testUtil.go | 3 ++- network/topology/cache_test.go | 3 ++- network/topology/helper.go | 3 ++- network/topology/randomizedTopology_test.go | 3 ++- network/topology/topicBasedTopology.go | 3 ++- network/topology/topicBasedTopology_test.go | 3 ++- 12 files changed, 22 insertions(+), 10 deletions(-) diff --git a/cmd/observer/node_builder/observer_builder.go b/cmd/observer/node_builder/observer_builder.go index 1871d930cf9..cad5d8e775d 100644 --- a/cmd/observer/node_builder/observer_builder.go +++ b/cmd/observer/node_builder/observer_builder.go @@ -17,6 +17,7 @@ import ( "github.com/libp2p/go-libp2p-core/routing" dht "github.com/libp2p/go-libp2p-kad-dht" p2ppubsub "github.com/libp2p/go-libp2p-pubsub" + "github.com/onflow/flow-go/network/channels" sdkcrypto "github.com/onflow/flow-go-sdk/crypto" diff --git a/follower/follower_builder.go b/follower/follower_builder.go index c9bdc672aa3..e601df65354 100644 --- a/follower/follower_builder.go +++ b/follower/follower_builder.go @@ -12,6 +12,7 @@ import ( "github.com/libp2p/go-libp2p-core/routing" dht 
"github.com/libp2p/go-libp2p-kad-dht" p2ppubsub "github.com/libp2p/go-libp2p-pubsub" + followereng "github.com/onflow/flow-go/engine/common/follower" finalizer "github.com/onflow/flow-go/module/finalizer/consensus" cborcodec "github.com/onflow/flow-go/network/codec/cbor" diff --git a/network/test/blob_service_test.go b/network/test/blob_service_test.go index db02f4206db..39db5c449db 100644 --- a/network/test/blob_service_test.go +++ b/network/test/blob_service_test.go @@ -11,11 +11,12 @@ import ( "github.com/ipfs/go-datastore" "github.com/ipfs/go-datastore/sync" blockstore "github.com/ipfs/go-ipfs-blockstore" - "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/stretchr/testify/suite" "go.uber.org/atomic" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/utils/unittest" "github.com/onflow/flow-go/model/flow" diff --git a/network/test/echoengine_test.go b/network/test/echoengine_test.go index 991509e9c5d..7dad62eb6ba 100644 --- a/network/test/echoengine_test.go +++ b/network/test/echoengine_test.go @@ -10,12 +10,13 @@ import ( "time" "github.com/ipfs/go-log" - "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/libp2p/message" "github.com/onflow/flow-go/network" diff --git a/network/test/meshengine_test.go b/network/test/meshengine_test.go index e560e1670ae..be68c01963b 100644 --- a/network/test/meshengine_test.go +++ b/network/test/meshengine_test.go @@ -13,12 +13,13 @@ import ( "github.com/ipfs/go-log" pubsub "github.com/libp2p/go-libp2p-pubsub" - "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + 
"github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" "github.com/onflow/flow-go/model/libp2p/message" diff --git a/network/test/middleware_test.go b/network/test/middleware_test.go index 79cfc829f89..a14a70ad5ff 100644 --- a/network/test/middleware_test.go +++ b/network/test/middleware_test.go @@ -10,12 +10,13 @@ import ( "github.com/ipfs/go-log" swarm "github.com/libp2p/go-libp2p-swarm" - "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" mockery "github.com/stretchr/testify/mock" + "github.com/onflow/flow-go/network/channels" + "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" diff --git a/network/test/testUtil.go b/network/test/testUtil.go index e35a6375409..6ed537be9b6 100644 --- a/network/test/testUtil.go +++ b/network/test/testUtil.go @@ -16,10 +16,11 @@ import ( "github.com/libp2p/go-libp2p-core/routing" dht "github.com/libp2p/go-libp2p-kad-dht" pubsub "github.com/libp2p/go-libp2p-pubsub" - "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/stretchr/testify/require" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/model/flow" diff --git a/network/topology/cache_test.go b/network/topology/cache_test.go index edfbbf03e73..e74c5c7c343 100644 --- a/network/topology/cache_test.go +++ b/network/topology/cache_test.go @@ -5,11 +5,12 @@ import ( "os" "testing" - "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" "github.com/onflow/flow-go/network/mocknetwork" diff --git a/network/topology/helper.go b/network/topology/helper.go index fd4e8276cfd..7b46b1fc509 100644 
--- a/network/topology/helper.go +++ b/network/topology/helper.go @@ -5,10 +5,11 @@ import ( "math" "testing" - "github.com/onflow/flow-go/network/channels" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/factory" "github.com/onflow/flow-go/model/flow/filter" diff --git a/network/topology/randomizedTopology_test.go b/network/topology/randomizedTopology_test.go index 1a39ff617a3..6df7c67e480 100644 --- a/network/topology/randomizedTopology_test.go +++ b/network/topology/randomizedTopology_test.go @@ -5,11 +5,12 @@ import ( "sort" "testing" - "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" "github.com/onflow/flow-go/state/protocol" diff --git a/network/topology/topicBasedTopology.go b/network/topology/topicBasedTopology.go index 0050d219257..d2c2e5d1822 100644 --- a/network/topology/topicBasedTopology.go +++ b/network/topology/topicBasedTopology.go @@ -3,9 +3,10 @@ package topology import ( "fmt" - "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" "github.com/onflow/flow-go/network" diff --git a/network/topology/topicBasedTopology_test.go b/network/topology/topicBasedTopology_test.go index 724be3c5b6e..61b8c69bb9a 100644 --- a/network/topology/topicBasedTopology_test.go +++ b/network/topology/topicBasedTopology_test.go @@ -5,11 +5,12 @@ import ( "sort" "testing" - "github.com/onflow/flow-go/network/channels" "github.com/rs/zerolog" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + 
"github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" "github.com/onflow/flow-go/state/protocol" From 6375e105e6e8d8706f5db58dc513a24c548e465f Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Tue, 28 Jun 2022 22:48:08 -0400 Subject: [PATCH 055/223] fix find and replace errors --- cmd/util/cmd/epochs/cmd/templates.go | 6 +- engine/access/rest/request/script_test.go | 2 +- engine/access/rest/scripts_test.go | 4 +- engine/execution/ingestion/engine_test.go | 6 +- fvm/blueprints/epochs.go | 4 +- fvm/fvm_bench_test.go | 36 ++++---- fvm/fvm_blockcontext_test.go | 12 +-- fvm/fvm_signature_test.go | 2 +- fvm/fvm_test.go | 4 +- fvm/handler/programs_test.go | 8 +- .../integration/test/composability_test.go | 6 +- integration/dkg/dkg_emulator_suite.go | 2 +- integration/utils/scripts.go | 10 +-- integration/utils/tx_stats_tracker_test.go | 2 +- ledger/complete/mtrie/trie/trie_test.go | 2 +- model/convert/fixtures/fixture.go | 90 +++++++++---------- model/convert/fixtures_test.go | 90 +++++++++---------- model/flow/identifierList.go | 6 +- model/flow/ledger_test.go | 2 +- model/flow/role.go | 6 +- model/flow/service_event.go | 6 +- module/dkg/client.go | 2 +- module/epochs/epoch_config.go | 2 +- network/channels/channel.go | 6 +- network/message/message.pb.go | 4 +- 25 files changed, 160 insertions(+), 160 deletions(-) diff --git a/cmd/util/cmd/epochs/cmd/templates.go b/cmd/util/cmd/epochs/cmd/templates.go index 97082f85a99..7b05a95bb5f 100644 --- a/cmd/util/cmd/epochs/cmd/templates.go +++ b/cmd/util/cmd/epochs/cmd/templates.go @@ -10,7 +10,7 @@ var deployEpochTransactionTemplate = ` // This transaction is needed to adjust the numViewsInEpoch and numViewsInStakingAuction // value based on the current block when epochs is deployed -transaction(name: Name, +transaction(name: String, currentEpochCounter: UInt64, // this value should be the number of views in the epoch, as computed from the // first 
and final views of the epoch info from the protocol state @@ -19,7 +19,7 @@ transaction(name: Name, numViewsInDKGPhase: UInt64, numCollectorClusters: UInt16, FLOWsupplyIncreasePercentage: UFix64, - randomSource: Name) { + randomSource: String) { prepare(signer: AuthAccount) { @@ -51,6 +51,6 @@ transaction(name: Name, // the below arguments are unused and are safe to be left empty collectorClusters: [] as [FlowClusterQC.Cluster], clusterQCs: [] as [FlowClusterQC.ClusterQC], - dkgPubKeys: [] as [Name]) + dkgPubKeys: [] as [String]) } }` diff --git a/engine/access/rest/request/script_test.go b/engine/access/rest/request/script_test.go index 359a32c19a9..ab74ae86ea5 100644 --- a/engine/access/rest/request/script_test.go +++ b/engine/access/rest/request/script_test.go @@ -31,7 +31,7 @@ func TestScript_InvalidParse(t *testing.T) { } func TestScript_ValidParse(t *testing.T) { - arg1 := []byte(`{"type": "Name", "value": "hello" }`) + arg1 := []byte(`{"type": "String", "value": "hello" }`) body := strings.NewReader(fmt.Sprintf( `{ "script": "%s", "arguments": ["%s"] }`, validBodyEncoded, diff --git a/engine/access/rest/scripts_test.go b/engine/access/rest/scripts_test.go index 40b9b3fca7a..7e3271c1d81 100644 --- a/engine/access/rest/scripts_test.go +++ b/engine/access/rest/scripts_test.go @@ -39,8 +39,8 @@ func scriptReq(id string, height string, body interface{}) *http.Request { } func TestScripts(t *testing.T) { - validCode := []byte(`pub fun main(foo: Name): Name { return foo }`) - validArgs := []byte(`{ "type": "Name", "value": "hello world" }`) + validCode := []byte(`pub fun main(foo: String): String { return foo }`) + validArgs := []byte(`{ "type": "String", "value": "hello world" }`) validBody := map[string]interface{}{ "script": util.ToBase64(validCode), "arguments": []string{util.ToBase64(validArgs)}, diff --git a/engine/execution/ingestion/engine_test.go b/engine/execution/ingestion/engine_test.go index 8e57296b4bf..1b11292646d 100644 --- 
a/engine/execution/ingestion/engine_test.go +++ b/engine/execution/ingestion/engine_test.go @@ -14,7 +14,7 @@ import ( "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - engineCommon "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/engine/execution" @@ -147,7 +147,7 @@ func runWithEngine(t *testing.T, f func(testingContext)) { request.EXPECT().Force().Return().AnyTimes() - net.EXPECT().Register(gomock.Eq(engineCommon.SyncExecution), gomock.AssignableToTypeOf(engine)).Return(syncConduit, nil) + net.EXPECT().Register(gomock.Eq(channels.SyncExecution), gomock.AssignableToTypeOf(engine)).Return(syncConduit, nil) deltas, err := NewDeltas(1000) require.NoError(t, err) @@ -1185,7 +1185,7 @@ func newIngestionEngine(t *testing.T, ps *mocks.ProtocolState, es *mocks.Executi request := module.NewMockRequester(ctrl) syncConduit := &mocknetwork.Conduit{} var engine *Engine - net.EXPECT().Register(gomock.Eq(engineCommon.SyncExecution), gomock.AssignableToTypeOf(engine)).Return(syncConduit, nil) + net.EXPECT().Register(gomock.Eq(channels.SyncExecution), gomock.AssignableToTypeOf(engine)).Return(syncConduit, nil) // generates signing identity including staking key for signing seed := make([]byte, crypto.KeyGenSeedMinLenBLSBLS12381) diff --git a/fvm/blueprints/epochs.go b/fvm/blueprints/epochs.go index 9169e2cc237..90c368ae9e1 100644 --- a/fvm/blueprints/epochs.go +++ b/fvm/blueprints/epochs.go @@ -28,7 +28,7 @@ transaction { const deployEpochTransactionTemplate = ` import FlowClusterQC from 0x%s -transaction(clusterWeights: [{Name: UInt64}]) { +transaction(clusterWeights: [{String: UInt64}]) { prepare(serviceAccount: AuthAccount) { // first, construct Cluster objects from cluster weights @@ -52,7 +52,7 @@ transaction(clusterWeights: [{Name: UInt64}]) { collectorClusters: clusters, // NOTE: clusterQCs and dkgPubKeys are empty because these initial values 
are not used clusterQCs: [] as [FlowClusterQC.ClusterQC], - dkgPubKeys: [] as [Name], + dkgPubKeys: [] as [String], ) } } diff --git a/fvm/fvm_bench_test.go b/fvm/fvm_bench_test.go index 60fdf34d3c9..e8c307fe631 100644 --- a/fvm/fvm_bench_test.go +++ b/fvm/fvm_bench_test.go @@ -87,9 +87,9 @@ func (account *TestBenchAccount) AddArrayToStorage(b *testing.B, blockExec TestB serviceAccount := blockExec.ServiceAccount(b) txBody := flow.NewTransactionBody(). SetScript([]byte(` - transaction(list: [Name]) { + transaction(list: [String]) { prepare(acct: AuthAccount) { - acct.load<[Name]>(from: /storage/test) + acct.load<[String]>(from: /storage/test) acct.save(list, to: /storage/test) } execute {} @@ -446,13 +446,13 @@ func BenchmarkRuntimeTransaction(b *testing.B) { }) b.Run("load and save empty string on signers address", func(b *testing.B) { benchTransaction(b, templateTx(100, ` - signer.load(from: /storage/testpath) + signer.load(from: /storage/testpath) signer.save("", to: /storage/testpath) `)) }) b.Run("load and save long string on signers address", func(b *testing.B) { benchTransaction(b, templateTx(100, fmt.Sprintf(` - signer.load(from: /storage/testpath) + signer.load(from: /storage/testpath) signer.save("%s", to: /storage/testpath) `, longString))) }) @@ -467,7 +467,7 @@ func BenchmarkRuntimeTransaction(b *testing.B) { }) b.Run("borrow array from storage", func(b *testing.B) { benchTransaction(b, templateTx(100, ` - let strings = signer.borrow<&[Name]>(from: /storage/test)! + let strings = signer.borrow<&[String]>(from: /storage/test)! var i = 0 while (i < strings.length) { log(strings[i]) @@ -477,7 +477,7 @@ func BenchmarkRuntimeTransaction(b *testing.B) { }) b.Run("copy array from storage", func(b *testing.B) { benchTransaction(b, templateTx(100, ` - let strings = signer.copy<[Name]>(from: /storage/test)! + let strings = signer.copy<[String]>(from: /storage/test)! 
var i = 0 while (i < strings.length) { log(strings[i]) @@ -697,7 +697,7 @@ func deployBatchNFT(b *testing.B, be TestBenchBlockExecutor, owner *TestBenchAcc pub contract BatchNFT: NonFungibleToken { pub event ContractInitialized() - pub event PlayCreated(id: UInt32, metadata: {Name:Name}) + pub event PlayCreated(id: UInt32, metadata: {String:String}) pub event NewSeriesStarted(newCurrentSeries: UInt32) pub event SetCreated(setID: UInt32, series: UInt32) pub event PlayAddedToSet(setID: UInt32, playID: UInt32) @@ -717,9 +717,9 @@ func deployBatchNFT(b *testing.B, be TestBenchBlockExecutor, owner *TestBenchAcc pub struct Play { pub let playID: UInt32 - pub let metadata: {Name: Name} + pub let metadata: {String: String} - init(metadata: {Name: Name}) { + init(metadata: {String: String}) { pre { metadata.length != 0: "New Play Metadata cannot be empty" } @@ -733,9 +733,9 @@ func deployBatchNFT(b *testing.B, be TestBenchBlockExecutor, owner *TestBenchAcc pub struct SetData { pub let setID: UInt32 - pub let name: Name + pub let name: String pub let series: UInt32 - init(name: Name) { + init(name: String) { pre { name.length > 0: "New Set name cannot be empty" } @@ -754,7 +754,7 @@ func deployBatchNFT(b *testing.B, be TestBenchBlockExecutor, owner *TestBenchAcc pub var locked: Bool pub var numberMintedPerPlay: {UInt32: UInt32} - init(name: Name) { + init(name: String) { self.setID = BatchNFT.nextSetID self.plays = [] self.retired = {} @@ -869,7 +869,7 @@ func deployBatchNFT(b *testing.B, be TestBenchBlockExecutor, owner *TestBenchAcc } pub resource Admin { - pub fun createPlay(metadata: {Name: Name}): UInt32 { + pub fun createPlay(metadata: {String: String}): UInt32 { var newPlay = Play(metadata: metadata) let newID = newPlay.playID @@ -878,7 +878,7 @@ func deployBatchNFT(b *testing.B, be TestBenchBlockExecutor, owner *TestBenchAcc return newID } - pub fun createSet(name: Name) { + pub fun createSet(name: String) { var newSet <- create Set(name: name) 
BatchNFT.sets[newSet.setID] <-! newSet @@ -993,11 +993,11 @@ func deployBatchNFT(b *testing.B, be TestBenchBlockExecutor, owner *TestBenchAcc return BatchNFT.playDatas.values } - pub fun getPlayMetaData(playID: UInt32): {Name: Name}? { + pub fun getPlayMetaData(playID: UInt32): {String: String}? { return self.playDatas[playID]?.metadata } - pub fun getPlayMetaDataByField(playID: UInt32, field: Name): Name? { + pub fun getPlayMetaDataByField(playID: UInt32, field: String): String? { if let play = BatchNFT.playDatas[playID] { return play.metadata[field] } else { @@ -1005,7 +1005,7 @@ func deployBatchNFT(b *testing.B, be TestBenchBlockExecutor, owner *TestBenchAcc } } - pub fun getSetName(setID: UInt32): Name? { + pub fun getSetName(setID: UInt32): String? { return BatchNFT.setDatas[setID]?.name } @@ -1013,7 +1013,7 @@ func deployBatchNFT(b *testing.B, be TestBenchBlockExecutor, owner *TestBenchAcc return BatchNFT.setDatas[setID]?.series } - pub fun getSetIDsByName(setName: Name): [UInt32]? { + pub fun getSetIDsByName(setName: String): [UInt32]? 
{ var setIDs: [UInt32] = [] for setData in BatchNFT.setDatas.values { diff --git a/fvm/fvm_blockcontext_test.go b/fvm/fvm_blockcontext_test.go index 3cd4bba197a..85b6944b150 100644 --- a/fvm/fvm_blockcontext_test.go +++ b/fvm/fvm_blockcontext_test.go @@ -93,7 +93,7 @@ func filterAccountCreatedEvents(events []flow.Event) []flow.Event { const auditContractForDeploymentTransactionTemplate = ` import FlowContractAudits from 0x%s -transaction(deployAddress: Address, code: Name) { +transaction(deployAddress: Address, code: String) { prepare(serviceAccount: AuthAccount) { let auditorAdmin = serviceAccount.borrow<&FlowContractAudits.Administrator>(from: FlowContractAudits.AdminStoragePath) @@ -333,7 +333,7 @@ func TestBlockContext_DeployContract(t *testing.T) { SetScript([]byte(` transaction { prepare(signer: AuthAccount) { - var s : Name = "" + var s : String = "" for name in signer.contracts.names { s = s.concat(name).concat(",") } @@ -756,7 +756,7 @@ func TestBlockContext_ExecuteTransaction_WithArguments(t *testing.T) { }, { label: "Multiple parameters", - script: `transaction(x: Int, y: Name) { execute { log(x); log(y) } }`, + script: `transaction(x: Int, y: String) { execute { log(x); log(y) } }`, args: [][]byte{arg1, arg2}, check: func(t *testing.T, tx *fvm.TransactionProcedure) { require.NoError(t, tx.Err) @@ -768,7 +768,7 @@ func TestBlockContext_ExecuteTransaction_WithArguments(t *testing.T) { { label: "Parameters and authorizer", script: ` - transaction(x: Int, y: Name) { + transaction(x: Int, y: String) { prepare(acct: AuthAccount) { log(acct.address) } execute { log(x); log(y) } }`, @@ -899,7 +899,7 @@ func TestBlockContext_ExecuteTransaction_StorageLimit(t *testing.T) { script := fmt.Sprintf(` access(all) contract Container { access(all) resource Counter { - pub var longString: Name + pub var longString: String init() { self.longString = "%s" } @@ -1014,7 +1014,7 @@ func TestBlockContext_ExecuteTransaction_InteractionLimitReached(t *testing.T) { script := 
fmt.Sprintf(` access(all) contract Container { access(all) resource Counter { - pub var longString: Name + pub var longString: String init() { self.longString = "%s" } diff --git a/fvm/fvm_signature_test.go b/fvm/fvm_signature_test.go index 97976adcec3..53d17f71f4c 100644 --- a/fvm/fvm_signature_test.go +++ b/fvm/fvm_signature_test.go @@ -768,7 +768,7 @@ func TestBLSMultiSignature(t *testing.T) { publicKeys: [[UInt8]], signatures: [[UInt8]], message: [UInt8], - tag: Name, + tag: String, ): Bool { let pks: [PublicKey] = [] for pk in publicKeys { diff --git a/fvm/fvm_test.go b/fvm/fvm_test.go index 694d0ea391f..225b6cb218c 100644 --- a/fvm/fvm_test.go +++ b/fvm/fvm_test.go @@ -184,7 +184,7 @@ func TestHashing(t *testing.T) { ` import Crypto - pub fun main(data: [UInt8], tag: Name): [UInt8] { + pub fun main(data: [UInt8], tag: String): [UInt8] { return Crypto.hashWithTag(data, tag: tag, algorithm: HashAlgorithm.%s) } `, hashName)) @@ -467,7 +467,7 @@ func TestEventLimits(t *testing.T) { testContract := ` access(all) contract TestContract { - access(all) event LargeEvent(value: Int256, str: Name, list: [UInt256], dic: {Name: Name}) + access(all) event LargeEvent(value: Int256, str: String, list: [UInt256], dic: {String: String}) access(all) fun EmitEvent() { var s: Int256 = 1024102410241024 var i = 0 diff --git a/fvm/handler/programs_test.go b/fvm/handler/programs_test.go index 6507e6b5218..18af736766d 100644 --- a/fvm/handler/programs_test.go +++ b/fvm/handler/programs_test.go @@ -39,7 +39,7 @@ func Test_Programs(t *testing.T) { contractA0Code := ` pub contract A { - pub fun hello(): Name { + pub fun hello(): String { return "bad version" } } @@ -47,7 +47,7 @@ func Test_Programs(t *testing.T) { contractACode := ` pub contract A { - pub fun hello(): Name { + pub fun hello(): String { return "hello from A" } } @@ -57,7 +57,7 @@ func Test_Programs(t *testing.T) { import A from 0xa pub contract B { - pub fun hello(): Name { + pub fun hello(): String { return "hello from B 
but also ".concat(A.hello()) } } @@ -67,7 +67,7 @@ func Test_Programs(t *testing.T) { import B from 0xb pub contract C { - pub fun hello(): Name { + pub fun hello(): String { return "hello from C, ".concat(B.hello()) } } diff --git a/insecure/integration/test/composability_test.go b/insecure/integration/test/composability_test.go index f28bc04c5d0..effec56b39a 100644 --- a/insecure/integration/test/composability_test.go +++ b/insecure/integration/test/composability_test.go @@ -9,7 +9,7 @@ import ( "github.com/stretchr/testify/require" - flownet "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/engine/testutil" "github.com/onflow/flow-go/insecure" @@ -56,7 +56,7 @@ func TestCorruptibleConduitFrameworkHappyPath(t *testing.T) { hub := stub.NewNetworkHub() originalEvent := &message.TestMessage{Text: "this is a test message"} - testChannel := flownet.Channel("test-channel") + testChannel := channels.Channel("test-channel") // corrupted node network corruptedEngine := &network.Engine{} @@ -75,7 +75,7 @@ func TestCorruptibleConduitFrameworkHappyPath(t *testing.T) { wg := &sync.WaitGroup{} wg.Add(1) - honestEngine.OnProcess(func(channel flownet.Channel, originId flow.Identifier, event interface{}) error { + honestEngine.OnProcess(func(channel channels.Channel, originId flow.Identifier, event interface{}) error { // implementing the process logic of the honest engine on reception of message from underlying network. require.Equal(t, testChannel, channel) // event must arrive at the channel set by orchestrator. require.Equal(t, corruptedIdentity.NodeID, originId) // origin id of the message must be the corrupted node. 
diff --git a/integration/dkg/dkg_emulator_suite.go b/integration/dkg/dkg_emulator_suite.go index ec0f615814a..603bd1116ff 100644 --- a/integration/dkg/dkg_emulator_suite.go +++ b/integration/dkg/dkg_emulator_suite.go @@ -393,7 +393,7 @@ func (s *DKGSuite) getResult() []string { script := fmt.Sprintf(` import FlowDKG from 0x%s - pub fun main(): [Name?]? { + pub fun main(): [String?]? { return FlowDKG.dkgCompleted() } `, s.env.DkgAddress, diff --git a/integration/utils/scripts.go b/integration/utils/scripts.go index 04a72f95fd7..689ed3e3ce3 100644 --- a/integration/utils/scripts.go +++ b/integration/utils/scripts.go @@ -127,7 +127,7 @@ access(all) contract MyFavContract { } // items - access(all) event NewItemAddedEvent(id: UInt32, metadata: {Name: Name}) + access(all) event NewItemAddedEvent(id: UInt32, metadata: {String: String}) access(self) var itemCounter: UInt32 @@ -135,9 +135,9 @@ access(all) contract MyFavContract { pub let itemID: UInt32 - pub let metadata: {Name: Name} + pub let metadata: {String: String} - init(_ metadata: {Name: Name}) { + init(_ metadata: {String: String}) { self.itemID = MyFavContract.itemCounter self.metadata = metadata @@ -151,7 +151,7 @@ access(all) contract MyFavContract { access(self) var items: [Item] - access(all) fun AddItem(_ metadata: {Name: Name}){ + access(all) fun AddItem(_ metadata: {String: String}){ let item = Item(metadata) self.items.append(item) } @@ -181,7 +181,7 @@ access(all) contract MyFavContract { log(i) } - access(all) event LargeEvent(value: Int256, str: Name, list: [UInt256], dic: {Name: Name}) + access(all) event LargeEvent(value: Int256, str: String, list: [UInt256], dic: {String: String}) // event heavy function access(all) fun EventHeavy() { diff --git a/integration/utils/tx_stats_tracker_test.go b/integration/utils/tx_stats_tracker_test.go index dd127b1a60f..f5dc7567db5 100644 --- a/integration/utils/tx_stats_tracker_test.go +++ b/integration/utils/tx_stats_tracker_test.go @@ -40,7 +40,7 @@ func 
TestTxStatsTracker(t *testing.T) { assert.InDelta(t, 10., st.TTS.Max(), 1.) } -// TestTxStatsTrackerString tests the Name() method. +// TestTxStatsTrackerString tests the String() method. func TestTxStatsTrackerString(t *testing.T) { st := NewTxStatsTracker() assert.Equal(t, "[]\n[]\n[]\n", st.String()) diff --git a/ledger/complete/mtrie/trie/trie_test.go b/ledger/complete/mtrie/trie/trie_test.go index 09113c58bd3..ecf02cbb94c 100644 --- a/ledger/complete/mtrie/trie/trie_test.go +++ b/ledger/complete/mtrie/trie/trie_test.go @@ -32,7 +32,7 @@ func Test_EmptyTrie(t *testing.T) { expectedRootHashHex := "568f4ec740fe3b5de88034cb7b1fbddb41548b068f31aebc8ae9189e429c5749" require.Equal(t, expectedRootHashHex, hashToString(rootHash)) - // check Name() method does not panic: + // check String() method does not panic: _ = emptyTrie.String() } diff --git a/model/convert/fixtures/fixture.go b/model/convert/fixtures/fixture.go index 03b6bdae2eb..cb59647824a 100644 --- a/model/convert/fixtures/fixture.go +++ b/model/convert/fixtures/fixture.go @@ -173,7 +173,7 @@ var epochSetupFixtureJSON = ` { "name": "id", "value": { - "type": "Name", + "type": "String", "value": "0000000000000000000000000000000000000000000000000000000000000001" } }, @@ -187,21 +187,21 @@ var epochSetupFixtureJSON = ` { "name": "networkingAddress", "value": { - "type": "Name", + "type": "String", "value": "1.flow.com" } }, { "name": "networkingKey", "value": { - "type": "Name", + "type": "String", "value": "378dbf45d85c614feb10d8bd4f78f4b6ef8eec7d987b937e123255444657fb3da031f232a507e323df3a6f6b8f50339c51d188e80c0e7a92420945cc6ca893fc" } }, { "name": "stakingKey", "value": { - "type": "Name", + "type": "String", "value": "af4aade26d76bb2ab15dcc89adcef82a51f6f04b3cb5f4555214b40ec89813c7a5f95776ea4fe449de48166d0bbc59b919b7eabebaac9614cf6f9461fac257765415f4d8ef1376a2365ec9960121888ea5383d88a140c24c29962b0a14e4e4e7" } }, @@ -279,7 +279,7 @@ var epochSetupFixtureJSON = ` { "name": "id", "value": { - "type": "Name", 
+ "type": "String", "value": "0000000000000000000000000000000000000000000000000000000000000002" } }, @@ -293,21 +293,21 @@ var epochSetupFixtureJSON = ` { "name": "networkingAddress", "value": { - "type": "Name", + "type": "String", "value": "2.flow.com" } }, { "name": "networkingKey", "value": { - "type": "Name", + "type": "String", "value": "378dbf45d85c614feb10d8bd4f78f4b6ef8eec7d987b937e123255444657fb3da031f232a507e323df3a6f6b8f50339c51d188e80c0e7a92420945cc6ca893fc" } }, { "name": "stakingKey", "value": { - "type": "Name", + "type": "String", "value": "af4aade26d76bb2ab15dcc89adcef82a51f6f04b3cb5f4555214b40ec89813c7a5f95776ea4fe449de48166d0bbc59b919b7eabebaac9614cf6f9461fac257765415f4d8ef1376a2365ec9960121888ea5383d88a140c24c29962b0a14e4e4e7" } }, @@ -385,7 +385,7 @@ var epochSetupFixtureJSON = ` { "name": "id", "value": { - "type": "Name", + "type": "String", "value": "0000000000000000000000000000000000000000000000000000000000000003" } }, @@ -399,21 +399,21 @@ var epochSetupFixtureJSON = ` { "name": "networkingAddress", "value": { - "type": "Name", + "type": "String", "value": "3.flow.com" } }, { "name": "networkingKey", "value": { - "type": "Name", + "type": "String", "value": "378dbf45d85c614feb10d8bd4f78f4b6ef8eec7d987b937e123255444657fb3da031f232a507e323df3a6f6b8f50339c51d188e80c0e7a92420945cc6ca893fc" } }, { "name": "stakingKey", "value": { - "type": "Name", + "type": "String", "value": "af4aade26d76bb2ab15dcc89adcef82a51f6f04b3cb5f4555214b40ec89813c7a5f95776ea4fe449de48166d0bbc59b919b7eabebaac9614cf6f9461fac257765415f4d8ef1376a2365ec9960121888ea5383d88a140c24c29962b0a14e4e4e7" } }, @@ -491,7 +491,7 @@ var epochSetupFixtureJSON = ` { "name": "id", "value": { - "type": "Name", + "type": "String", "value": "0000000000000000000000000000000000000000000000000000000000000004" } }, @@ -505,21 +505,21 @@ var epochSetupFixtureJSON = ` { "name": "networkingAddress", "value": { - "type": "Name", + "type": "String", "value": "4.flow.com" } }, { "name": 
"networkingKey", "value": { - "type": "Name", + "type": "String", "value": "378dbf45d85c614feb10d8bd4f78f4b6ef8eec7d987b937e123255444657fb3da031f232a507e323df3a6f6b8f50339c51d188e80c0e7a92420945cc6ca893fc" } }, { "name": "stakingKey", "value": { - "type": "Name", + "type": "String", "value": "af4aade26d76bb2ab15dcc89adcef82a51f6f04b3cb5f4555214b40ec89813c7a5f95776ea4fe449de48166d0bbc59b919b7eabebaac9614cf6f9461fac257765415f4d8ef1376a2365ec9960121888ea5383d88a140c24c29962b0a14e4e4e7" } }, @@ -597,7 +597,7 @@ var epochSetupFixtureJSON = ` { "name": "id", "value": { - "type": "Name", + "type": "String", "value": "0000000000000000000000000000000000000000000000000000000000000011" } }, @@ -611,21 +611,21 @@ var epochSetupFixtureJSON = ` { "name": "networkingAddress", "value": { - "type": "Name", + "type": "String", "value": "11.flow.com" } }, { "name": "networkingKey", "value": { - "type": "Name", + "type": "String", "value": "cfdfe8e4362c8f79d11772cb7277ab16e5033a63e8dd5d34caf1b041b77e5b2d63c2072260949ccf8907486e4cfc733c8c42ca0e4e208f30470b0d950856cd47" } }, { "name": "stakingKey", "value": { - "type": "Name", + "type": "String", "value": "8207559cd7136af378bba53a8f0196dee3849a3ab02897c1995c3e3f6ca0c4a776c3ae869d1ddbb473090054be2400ad06d7910aa2c5d1780220fdf3765a3c1764bce10c6fe66a5a2be51a422e878518bd750424bb56b8a0ecf0f8ad2057e83f" } }, @@ -703,7 +703,7 @@ var epochSetupFixtureJSON = ` { "name": "id", "value": { - "type": "Name", + "type": "String", "value": "0000000000000000000000000000000000000000000000000000000000000021" } }, @@ -717,21 +717,21 @@ var epochSetupFixtureJSON = ` { "name": "networkingAddress", "value": { - "type": "Name", + "type": "String", "value": "21.flow.com" } }, { "name": "networkingKey", "value": { - "type": "Name", + "type": "String", "value": "d64318ba0dbf68f3788fc81c41d507c5822bf53154530673127c66f50fe4469ccf1a054a868a9f88506a8999f2386d86fcd2b901779718cba4fb53c2da258f9e" } }, { "name": "stakingKey", "value": { - "type": "Name", + "type": 
"String", "value": "880b162b7ec138b36af401d07868cb08d25746d905395edbb4625bdf105d4bb2b2f4b0f4ae273a296a6efefa7ce9ccb914e39947ce0e83745125cab05d62516076ff0173ed472d3791ccef937597c9ea12381d76f547a092a4981d77ff3fba83" } }, @@ -809,7 +809,7 @@ var epochSetupFixtureJSON = ` { "name": "id", "value": { - "type": "Name", + "type": "String", "value": "0000000000000000000000000000000000000000000000000000000000000031" } }, @@ -823,21 +823,21 @@ var epochSetupFixtureJSON = ` { "name": "networkingAddress", "value": { - "type": "Name", + "type": "String", "value": "31.flow.com" } }, { "name": "networkingKey", "value": { - "type": "Name", + "type": "String", "value": "697241208dcc9142b6f53064adc8ff1c95760c68beb2ba083c1d005d40181fd7a1b113274e0163c053a3addd47cd528ec6a1f190cf465aac87c415feaae011ae" } }, { "name": "stakingKey", "value": { - "type": "Name", + "type": "String", "value": "b1f97d0a06020eca97352e1adde72270ee713c7daf58da7e74bf72235321048b4841bdfc28227964bf18e371e266e32107d238358848bcc5d0977a0db4bda0b4c33d3874ff991e595e0f537c7b87b4ddce92038ebc7b295c9ea20a1492302aa7" } }, @@ -948,7 +948,7 @@ var epochSetupFixtureJSON = ` "value": [ { "key": { - "type": "Name", + "type": "String", "value": "0000000000000000000000000000000000000000000000000000000000000001" }, "value": { @@ -958,7 +958,7 @@ var epochSetupFixtureJSON = ` }, { "key": { - "type": "Name", + "type": "String", "value": "0000000000000000000000000000000000000000000000000000000000000002" }, "value": { @@ -1005,7 +1005,7 @@ var epochSetupFixtureJSON = ` "value": [ { "key": { - "type": "Name", + "type": "String", "value": "0000000000000000000000000000000000000000000000000000000000000003" }, "value": { @@ -1015,7 +1015,7 @@ var epochSetupFixtureJSON = ` }, { "key": { - "type": "Name", + "type": "String", "value": "0000000000000000000000000000000000000000000000000000000000000004" }, "value": { @@ -1049,7 +1049,7 @@ var epochSetupFixtureJSON = ` { "name": "randomSource", "value": { - "type": "Name", + "type": "String", 
"value": "01020304" } }, @@ -1115,11 +1115,11 @@ var epochCommitFixtureJSON = ` "type": "Array", "value": [ { - "type": "Name", + "type": "String", "value": "a39cd1e1bf7e2fb0609b7388ce5215a6a4c01eef2aee86e1a007faa28a6b2a3dc876e11bb97cdb26c3846231d2d01e4d" }, { - "type": "Name", + "type": "String", "value": "91673ad9c717d396c9a0953617733c128049ac1a639653d4002ab245b121df1939430e313bcbfd06948f6a281f6bf853" } ] @@ -1128,7 +1128,7 @@ var epochCommitFixtureJSON = ` { "name": "voteMessage", "value": { - "type": "Name", + "type": "String", "value": "irrelevant_for_these_purposes" } }, @@ -1138,11 +1138,11 @@ var epochCommitFixtureJSON = ` "type": "Array", "value": [ { - "type": "Name", + "type": "String", "value": "0000000000000000000000000000000000000000000000000000000000000001" }, { - "type": "Name", + "type": "String", "value": "0000000000000000000000000000000000000000000000000000000000000002" } ] @@ -1169,11 +1169,11 @@ var epochCommitFixtureJSON = ` "type": "Array", "value": [ { - "type": "Name", + "type": "String", "value": "b2bff159971852ed63e72c37991e62c94822e52d4fdcd7bf29aaf9fb178b1c5b4ce20dd9594e029f3574cb29533b857a" }, { - "type": "Name", + "type": "String", "value": "9931562f0248c9195758da3de4fb92f24fa734cbc20c0cb80280163560e0e0348f843ac89ecbd3732e335940c1e8dccb" } ] @@ -1182,7 +1182,7 @@ var epochCommitFixtureJSON = ` { "name": "voteMessage", "value": { - "type": "Name", + "type": "String", "value": "irrelevant_for_these_purposes" } }, @@ -1192,11 +1192,11 @@ var epochCommitFixtureJSON = ` "type": "Array", "value": [ { - "type": "Name", + "type": "String", "value": "0000000000000000000000000000000000000000000000000000000000000003" }, { - "type": "Name", + "type": "String", "value": "0000000000000000000000000000000000000000000000000000000000000004" } ] @@ -1214,11 +1214,11 @@ var epochCommitFixtureJSON = ` "type": "Array", "value": [ { - "type": "Name", + "type": "String", "value": 
"8c588266db5f5cda629e83f8aa04ae9413593fac19e4865d06d291c9d14fbdd9bdb86a7a12f9ef8590c79cb635e3163315d193087e9336092987150d0cd2b14ac6365f7dc93eec573752108b8c12368abb65f0652d9f644e5aed611c37926950" }, { - "type": "Name", + "type": "String", "value": "87a339e4e5c74f089da20a33f515d8c8f4464ab53ede5a74aa2432cd1ae66d522da0c122249ee176cd747ddc83ca81090498389384201614caf51eac392c1c0a916dfdcfbbdf7363f9552b6468434add3d3f6dc91a92bbe3ee368b59b7828488" } ] diff --git a/model/convert/fixtures_test.go b/model/convert/fixtures_test.go index f26d07f14ce..5c99d8709ee 100644 --- a/model/convert/fixtures_test.go +++ b/model/convert/fixtures_test.go @@ -173,7 +173,7 @@ var epochSetupFixtureJSON = ` { "name": "id", "value": { - "type": "Name", + "type": "String", "value": "0000000000000000000000000000000000000000000000000000000000000001" } }, @@ -187,21 +187,21 @@ var epochSetupFixtureJSON = ` { "name": "networkingAddress", "value": { - "type": "Name", + "type": "String", "value": "1.flow.com" } }, { "name": "networkingKey", "value": { - "type": "Name", + "type": "String", "value": "378dbf45d85c614feb10d8bd4f78f4b6ef8eec7d987b937e123255444657fb3da031f232a507e323df3a6f6b8f50339c51d188e80c0e7a92420945cc6ca893fc" } }, { "name": "stakingKey", "value": { - "type": "Name", + "type": "String", "value": "af4aade26d76bb2ab15dcc89adcef82a51f6f04b3cb5f4555214b40ec89813c7a5f95776ea4fe449de48166d0bbc59b919b7eabebaac9614cf6f9461fac257765415f4d8ef1376a2365ec9960121888ea5383d88a140c24c29962b0a14e4e4e7" } }, @@ -279,7 +279,7 @@ var epochSetupFixtureJSON = ` { "name": "id", "value": { - "type": "Name", + "type": "String", "value": "0000000000000000000000000000000000000000000000000000000000000002" } }, @@ -293,21 +293,21 @@ var epochSetupFixtureJSON = ` { "name": "networkingAddress", "value": { - "type": "Name", + "type": "String", "value": "2.flow.com" } }, { "name": "networkingKey", "value": { - "type": "Name", + "type": "String", "value": 
"378dbf45d85c614feb10d8bd4f78f4b6ef8eec7d987b937e123255444657fb3da031f232a507e323df3a6f6b8f50339c51d188e80c0e7a92420945cc6ca893fc" } }, { "name": "stakingKey", "value": { - "type": "Name", + "type": "String", "value": "af4aade26d76bb2ab15dcc89adcef82a51f6f04b3cb5f4555214b40ec89813c7a5f95776ea4fe449de48166d0bbc59b919b7eabebaac9614cf6f9461fac257765415f4d8ef1376a2365ec9960121888ea5383d88a140c24c29962b0a14e4e4e7" } }, @@ -385,7 +385,7 @@ var epochSetupFixtureJSON = ` { "name": "id", "value": { - "type": "Name", + "type": "String", "value": "0000000000000000000000000000000000000000000000000000000000000003" } }, @@ -399,21 +399,21 @@ var epochSetupFixtureJSON = ` { "name": "networkingAddress", "value": { - "type": "Name", + "type": "String", "value": "3.flow.com" } }, { "name": "networkingKey", "value": { - "type": "Name", + "type": "String", "value": "378dbf45d85c614feb10d8bd4f78f4b6ef8eec7d987b937e123255444657fb3da031f232a507e323df3a6f6b8f50339c51d188e80c0e7a92420945cc6ca893fc" } }, { "name": "stakingKey", "value": { - "type": "Name", + "type": "String", "value": "af4aade26d76bb2ab15dcc89adcef82a51f6f04b3cb5f4555214b40ec89813c7a5f95776ea4fe449de48166d0bbc59b919b7eabebaac9614cf6f9461fac257765415f4d8ef1376a2365ec9960121888ea5383d88a140c24c29962b0a14e4e4e7" } }, @@ -491,7 +491,7 @@ var epochSetupFixtureJSON = ` { "name": "id", "value": { - "type": "Name", + "type": "String", "value": "0000000000000000000000000000000000000000000000000000000000000004" } }, @@ -505,21 +505,21 @@ var epochSetupFixtureJSON = ` { "name": "networkingAddress", "value": { - "type": "Name", + "type": "String", "value": "4.flow.com" } }, { "name": "networkingKey", "value": { - "type": "Name", + "type": "String", "value": "378dbf45d85c614feb10d8bd4f78f4b6ef8eec7d987b937e123255444657fb3da031f232a507e323df3a6f6b8f50339c51d188e80c0e7a92420945cc6ca893fc" } }, { "name": "stakingKey", "value": { - "type": "Name", + "type": "String", "value": 
"af4aade26d76bb2ab15dcc89adcef82a51f6f04b3cb5f4555214b40ec89813c7a5f95776ea4fe449de48166d0bbc59b919b7eabebaac9614cf6f9461fac257765415f4d8ef1376a2365ec9960121888ea5383d88a140c24c29962b0a14e4e4e7" } }, @@ -597,7 +597,7 @@ var epochSetupFixtureJSON = ` { "name": "id", "value": { - "type": "Name", + "type": "String", "value": "0000000000000000000000000000000000000000000000000000000000000011" } }, @@ -611,21 +611,21 @@ var epochSetupFixtureJSON = ` { "name": "networkingAddress", "value": { - "type": "Name", + "type": "String", "value": "11.flow.com" } }, { "name": "networkingKey", "value": { - "type": "Name", + "type": "String", "value": "cfdfe8e4362c8f79d11772cb7277ab16e5033a63e8dd5d34caf1b041b77e5b2d63c2072260949ccf8907486e4cfc733c8c42ca0e4e208f30470b0d950856cd47" } }, { "name": "stakingKey", "value": { - "type": "Name", + "type": "String", "value": "8207559cd7136af378bba53a8f0196dee3849a3ab02897c1995c3e3f6ca0c4a776c3ae869d1ddbb473090054be2400ad06d7910aa2c5d1780220fdf3765a3c1764bce10c6fe66a5a2be51a422e878518bd750424bb56b8a0ecf0f8ad2057e83f" } }, @@ -703,7 +703,7 @@ var epochSetupFixtureJSON = ` { "name": "id", "value": { - "type": "Name", + "type": "String", "value": "0000000000000000000000000000000000000000000000000000000000000021" } }, @@ -717,21 +717,21 @@ var epochSetupFixtureJSON = ` { "name": "networkingAddress", "value": { - "type": "Name", + "type": "String", "value": "21.flow.com" } }, { "name": "networkingKey", "value": { - "type": "Name", + "type": "String", "value": "d64318ba0dbf68f3788fc81c41d507c5822bf53154530673127c66f50fe4469ccf1a054a868a9f88506a8999f2386d86fcd2b901779718cba4fb53c2da258f9e" } }, { "name": "stakingKey", "value": { - "type": "Name", + "type": "String", "value": "880b162b7ec138b36af401d07868cb08d25746d905395edbb4625bdf105d4bb2b2f4b0f4ae273a296a6efefa7ce9ccb914e39947ce0e83745125cab05d62516076ff0173ed472d3791ccef937597c9ea12381d76f547a092a4981d77ff3fba83" } }, @@ -809,7 +809,7 @@ var epochSetupFixtureJSON = ` { "name": "id", "value": { - 
"type": "Name", + "type": "String", "value": "0000000000000000000000000000000000000000000000000000000000000031" } }, @@ -823,21 +823,21 @@ var epochSetupFixtureJSON = ` { "name": "networkingAddress", "value": { - "type": "Name", + "type": "String", "value": "31.flow.com" } }, { "name": "networkingKey", "value": { - "type": "Name", + "type": "String", "value": "697241208dcc9142b6f53064adc8ff1c95760c68beb2ba083c1d005d40181fd7a1b113274e0163c053a3addd47cd528ec6a1f190cf465aac87c415feaae011ae" } }, { "name": "stakingKey", "value": { - "type": "Name", + "type": "String", "value": "b1f97d0a06020eca97352e1adde72270ee713c7daf58da7e74bf72235321048b4841bdfc28227964bf18e371e266e32107d238358848bcc5d0977a0db4bda0b4c33d3874ff991e595e0f537c7b87b4ddce92038ebc7b295c9ea20a1492302aa7" } }, @@ -948,7 +948,7 @@ var epochSetupFixtureJSON = ` "value": [ { "key": { - "type": "Name", + "type": "String", "value": "0000000000000000000000000000000000000000000000000000000000000001" }, "value": { @@ -958,7 +958,7 @@ var epochSetupFixtureJSON = ` }, { "key": { - "type": "Name", + "type": "String", "value": "0000000000000000000000000000000000000000000000000000000000000002" }, "value": { @@ -1005,7 +1005,7 @@ var epochSetupFixtureJSON = ` "value": [ { "key": { - "type": "Name", + "type": "String", "value": "0000000000000000000000000000000000000000000000000000000000000003" }, "value": { @@ -1015,7 +1015,7 @@ var epochSetupFixtureJSON = ` }, { "key": { - "type": "Name", + "type": "String", "value": "0000000000000000000000000000000000000000000000000000000000000004" }, "value": { @@ -1049,7 +1049,7 @@ var epochSetupFixtureJSON = ` { "name": "randomSource", "value": { - "type": "Name", + "type": "String", "value": "01020304" } }, @@ -1115,11 +1115,11 @@ var epochCommitFixtureJSON = ` "type": "Array", "value": [ { - "type": "Name", + "type": "String", "value": "a39cd1e1bf7e2fb0609b7388ce5215a6a4c01eef2aee86e1a007faa28a6b2a3dc876e11bb97cdb26c3846231d2d01e4d" }, { - "type": "Name", + "type": "String", 
"value": "91673ad9c717d396c9a0953617733c128049ac1a639653d4002ab245b121df1939430e313bcbfd06948f6a281f6bf853" } ] @@ -1128,7 +1128,7 @@ var epochCommitFixtureJSON = ` { "name": "voteMessage", "value": { - "type": "Name", + "type": "String", "value": "irrelevant_for_these_purposes" } }, @@ -1138,11 +1138,11 @@ var epochCommitFixtureJSON = ` "type": "Array", "value": [ { - "type": "Name", + "type": "String", "value": "0000000000000000000000000000000000000000000000000000000000000001" }, { - "type": "Name", + "type": "String", "value": "0000000000000000000000000000000000000000000000000000000000000002" } ] @@ -1169,11 +1169,11 @@ var epochCommitFixtureJSON = ` "type": "Array", "value": [ { - "type": "Name", + "type": "String", "value": "b2bff159971852ed63e72c37991e62c94822e52d4fdcd7bf29aaf9fb178b1c5b4ce20dd9594e029f3574cb29533b857a" }, { - "type": "Name", + "type": "String", "value": "9931562f0248c9195758da3de4fb92f24fa734cbc20c0cb80280163560e0e0348f843ac89ecbd3732e335940c1e8dccb" } ] @@ -1182,7 +1182,7 @@ var epochCommitFixtureJSON = ` { "name": "voteMessage", "value": { - "type": "Name", + "type": "String", "value": "irrelevant_for_these_purposes" } }, @@ -1192,11 +1192,11 @@ var epochCommitFixtureJSON = ` "type": "Array", "value": [ { - "type": "Name", + "type": "String", "value": "0000000000000000000000000000000000000000000000000000000000000003" }, { - "type": "Name", + "type": "String", "value": "0000000000000000000000000000000000000000000000000000000000000004" } ] @@ -1214,11 +1214,11 @@ var epochCommitFixtureJSON = ` "type": "Array", "value": [ { - "type": "Name", + "type": "String", "value": "8c588266db5f5cda629e83f8aa04ae9413593fac19e4865d06d291c9d14fbdd9bdb86a7a12f9ef8590c79cb635e3163315d193087e9336092987150d0cd2b14ac6365f7dc93eec573752108b8c12368abb65f0652d9f644e5aed611c37926950" }, { - "type": "Name", + "type": "String", "value": 
"87a339e4e5c74f089da20a33f515d8c8f4464ab53ede5a74aa2432cd1ae66d522da0c122249ee176cd747ddc83ca81090498389384201614caf51eac392c1c0a916dfdcfbbdf7363f9552b6468434add3d3f6dc91a92bbe3ee368b59b7828488" } ] diff --git a/model/flow/identifierList.go b/model/flow/identifierList.go index 964fd57a126..33ce2447707 100644 --- a/model/flow/identifierList.go +++ b/model/flow/identifierList.go @@ -12,7 +12,7 @@ import ( type IdentifierList []Identifier // Len returns length of the IdentiferList in the number of stored identifiers. -// It satisfies the sort.Type making the IdentifierList sortable. +// It satisfies the sort.Interface making the IdentifierList sortable. func (il IdentifierList) Len() int { return len(il) } @@ -28,7 +28,7 @@ func (il IdentifierList) Lookup() map[Identifier]struct{} { // Less returns true if element i in the IdentifierList is less than j based on its identifier. // Otherwise it returns true. -// It satisfies the sort.Type making the IdentifierList sortable. +// It satisfies the sort.Interface making the IdentifierList sortable. func (il IdentifierList) Less(i, j int) bool { // bytes package already implements Comparable for []byte. switch bytes.Compare(il[i][:], il[j][:]) { @@ -43,7 +43,7 @@ func (il IdentifierList) Less(i, j int) bool { } // Swap swaps the element i and j in the IdentifierList. -// It satisfies the sort.Type making the IdentifierList sortable. +// It satisfies the sort.Interface making the IdentifierList sortable. func (il IdentifierList) Swap(i, j int) { il[j], il[i] = il[i], il[j] } diff --git a/model/flow/ledger_test.go b/model/flow/ledger_test.go index e70e7e2471a..ec612b0d86f 100644 --- a/model/flow/ledger_test.go +++ b/model/flow/ledger_test.go @@ -8,7 +8,7 @@ import ( ) // this benchmark can run with this command: -// go test -run=Name -bench=. +// go test -run=String -bench=. 
// this is to prevent lint errors var length int diff --git a/model/flow/role.go b/model/flow/role.go index 11acfdd0920..f138a185d75 100644 --- a/model/flow/role.go +++ b/model/flow/role.go @@ -110,20 +110,20 @@ func (r RoleList) Union(other RoleList) RoleList { } // Len returns length of the RoleList in the number of stored roles. -// It satisfies the sort.Type making the RoleList sortable. +// It satisfies the sort.Interface making the RoleList sortable. func (r RoleList) Len() int { return len(r) } // Less returns true if element i in the RoleList is less than j based on the numerical value of its role. // Otherwise it returns true. -// It satisfies the sort.Type making the RoleList sortable. +// It satisfies the sort.Interface making the RoleList sortable. func (r RoleList) Less(i, j int) bool { return r[i] < r[j] } // Swap swaps the element i and j in the RoleList. -// It satisfies the sort.Type making the RoleList sortable. +// It satisfies the sort.Interface making the RoleList sortable. 
func (r RoleList) Swap(i, j int) { r[i], r[j] = r[j], r[i] } diff --git a/model/flow/service_event.go b/model/flow/service_event.go index d1e098505c8..c4d183084cb 100644 --- a/model/flow/service_event.go +++ b/model/flow/service_event.go @@ -56,7 +56,7 @@ func (se *ServiceEvent) UnmarshalJSON(b []byte) error { return err } - tp, ok := enc["Type"].(string) + tp, ok := enc["Interface"].(string) if !ok { return fmt.Errorf("missing type key") } @@ -106,7 +106,7 @@ func (se *ServiceEvent) UnmarshalMsgpack(b []byte) error { return err } - tp, ok := enc["Type"].(string) + tp, ok := enc["Interface"].(string) if !ok { return fmt.Errorf("missing type key") } @@ -156,7 +156,7 @@ func (se *ServiceEvent) UnmarshalCBOR(b []byte) error { return err } - tp, ok := enc["Type"].(string) + tp, ok := enc["Interface"].(string) if !ok { return fmt.Errorf("missing type key") } diff --git a/module/dkg/client.go b/module/dkg/client.go index f7902c43764..e8401f23736 100644 --- a/module/dkg/client.go +++ b/module/dkg/client.go @@ -256,7 +256,7 @@ func (c *Client) SubmitResult(groupPublicKey crypto.PublicKey, publicKeys []cryp // trim0x trims the `0x` if it exists from a hexadecimal string // This method is required as the DKG contract expects key lengths of 192 bytes -// the `PublicKey.Name()` method returns the hexadecimal string representation of the +// the `PublicKey.String()` method returns the hexadecimal string representation of the // public key prefixed with `0x` resulting in length of 194 bytes. func trim0x(hexString string) string { diff --git a/module/epochs/epoch_config.go b/module/epochs/epoch_config.go index f9dbae46c06..179add16f79 100644 --- a/module/epochs/epoch_config.go +++ b/module/epochs/epoch_config.go @@ -42,7 +42,7 @@ func DefaultEpochConfig() EpochConfig { // transaction argument for the deployEpoch transaction used during execution // state bootstrapping. 
// -// The resulting argument has type [{Name: UInt64}] which represents a list +// The resulting argument has type [{String: UInt64}] which represents a list // of weight mappings for each cluster. The full Cluster struct is constructed // within the transaction in Cadence for simplicity here. // diff --git a/network/channels/channel.go b/network/channels/channel.go index 10a9c92ea9a..9796acaee29 100644 --- a/network/channels/channel.go +++ b/network/channels/channel.go @@ -17,20 +17,20 @@ func (c Channel) String() string { } // Len returns length of the ChannelList in the number of stored Channels. -// It satisfies the sort.Type making the ChannelList sortable. +// It satisfies the sort.Interface making the ChannelList sortable. func (cl ChannelList) Len() int { return len(cl) } // Less returns true if element i in the ChannelList is less than j based on the numerical value of its Channel. // Otherwise it returns true. -// It satisfies the sort.Type making the ChannelList sortable. +// It satisfies the sort.Interface making the ChannelList sortable. func (cl ChannelList) Less(i, j int) bool { return cl[i] < cl[j] } // Swap swaps the element i and j in the ChannelList. -// It satisfies the sort.Type making the ChannelList sortable. +// It satisfies the sort.Interface making the ChannelList sortable. func (cl ChannelList) Swap(i, j int) { cl[i], cl[j] = cl[j], cl[i] } diff --git a/network/message/message.pb.go b/network/message/message.pb.go index 42d05c2c6a7..43d1abcc9ce 100644 --- a/network/message/message.pb.go +++ b/network/message/message.pb.go @@ -30,7 +30,7 @@ type Message struct { OriginID []byte `protobuf:"bytes,3,opt,name=OriginID,proto3" json:"OriginID,omitempty"` // Deprecated: Do not use. 
TargetIDs [][]byte `protobuf:"bytes,4,rep,name=TargetIDs,proto3" json:"TargetIDs,omitempty"` Payload []byte `protobuf:"bytes,5,opt,name=Payload,proto3" json:"Payload,omitempty"` - Type string `protobuf:"bytes,6,opt,name=Type,proto3" json:"Type,omitempty"` + Type string `protobuf:"bytes,6,opt,name=Interface,proto3" json:"Interface,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -457,7 +457,7 @@ func (m *Message) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Interface", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { From bf4e92137609fc0a3aeb28ec15a93bb2df9b8e98 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Wed, 29 Jun 2022 09:46:03 -0400 Subject: [PATCH 056/223] fix goland refactor rename errors --- model/flow/service_event.go | 6 +++--- network/message/message.pb.go | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/model/flow/service_event.go b/model/flow/service_event.go index c4d183084cb..d1e098505c8 100644 --- a/model/flow/service_event.go +++ b/model/flow/service_event.go @@ -56,7 +56,7 @@ func (se *ServiceEvent) UnmarshalJSON(b []byte) error { return err } - tp, ok := enc["Interface"].(string) + tp, ok := enc["Type"].(string) if !ok { return fmt.Errorf("missing type key") } @@ -106,7 +106,7 @@ func (se *ServiceEvent) UnmarshalMsgpack(b []byte) error { return err } - tp, ok := enc["Interface"].(string) + tp, ok := enc["Type"].(string) if !ok { return fmt.Errorf("missing type key") } @@ -156,7 +156,7 @@ func (se *ServiceEvent) UnmarshalCBOR(b []byte) error { return err } - tp, ok := enc["Interface"].(string) + tp, ok := enc["Type"].(string) if !ok { return fmt.Errorf("missing type key") } diff --git a/network/message/message.pb.go b/network/message/message.pb.go index 
43d1abcc9ce..42d05c2c6a7 100644 --- a/network/message/message.pb.go +++ b/network/message/message.pb.go @@ -30,7 +30,7 @@ type Message struct { OriginID []byte `protobuf:"bytes,3,opt,name=OriginID,proto3" json:"OriginID,omitempty"` // Deprecated: Do not use. TargetIDs [][]byte `protobuf:"bytes,4,rep,name=TargetIDs,proto3" json:"TargetIDs,omitempty"` Payload []byte `protobuf:"bytes,5,opt,name=Payload,proto3" json:"Payload,omitempty"` - Type string `protobuf:"bytes,6,opt,name=Interface,proto3" json:"Interface,omitempty"` + Type string `protobuf:"bytes,6,opt,name=Type,proto3" json:"Type,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -457,7 +457,7 @@ func (m *Message) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Interface", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { From 50011e2a2e9b1877a3bbf280d38d60097d31198d Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Wed, 29 Jun 2022 10:01:55 -0400 Subject: [PATCH 057/223] fix all channels package imports --- cmd/observer/node_builder/observer_builder.go | 3 +-- consensus/integration/integration_test.go | 3 +-- consensus/integration/network_test.go | 3 +-- engine/access/access_test.go | 3 +-- engine/access/ingestion/engine.go | 3 +-- engine/access/ingestion/engine_test.go | 3 +-- engine/collection/compliance/engine.go | 3 +-- engine/collection/compliance/engine_test.go | 3 +-- engine/collection/ingest/engine.go | 3 +-- engine/collection/pusher/engine.go | 3 +-- engine/collection/pusher/engine_test.go | 3 +-- engine/collection/synchronization/engine.go | 3 +-- engine/collection/synchronization/engine_test.go | 3 +-- engine/collection/synchronization/request_handler.go | 3 +-- engine/collection/test/cluster_switchover_test.go | 3 +-- 
engine/common/follower/engine.go | 3 +-- engine/common/follower/engine_test.go | 5 ++--- engine/common/provider/engine.go | 3 +-- engine/common/requester/engine.go | 3 +-- engine/common/splitter/engine.go | 3 +-- engine/common/splitter/engine_test.go | 3 +-- engine/common/splitter/network/example_test.go | 3 +-- engine/common/splitter/network/network.go | 3 +-- engine/common/splitter/network/network_test.go | 3 +-- engine/common/synchronization/engine.go | 3 +-- engine/common/synchronization/engine_test.go | 3 +-- engine/common/synchronization/request_handler.go | 3 +-- engine/consensus/compliance/core_test.go | 3 +-- engine/consensus/compliance/engine.go | 3 +-- engine/consensus/compliance/engine_test.go | 3 +-- engine/consensus/dkg/messaging_engine.go | 6 ++---- engine/consensus/dkg/messaging_engine_test.go | 3 +-- engine/consensus/ingestion/engine.go | 3 +-- engine/consensus/ingestion/engine_test.go | 3 +-- engine/consensus/matching/engine.go | 3 +-- engine/consensus/matching/engine_test.go | 3 +-- engine/consensus/provider/engine.go | 3 +-- engine/consensus/sealing/engine.go | 3 +-- engine/consensus/sealing/engine_test.go | 3 +-- engine/execution/execution_test.go | 3 +-- engine/execution/ingestion/engine.go | 3 +-- engine/execution/ingestion/engine_test.go | 3 +-- engine/execution/provider/engine.go | 3 +-- engine/ghost/client/ghost_client.go | 3 +-- engine/ghost/engine/handler.go | 3 +-- engine/ghost/engine/rpc.go | 6 ++---- engine/testutil/nodes.go | 3 +-- engine/verification/requester/requester.go | 3 +-- engine/verification/requester/requester_test.go | 3 +-- engine/verification/utils/unittest/helper.go | 3 +-- engine/verification/verifier/engine.go | 3 +-- engine/verification/verifier/engine_test.go | 3 +-- module/metrics/example/collection/main.go | 3 +-- module/metrics/example/consensus/main.go | 3 +-- network/p2p/dht_test.go | 3 +-- network/p2p/libp2pNode.go | 3 +-- network/p2p/middleware.go | 3 +-- network/p2p/network.go | 4 +--- 
network/p2p/sporking_test.go | 4 +--- network/p2p/topic_validator_test.go | 3 +-- network/proxy/network_test.go | 3 +-- network/stub/network.go | 3 +-- network/test/echoengine_test.go | 3 +-- network/test/meshengine_test.go | 3 +-- network/test/middleware_test.go | 3 +-- network/test/testUtil.go | 3 +-- network/topology/cache_test.go | 3 +-- network/topology/helper.go | 3 +-- network/topology/randomizedTopology_test.go | 3 +-- network/topology/topicBasedTopology.go | 3 +-- network/topology/topicBasedTopology_test.go | 3 +-- utils/unittest/network/network.go | 3 +-- 72 files changed, 75 insertions(+), 151 deletions(-) diff --git a/cmd/observer/node_builder/observer_builder.go b/cmd/observer/node_builder/observer_builder.go index cad5d8e775d..d8f16d4d7f4 100644 --- a/cmd/observer/node_builder/observer_builder.go +++ b/cmd/observer/node_builder/observer_builder.go @@ -18,8 +18,6 @@ import ( dht "github.com/libp2p/go-libp2p-kad-dht" p2ppubsub "github.com/libp2p/go-libp2p-pubsub" - "github.com/onflow/flow-go/network/channels" - sdkcrypto "github.com/onflow/flow-go-sdk/crypto" "github.com/onflow/flow-go/apiproxy" "github.com/onflow/flow-go/cmd" @@ -53,6 +51,7 @@ import ( consensus_follower "github.com/onflow/flow-go/module/upstream" "github.com/onflow/flow-go/network" netcache "github.com/onflow/flow-go/network/cache" + "github.com/onflow/flow-go/network/channels" cborcodec "github.com/onflow/flow-go/network/codec/cbor" "github.com/onflow/flow-go/network/compressor" "github.com/onflow/flow-go/network/converter" diff --git a/consensus/integration/integration_test.go b/consensus/integration/integration_test.go index 6c24c383bf7..65adddf597e 100644 --- a/consensus/integration/integration_test.go +++ b/consensus/integration/integration_test.go @@ -8,11 +8,10 @@ import ( "github.com/stretchr/testify/require" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/irrecoverable" 
"github.com/onflow/flow-go/module/util" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/utils/unittest" ) diff --git a/consensus/integration/network_test.go b/consensus/integration/network_test.go index ff160c687cc..181e3e79adc 100644 --- a/consensus/integration/network_test.go +++ b/consensus/integration/network_test.go @@ -7,10 +7,9 @@ import ( "github.com/hashicorp/go-multierror" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/mocknetwork" ) diff --git a/engine/access/access_test.go b/engine/access/access_test.go index 1fa4788860c..2a6eb9855fb 100644 --- a/engine/access/access_test.go +++ b/engine/access/access_test.go @@ -16,8 +16,6 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/access" "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/crypto" @@ -35,6 +33,7 @@ import ( "github.com/onflow/flow-go/module/metrics" module "github.com/onflow/flow-go/module/mock" "github.com/onflow/flow-go/module/signature" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/mocknetwork" protocol "github.com/onflow/flow-go/state/protocol/mock" storage "github.com/onflow/flow-go/storage/badger" diff --git a/engine/access/ingestion/engine.go b/engine/access/ingestion/engine.go index 16fa596679b..d03fcd21b14 100644 --- a/engine/access/ingestion/engine.go +++ b/engine/access/ingestion/engine.go @@ -10,8 +10,6 @@ import ( "github.com/rs/zerolog" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/engine/access/rpc" @@ -23,6 +21,7 @@ import ( "github.com/onflow/flow-go/module/irrecoverable" 
"github.com/onflow/flow-go/module/mempool/stdmap" "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/storage" "github.com/onflow/flow-go/utils/logging" diff --git a/engine/access/ingestion/engine_test.go b/engine/access/ingestion/engine_test.go index a826d130902..d1a413440e8 100644 --- a/engine/access/ingestion/engine_test.go +++ b/engine/access/ingestion/engine_test.go @@ -14,8 +14,6 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - "github.com/onflow/flow-go/network/channels" - hotmodel "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/engine/access/rpc" "github.com/onflow/flow-go/model/flow" @@ -26,6 +24,7 @@ import ( "github.com/onflow/flow-go/module/metrics" module "github.com/onflow/flow-go/module/mock" "github.com/onflow/flow-go/module/signature" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/mocknetwork" protocol "github.com/onflow/flow-go/state/protocol/mock" storerr "github.com/onflow/flow-go/storage" diff --git a/engine/collection/compliance/engine.go b/engine/collection/compliance/engine.go index 35cd5e69964..ae697147a63 100644 --- a/engine/collection/compliance/engine.go +++ b/engine/collection/compliance/engine.go @@ -8,8 +8,6 @@ import ( "github.com/rs/zerolog" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/engine/common/fifoqueue" @@ -24,6 +22,7 @@ import ( "github.com/onflow/flow-go/module/lifecycle" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/storage" "github.com/onflow/flow-go/utils/logging" diff --git a/engine/collection/compliance/engine_test.go 
b/engine/collection/compliance/engine_test.go index f669f947bdd..a74c22b6db3 100644 --- a/engine/collection/compliance/engine_test.go +++ b/engine/collection/compliance/engine_test.go @@ -10,8 +10,6 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/model/cluster" @@ -19,6 +17,7 @@ import ( "github.com/onflow/flow-go/model/messages" module "github.com/onflow/flow-go/module/mock" netint "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/mocknetwork" protocol "github.com/onflow/flow-go/state/protocol/mock" storerr "github.com/onflow/flow-go/storage" diff --git a/engine/collection/ingest/engine.go b/engine/collection/ingest/engine.go index 34afdda6555..d66e4b2f67d 100644 --- a/engine/collection/ingest/engine.go +++ b/engine/collection/ingest/engine.go @@ -9,8 +9,6 @@ import ( "github.com/rs/zerolog" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/access" "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/engine/common/fifoqueue" @@ -21,6 +19,7 @@ import ( "github.com/onflow/flow-go/module/mempool/epochs" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/utils/logging" ) diff --git a/engine/collection/pusher/engine.go b/engine/collection/pusher/engine.go index 221333490bd..226b866bf5e 100644 --- a/engine/collection/pusher/engine.go +++ b/engine/collection/pusher/engine.go @@ -8,8 +8,6 @@ import ( "github.com/rs/zerolog" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" @@ -17,6 +15,7 @@ import ( 
"github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/storage" "github.com/onflow/flow-go/utils/logging" diff --git a/engine/collection/pusher/engine_test.go b/engine/collection/pusher/engine_test.go index 3d098aea681..6e620deaf44 100644 --- a/engine/collection/pusher/engine_test.go +++ b/engine/collection/pusher/engine_test.go @@ -8,14 +8,13 @@ import ( "github.com/stretchr/testify/mock" "github.com/stretchr/testify/suite" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/engine/collection/pusher" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" "github.com/onflow/flow-go/model/messages" "github.com/onflow/flow-go/module/metrics" module "github.com/onflow/flow-go/module/mock" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/mocknetwork" protocol "github.com/onflow/flow-go/state/protocol/mock" storage "github.com/onflow/flow-go/storage/mock" diff --git a/engine/collection/synchronization/engine.go b/engine/collection/synchronization/engine.go index 2c3a1ad1070..402276951c7 100644 --- a/engine/collection/synchronization/engine.go +++ b/engine/collection/synchronization/engine.go @@ -10,8 +10,6 @@ import ( "github.com/hashicorp/go-multierror" "github.com/rs/zerolog" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/engine/common/fifoqueue" commonsync "github.com/onflow/flow-go/engine/common/synchronization" @@ -25,6 +23,7 @@ import ( "github.com/onflow/flow-go/module/metrics" synccore "github.com/onflow/flow-go/module/synchronization" "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/state/cluster" "github.com/onflow/flow-go/storage" ) diff --git 
a/engine/collection/synchronization/engine_test.go b/engine/collection/synchronization/engine_test.go index 5c93e947dd3..ea942df838c 100644 --- a/engine/collection/synchronization/engine_test.go +++ b/engine/collection/synchronization/engine_test.go @@ -12,8 +12,6 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/engine" clustermodel "github.com/onflow/flow-go/model/cluster" "github.com/onflow/flow-go/model/events" @@ -24,6 +22,7 @@ import ( module "github.com/onflow/flow-go/module/mock" synccore "github.com/onflow/flow-go/module/synchronization" netint "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/mocknetwork" clusterint "github.com/onflow/flow-go/state/cluster" cluster "github.com/onflow/flow-go/state/cluster/mock" diff --git a/engine/collection/synchronization/request_handler.go b/engine/collection/synchronization/request_handler.go index 6a36d4a189b..ec23bed2ade 100644 --- a/engine/collection/synchronization/request_handler.go +++ b/engine/collection/synchronization/request_handler.go @@ -6,8 +6,6 @@ import ( "github.com/rs/zerolog" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/engine" commonsync "github.com/onflow/flow-go/engine/common/synchronization" clustermodel "github.com/onflow/flow-go/model/cluster" @@ -17,6 +15,7 @@ import ( "github.com/onflow/flow-go/module/lifecycle" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/state/cluster" "github.com/onflow/flow-go/storage" ) diff --git a/engine/collection/test/cluster_switchover_test.go b/engine/collection/test/cluster_switchover_test.go index ef13b53e890..d04a233b48d 100644 --- a/engine/collection/test/cluster_switchover_test.go +++ b/engine/collection/test/cluster_switchover_test.go @@ -8,8 
+8,6 @@ import ( "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/cmd/bootstrap/run" "github.com/onflow/flow-go/engine/testutil" testmock "github.com/onflow/flow-go/engine/testutil/mock" @@ -19,6 +17,7 @@ import ( "github.com/onflow/flow-go/model/flow/filter" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/util" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/mocknetwork" "github.com/onflow/flow-go/network/stub" "github.com/onflow/flow-go/state/cluster" diff --git a/engine/common/follower/engine.go b/engine/common/follower/engine.go index 4609049709c..150205ceaa0 100644 --- a/engine/common/follower/engine.go +++ b/engine/common/follower/engine.go @@ -8,8 +8,6 @@ import ( "github.com/hashicorp/go-multierror" "github.com/rs/zerolog" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/model/events" "github.com/onflow/flow-go/model/flow" @@ -19,6 +17,7 @@ import ( "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/module/trace" "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/state" "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/storage" diff --git a/engine/common/follower/engine_test.go b/engine/common/follower/engine_test.go index fa6c696720d..2b020605fd0 100644 --- a/engine/common/follower/engine_test.go +++ b/engine/common/follower/engine_test.go @@ -9,14 +9,13 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/engine/common/follower" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/compliance" - metrics "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/module/metrics" module 
"github.com/onflow/flow-go/module/mock" "github.com/onflow/flow-go/module/trace" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/mocknetwork" protocol "github.com/onflow/flow-go/state/protocol/mock" realstorage "github.com/onflow/flow-go/storage" diff --git a/engine/common/provider/engine.go b/engine/common/provider/engine.go index 4622e43723b..85f0d32338f 100644 --- a/engine/common/provider/engine.go +++ b/engine/common/provider/engine.go @@ -7,8 +7,6 @@ import ( "github.com/rs/zerolog" "github.com/vmihailenco/msgpack" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" @@ -16,6 +14,7 @@ import ( "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/storage" ) diff --git a/engine/common/requester/engine.go b/engine/common/requester/engine.go index c3f0e34897f..a8069d48cc5 100644 --- a/engine/common/requester/engine.go +++ b/engine/common/requester/engine.go @@ -9,8 +9,6 @@ import ( "github.com/rs/zerolog" "github.com/vmihailenco/msgpack" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" @@ -18,6 +16,7 @@ import ( "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/utils/logging" ) diff --git a/engine/common/splitter/engine.go b/engine/common/splitter/engine.go index a054a497898..bfb4169e2a4 100644 --- a/engine/common/splitter/engine.go +++ b/engine/common/splitter/engine.go @@ -7,11 +7,10 @@ import ( "github.com/hashicorp/go-multierror" 
"github.com/rs/zerolog" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" ) // Engine is the splitter engine, which maintains a list of registered engines diff --git a/engine/common/splitter/engine_test.go b/engine/common/splitter/engine_test.go index 613c1d0de8a..f4a5e1b3abf 100644 --- a/engine/common/splitter/engine_test.go +++ b/engine/common/splitter/engine_test.go @@ -9,9 +9,8 @@ import ( "github.com/rs/zerolog" "github.com/stretchr/testify/suite" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/engine/common/splitter" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/mocknetwork" "github.com/onflow/flow-go/utils/unittest" ) diff --git a/engine/common/splitter/network/example_test.go b/engine/common/splitter/network/example_test.go index 16c0b94881c..b94f9e8a70e 100644 --- a/engine/common/splitter/network/example_test.go +++ b/engine/common/splitter/network/example_test.go @@ -6,10 +6,9 @@ import ( "github.com/rs/zerolog" - "github.com/onflow/flow-go/network/channels" - splitterNetwork "github.com/onflow/flow-go/engine/common/splitter/network" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/network/channels" testnet "github.com/onflow/flow-go/utils/unittest/network" ) diff --git a/engine/common/splitter/network/network.go b/engine/common/splitter/network/network.go index 36c60de083a..15957f26b43 100644 --- a/engine/common/splitter/network/network.go +++ b/engine/common/splitter/network/network.go @@ -9,13 +9,12 @@ import ( "github.com/libp2p/go-libp2p-core/protocol" "github.com/rs/zerolog" - "github.com/onflow/flow-go/network/channels" - splitterEngine "github.com/onflow/flow-go/engine/common/splitter" "github.com/onflow/flow-go/module/component" "github.com/onflow/flow-go/module/irrecoverable" 
"github.com/onflow/flow-go/module/util" "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" ) // Network is the splitter network. It is a wrapper around the default network implementation diff --git a/engine/common/splitter/network/network_test.go b/engine/common/splitter/network/network_test.go index 74af229810d..acc6564a1b1 100644 --- a/engine/common/splitter/network/network_test.go +++ b/engine/common/splitter/network/network_test.go @@ -7,10 +7,9 @@ import ( "github.com/stretchr/testify/mock" "github.com/stretchr/testify/suite" - "github.com/onflow/flow-go/network/channels" - splitternetwork "github.com/onflow/flow-go/engine/common/splitter/network" "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/mocknetwork" "github.com/onflow/flow-go/utils/unittest" ) diff --git a/engine/common/synchronization/engine.go b/engine/common/synchronization/engine.go index 523108ce353..2170e812c0d 100644 --- a/engine/common/synchronization/engine.go +++ b/engine/common/synchronization/engine.go @@ -10,8 +10,6 @@ import ( "github.com/hashicorp/go-multierror" "github.com/rs/zerolog" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/engine/common/fifoqueue" "github.com/onflow/flow-go/model/chainsync" @@ -24,6 +22,7 @@ import ( "github.com/onflow/flow-go/module/metrics" synccore "github.com/onflow/flow-go/module/synchronization" "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/storage" ) diff --git a/engine/common/synchronization/engine_test.go b/engine/common/synchronization/engine_test.go index 1bd0f9146a3..a1c54bcb2c4 100644 --- a/engine/common/synchronization/engine_test.go +++ b/engine/common/synchronization/engine_test.go @@ -12,8 +12,6 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - 
"github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/consensus/hotstuff/notifications/pubsub" "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/model/events" @@ -25,6 +23,7 @@ import ( module "github.com/onflow/flow-go/module/mock" synccore "github.com/onflow/flow-go/module/synchronization" netint "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/mocknetwork" "github.com/onflow/flow-go/network/p2p" protocolint "github.com/onflow/flow-go/state/protocol" diff --git a/engine/common/synchronization/request_handler.go b/engine/common/synchronization/request_handler.go index 8f34dfc1b92..82187398eec 100644 --- a/engine/common/synchronization/request_handler.go +++ b/engine/common/synchronization/request_handler.go @@ -6,8 +6,6 @@ import ( "github.com/rs/zerolog" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/messages" @@ -15,6 +13,7 @@ import ( "github.com/onflow/flow-go/module/lifecycle" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/module/synchronization" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/storage" ) diff --git a/engine/consensus/compliance/core_test.go b/engine/consensus/compliance/core_test.go index 7395d6f8634..952b10f3e17 100644 --- a/engine/consensus/compliance/core_test.go +++ b/engine/consensus/compliance/core_test.go @@ -11,8 +11,6 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - "github.com/onflow/flow-go/network/channels" - hotstuff "github.com/onflow/flow-go/consensus/hotstuff/mocks" "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/model/flow" @@ -24,6 +22,7 @@ import ( module "github.com/onflow/flow-go/module/mock" "github.com/onflow/flow-go/module/trace" netint "github.com/onflow/flow-go/network" + 
"github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/mocknetwork" protint "github.com/onflow/flow-go/state/protocol" protocol "github.com/onflow/flow-go/state/protocol/mock" diff --git a/engine/consensus/compliance/engine.go b/engine/consensus/compliance/engine.go index 4ab022377a2..270a6b9cf3c 100644 --- a/engine/consensus/compliance/engine.go +++ b/engine/consensus/compliance/engine.go @@ -8,8 +8,6 @@ import ( "github.com/rs/zerolog" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/engine/common/fifoqueue" @@ -23,6 +21,7 @@ import ( "github.com/onflow/flow-go/module/lifecycle" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/storage" "github.com/onflow/flow-go/utils/logging" diff --git a/engine/consensus/compliance/engine_test.go b/engine/consensus/compliance/engine_test.go index f5bd71a7b17..fa9585a3349 100644 --- a/engine/consensus/compliance/engine_test.go +++ b/engine/consensus/compliance/engine_test.go @@ -10,13 +10,12 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/messages" modulemock "github.com/onflow/flow-go/module/mock" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/utils/unittest" ) diff --git a/engine/consensus/dkg/messaging_engine.go b/engine/consensus/dkg/messaging_engine.go index 207ed689d62..80705d93b51 100644 --- a/engine/consensus/dkg/messaging_engine.go +++ b/engine/consensus/dkg/messaging_engine.go @@ -5,11 +5,8 @@ import ( "fmt" "time" - "github.com/sethvargo/go-retry" - - 
"github.com/onflow/flow-go/network/channels" - "github.com/rs/zerolog" + "github.com/sethvargo/go-retry" "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/model/flow" @@ -17,6 +14,7 @@ import ( "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/dkg" "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" ) // retryMax is the maximum number of times the engine will attempt to forward diff --git a/engine/consensus/dkg/messaging_engine_test.go b/engine/consensus/dkg/messaging_engine_test.go index 576f819435c..1c7d1c6e7fb 100644 --- a/engine/consensus/dkg/messaging_engine_test.go +++ b/engine/consensus/dkg/messaging_engine_test.go @@ -8,11 +8,10 @@ import ( "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - "github.com/onflow/flow-go/network/channels" - msg "github.com/onflow/flow-go/model/messages" "github.com/onflow/flow-go/module/dkg" module "github.com/onflow/flow-go/module/mock" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/mocknetwork" "github.com/onflow/flow-go/utils/unittest" ) diff --git a/engine/consensus/ingestion/engine.go b/engine/consensus/ingestion/engine.go index ab1bf76207e..a260e3e9157 100644 --- a/engine/consensus/ingestion/engine.go +++ b/engine/consensus/ingestion/engine.go @@ -8,8 +8,6 @@ import ( "github.com/rs/zerolog" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/engine/common/fifoqueue" "github.com/onflow/flow-go/model/flow" @@ -18,6 +16,7 @@ import ( "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" ) // defaultGuaranteeQueueCapacity maximum capacity of pending events queue, everything above will be dropped diff --git a/engine/consensus/ingestion/engine_test.go b/engine/consensus/ingestion/engine_test.go index cfff0c972ca..a146816bfa9 100644 
--- a/engine/consensus/ingestion/engine_test.go +++ b/engine/consensus/ingestion/engine_test.go @@ -13,13 +13,12 @@ import ( "github.com/stretchr/testify/suite" "go.uber.org/atomic" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/metrics" mockmodule "github.com/onflow/flow-go/module/mock" netint "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/mocknetwork" "github.com/onflow/flow-go/utils/unittest" ) diff --git a/engine/consensus/matching/engine.go b/engine/consensus/matching/engine.go index 2f0157f607b..74a8a233e88 100644 --- a/engine/consensus/matching/engine.go +++ b/engine/consensus/matching/engine.go @@ -5,8 +5,6 @@ import ( "github.com/rs/zerolog" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/engine/common/fifoqueue" @@ -15,6 +13,7 @@ import ( "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/storage" ) diff --git a/engine/consensus/matching/engine_test.go b/engine/consensus/matching/engine_test.go index cce9a5eac96..7006722d566 100644 --- a/engine/consensus/matching/engine_test.go +++ b/engine/consensus/matching/engine_test.go @@ -9,14 +9,13 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/engine" mockconsensus "github.com/onflow/flow-go/engine/consensus/mock" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/metrics" mockmodule "github.com/onflow/flow-go/module/mock" + 
"github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/mocknetwork" mockprotocol "github.com/onflow/flow-go/state/protocol/mock" mockstorage "github.com/onflow/flow-go/storage/mock" diff --git a/engine/consensus/provider/engine.go b/engine/consensus/provider/engine.go index b77270029b1..145b819c5c1 100644 --- a/engine/consensus/provider/engine.go +++ b/engine/consensus/provider/engine.go @@ -7,8 +7,6 @@ import ( "github.com/rs/zerolog" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" @@ -16,6 +14,7 @@ import ( "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/utils/logging" ) diff --git a/engine/consensus/sealing/engine.go b/engine/consensus/sealing/engine.go index ea52c271555..dfcda9d780a 100644 --- a/engine/consensus/sealing/engine.go +++ b/engine/consensus/sealing/engine.go @@ -6,8 +6,6 @@ import ( "github.com/gammazero/workerpool" "github.com/rs/zerolog" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/engine" @@ -20,6 +18,7 @@ import ( "github.com/onflow/flow-go/module/mempool" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/storage" ) diff --git a/engine/consensus/sealing/engine_test.go b/engine/consensus/sealing/engine_test.go index 6f83e1ab243..1839a84cf11 100644 --- a/engine/consensus/sealing/engine_test.go +++ b/engine/consensus/sealing/engine_test.go @@ -10,8 +10,6 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - 
"github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/engine" mockconsensus "github.com/onflow/flow-go/engine/consensus/mock" @@ -19,6 +17,7 @@ import ( "github.com/onflow/flow-go/model/messages" "github.com/onflow/flow-go/module/metrics" mockmodule "github.com/onflow/flow-go/module/mock" + "github.com/onflow/flow-go/network/channels" mockprotocol "github.com/onflow/flow-go/state/protocol/mock" mockstorage "github.com/onflow/flow-go/storage/mock" "github.com/onflow/flow-go/utils/unittest" diff --git a/engine/execution/execution_test.go b/engine/execution/execution_test.go index 4971ff0aac0..9dccdd80835 100644 --- a/engine/execution/execution_test.go +++ b/engine/execution/execution_test.go @@ -13,8 +13,6 @@ import ( "github.com/vmihailenco/msgpack" "go.uber.org/atomic" - "github.com/onflow/flow-go/network/channels" - execTestutil "github.com/onflow/flow-go/engine/execution/testutil" "github.com/onflow/flow-go/engine/testutil" testmock "github.com/onflow/flow-go/engine/testutil/mock" @@ -22,6 +20,7 @@ import ( "github.com/onflow/flow-go/model/flow/order" "github.com/onflow/flow-go/model/messages" "github.com/onflow/flow-go/module/signature" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/mocknetwork" "github.com/onflow/flow-go/network/stub" "github.com/onflow/flow-go/state/cluster" diff --git a/engine/execution/ingestion/engine.go b/engine/execution/ingestion/engine.go index b566d889549..811bbac1f60 100644 --- a/engine/execution/ingestion/engine.go +++ b/engine/execution/ingestion/engine.go @@ -11,8 +11,6 @@ import ( "github.com/rs/zerolog" "github.com/rs/zerolog/log" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/crypto/hash" "github.com/onflow/flow-go/engine" @@ -31,6 +29,7 @@ import ( "github.com/onflow/flow-go/module/mempool/stdmap" "github.com/onflow/flow-go/module/trace" 
"github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/state/protocol" psEvents "github.com/onflow/flow-go/state/protocol/events" "github.com/onflow/flow-go/storage" diff --git a/engine/execution/ingestion/engine_test.go b/engine/execution/ingestion/engine_test.go index 1b11292646d..59495f851e5 100644 --- a/engine/execution/ingestion/engine_test.go +++ b/engine/execution/ingestion/engine_test.go @@ -14,8 +14,6 @@ import ( "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/engine/execution" computation "github.com/onflow/flow-go/engine/execution/computation/mock" @@ -32,6 +30,7 @@ import ( module "github.com/onflow/flow-go/module/mocks" "github.com/onflow/flow-go/module/signature" "github.com/onflow/flow-go/module/trace" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/mocknetwork" stateProtocol "github.com/onflow/flow-go/state/protocol" protocol "github.com/onflow/flow-go/state/protocol/mock" diff --git a/engine/execution/provider/engine.go b/engine/execution/provider/engine.go index 7eddc4f8786..c34e0c21e1a 100644 --- a/engine/execution/provider/engine.go +++ b/engine/execution/provider/engine.go @@ -9,8 +9,6 @@ import ( "github.com/rs/zerolog" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/engine/execution/state" "github.com/onflow/flow-go/model/flow" @@ -19,6 +17,7 @@ import ( "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/trace" "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/storage" "github.com/onflow/flow-go/utils/logging" diff --git a/engine/ghost/client/ghost_client.go b/engine/ghost/client/ghost_client.go index e93b92f648a..8b08df80bc1 100644 --- 
a/engine/ghost/client/ghost_client.go +++ b/engine/ghost/client/ghost_client.go @@ -8,13 +8,12 @@ import ( "google.golang.org/grpc" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/utils/unittest" ghost "github.com/onflow/flow-go/engine/ghost/protobuf" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" ) // GhostClient is a client for the ghost node. diff --git a/engine/ghost/engine/handler.go b/engine/ghost/engine/handler.go index 99ae48b8241..6d1bc2b17d6 100644 --- a/engine/ghost/engine/handler.go +++ b/engine/ghost/engine/handler.go @@ -9,11 +9,10 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - "github.com/onflow/flow-go/network/channels" - ghost "github.com/onflow/flow-go/engine/ghost/protobuf" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" ) // Handler handles the GRPC calls from a client diff --git a/engine/ghost/engine/rpc.go b/engine/ghost/engine/rpc.go index 5909c9dba63..9044c5b3d8d 100644 --- a/engine/ghost/engine/rpc.go +++ b/engine/ghost/engine/rpc.go @@ -7,15 +7,13 @@ import ( "github.com/rs/zerolog" "google.golang.org/grpc" - "github.com/onflow/flow-go/network/channels" - - cborcodec "github.com/onflow/flow-go/network/codec/cbor" - "github.com/onflow/flow-go/engine" ghost "github.com/onflow/flow-go/engine/ghost/protobuf" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" + cborcodec "github.com/onflow/flow-go/network/codec/cbor" "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/utils/grpcutils" ) diff --git a/engine/testutil/nodes.go b/engine/testutil/nodes.go index f2cec55337b..0c5e0cb1b47 100644 --- a/engine/testutil/nodes.go +++ b/engine/testutil/nodes.go @@ -12,8 +12,6 @@ import ( "github.com/stretchr/testify/mock" 
"github.com/stretchr/testify/require" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/consensus" "github.com/onflow/flow-go/consensus/hotstuff" mockhotstuff "github.com/onflow/flow-go/consensus/hotstuff/mocks" @@ -71,6 +69,7 @@ import ( chainsync "github.com/onflow/flow-go/module/synchronization" "github.com/onflow/flow-go/module/trace" "github.com/onflow/flow-go/module/validation" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/p2p" "github.com/onflow/flow-go/network/stub" "github.com/onflow/flow-go/state/protocol" diff --git a/engine/verification/requester/requester.go b/engine/verification/requester/requester.go index 75e9100ebcd..2f3fb1b620e 100644 --- a/engine/verification/requester/requester.go +++ b/engine/verification/requester/requester.go @@ -8,8 +8,6 @@ import ( "github.com/rs/zerolog" "golang.org/x/exp/rand" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/engine/verification/fetcher" "github.com/onflow/flow-go/model/flow" @@ -19,6 +17,7 @@ import ( "github.com/onflow/flow-go/module/mempool" "github.com/onflow/flow-go/module/trace" "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/utils/logging" ) diff --git a/engine/verification/requester/requester_test.go b/engine/verification/requester/requester_test.go index a6f5e79e20f..4fbf013c915 100644 --- a/engine/verification/requester/requester_test.go +++ b/engine/verification/requester/requester_test.go @@ -9,8 +9,6 @@ import ( testifymock "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - "github.com/onflow/flow-go/network/channels" - mockfetcher "github.com/onflow/flow-go/engine/verification/fetcher/mock" "github.com/onflow/flow-go/engine/verification/requester" vertestutils "github.com/onflow/flow-go/engine/verification/utils/unittest" @@ -23,6 +21,7 @@ 
import ( mempool "github.com/onflow/flow-go/module/mempool/mock" "github.com/onflow/flow-go/module/mock" "github.com/onflow/flow-go/module/trace" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/mocknetwork" protocol "github.com/onflow/flow-go/state/protocol/mock" "github.com/onflow/flow-go/utils/unittest" diff --git a/engine/verification/utils/unittest/helper.go b/engine/verification/utils/unittest/helper.go index 0f5ed286a16..d5b596ee5ec 100644 --- a/engine/verification/utils/unittest/helper.go +++ b/engine/verification/utils/unittest/helper.go @@ -13,8 +13,6 @@ import ( testifymock "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/engine/testutil" @@ -30,6 +28,7 @@ import ( "github.com/onflow/flow-go/module/mock" "github.com/onflow/flow-go/module/trace" "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/mocknetwork" "github.com/onflow/flow-go/network/stub" "github.com/onflow/flow-go/state/protocol" diff --git a/engine/verification/verifier/engine.go b/engine/verification/verifier/engine.go index 4b60e42c94f..a7e4261f852 100644 --- a/engine/verification/verifier/engine.go +++ b/engine/verification/verifier/engine.go @@ -7,8 +7,6 @@ import ( "github.com/opentracing/opentracing-go/log" "github.com/rs/zerolog" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/crypto/hash" "github.com/onflow/flow-go/engine" @@ -22,6 +20,7 @@ import ( "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/trace" "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/storage" "github.com/onflow/flow-go/utils/logging" diff --git 
a/engine/verification/verifier/engine_test.go b/engine/verification/verifier/engine_test.go index 858d6776629..fe3dee4edf9 100644 --- a/engine/verification/verifier/engine_test.go +++ b/engine/verification/verifier/engine_test.go @@ -12,8 +12,6 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/crypto/hash" "github.com/onflow/flow-go/engine/testutil/mocklocal" @@ -25,6 +23,7 @@ import ( realModule "github.com/onflow/flow-go/module" mockmodule "github.com/onflow/flow-go/module/mock" "github.com/onflow/flow-go/module/trace" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/mocknetwork" protocol "github.com/onflow/flow-go/state/protocol/mock" mockstorage "github.com/onflow/flow-go/storage/mock" diff --git a/module/metrics/example/collection/main.go b/module/metrics/example/collection/main.go index c0fb3ab2cde..6eeab839548 100644 --- a/module/metrics/example/collection/main.go +++ b/module/metrics/example/collection/main.go @@ -6,11 +6,10 @@ import ( "github.com/rs/zerolog" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/module/metrics/example" "github.com/onflow/flow-go/module/trace" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/queue" "github.com/onflow/flow-go/utils/unittest" ) diff --git a/module/metrics/example/consensus/main.go b/module/metrics/example/consensus/main.go index 8b559fbd187..582b839ab41 100644 --- a/module/metrics/example/consensus/main.go +++ b/module/metrics/example/consensus/main.go @@ -8,12 +8,11 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/rs/zerolog" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/module/metrics/example" 
"github.com/onflow/flow-go/module/trace" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/utils/unittest" ) diff --git a/network/p2p/dht_test.go b/network/p2p/dht_test.go index e7fce6d88d6..ab1602534db 100644 --- a/network/p2p/dht_test.go +++ b/network/p2p/dht_test.go @@ -12,9 +12,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/onflow/flow-go/network/channels" - libp2pmsg "github.com/onflow/flow-go/model/libp2p/message" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/message" "github.com/onflow/flow-go/network/p2p" "github.com/onflow/flow-go/utils/unittest" diff --git a/network/p2p/libp2pNode.go b/network/p2p/libp2pNode.go index f314bf79b75..2bcba4df7fb 100644 --- a/network/p2p/libp2pNode.go +++ b/network/p2p/libp2pNode.go @@ -18,9 +18,8 @@ import ( pubsub "github.com/libp2p/go-libp2p-pubsub" "github.com/rs/zerolog" - "github.com/onflow/flow-go/network/channels" - flownet "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/p2p/unicast" validator "github.com/onflow/flow-go/network/validator/pubsub" ) diff --git a/network/p2p/middleware.go b/network/p2p/middleware.go index 3e21f428b97..7297d9b7218 100644 --- a/network/p2p/middleware.go +++ b/network/p2p/middleware.go @@ -19,14 +19,13 @@ import ( "github.com/libp2p/go-libp2p-core/protocol" "github.com/rs/zerolog" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/component" "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/message" "github.com/onflow/flow-go/network/p2p/unicast" "github.com/onflow/flow-go/network/validator" diff --git a/network/p2p/network.go 
b/network/p2p/network.go index a212a063c6a..79e1c33add1 100644 --- a/network/p2p/network.go +++ b/network/p2p/network.go @@ -12,10 +12,7 @@ import ( "github.com/libp2p/go-libp2p-core/protocol" "github.com/rs/zerolog" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/crypto/hash" - "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" "github.com/onflow/flow-go/module" @@ -24,6 +21,7 @@ import ( "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/network" netcache "github.com/onflow/flow-go/network/cache" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/message" "github.com/onflow/flow-go/network/p2p/conduit" "github.com/onflow/flow-go/network/queue" diff --git a/network/p2p/sporking_test.go b/network/p2p/sporking_test.go index a166ace8983..54c2f769ade 100644 --- a/network/p2p/sporking_test.go +++ b/network/p2p/sporking_test.go @@ -11,10 +11,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/onflow/flow-go/network/channels" - libp2pmessage "github.com/onflow/flow-go/model/libp2p/message" - + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/message" "github.com/onflow/flow-go/network/p2p" "github.com/onflow/flow-go/utils/unittest" diff --git a/network/p2p/topic_validator_test.go b/network/p2p/topic_validator_test.go index 0702af418ea..319cb45e3b4 100644 --- a/network/p2p/topic_validator_test.go +++ b/network/p2p/topic_validator_test.go @@ -14,10 +14,9 @@ import ( "github.com/rs/zerolog" "github.com/stretchr/testify/require" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/messages" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/message" "github.com/onflow/flow-go/network/p2p" validator "github.com/onflow/flow-go/network/validator/pubsub" diff --git 
a/network/proxy/network_test.go b/network/proxy/network_test.go index e5d9046398a..387009919d3 100644 --- a/network/proxy/network_test.go +++ b/network/proxy/network_test.go @@ -6,10 +6,9 @@ import ( "github.com/stretchr/testify/mock" "github.com/stretchr/testify/suite" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/mocknetwork" "github.com/onflow/flow-go/network/proxy" "github.com/onflow/flow-go/utils/unittest" diff --git a/network/stub/network.go b/network/stub/network.go index 966ee1f7c1a..4af5a6f6260 100644 --- a/network/stub/network.go +++ b/network/stub/network.go @@ -10,10 +10,9 @@ import ( "github.com/pkg/errors" "github.com/stretchr/testify/require" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/mocknetwork" "github.com/onflow/flow-go/network/p2p/conduit" ) diff --git a/network/test/echoengine_test.go b/network/test/echoengine_test.go index 7dad62eb6ba..4c76729d6b6 100644 --- a/network/test/echoengine_test.go +++ b/network/test/echoengine_test.go @@ -15,11 +15,10 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/libp2p/message" "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/utils/unittest" ) diff --git a/network/test/meshengine_test.go b/network/test/meshengine_test.go index be68c01963b..f8c68fcb752 100644 --- a/network/test/meshengine_test.go +++ b/network/test/meshengine_test.go @@ -18,13 +18,12 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - "github.com/onflow/flow-go/network/channels" 
- "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" "github.com/onflow/flow-go/model/libp2p/message" "github.com/onflow/flow-go/module/observable" "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/p2p" "github.com/onflow/flow-go/utils/unittest" ) diff --git a/network/test/middleware_test.go b/network/test/middleware_test.go index a14a70ad5ff..fd08ecb3be0 100644 --- a/network/test/middleware_test.go +++ b/network/test/middleware_test.go @@ -15,8 +15,6 @@ import ( "github.com/stretchr/testify/mock" mockery "github.com/stretchr/testify/mock" - "github.com/onflow/flow-go/network/channels" - "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" @@ -27,6 +25,7 @@ import ( "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/module/observable" "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/codec/cbor" "github.com/onflow/flow-go/network/message" "github.com/onflow/flow-go/network/mocknetwork" diff --git a/network/test/testUtil.go b/network/test/testUtil.go index 6ed537be9b6..da68dd8f3c1 100644 --- a/network/test/testUtil.go +++ b/network/test/testUtil.go @@ -19,8 +19,6 @@ import ( "github.com/rs/zerolog" "github.com/stretchr/testify/require" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/model/flow" @@ -35,6 +33,7 @@ import ( "github.com/onflow/flow-go/module/util" "github.com/onflow/flow-go/network" netcache "github.com/onflow/flow-go/network/cache" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/codec/cbor" "github.com/onflow/flow-go/network/p2p" "github.com/onflow/flow-go/network/p2p/unicast" diff --git a/network/topology/cache_test.go b/network/topology/cache_test.go index e74c5c7c343..45c2242a8d4 100644 --- a/network/topology/cache_test.go +++ 
b/network/topology/cache_test.go @@ -9,10 +9,9 @@ import ( "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/mocknetwork" mockprotocol "github.com/onflow/flow-go/state/protocol/mock" "github.com/onflow/flow-go/utils/unittest" diff --git a/network/topology/helper.go b/network/topology/helper.go index 7b46b1fc509..e02af6e54e6 100644 --- a/network/topology/helper.go +++ b/network/topology/helper.go @@ -8,12 +8,11 @@ import ( "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/factory" "github.com/onflow/flow-go/model/flow/filter" "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/mocknetwork" "github.com/onflow/flow-go/state/protocol" mockprotocol "github.com/onflow/flow-go/state/protocol/mock" diff --git a/network/topology/randomizedTopology_test.go b/network/topology/randomizedTopology_test.go index 6df7c67e480..cd9079b95dc 100644 --- a/network/topology/randomizedTopology_test.go +++ b/network/topology/randomizedTopology_test.go @@ -9,10 +9,9 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/utils/unittest" ) diff --git a/network/topology/topicBasedTopology.go b/network/topology/topicBasedTopology.go index d2c2e5d1822..7718e88f92e 100644 --- a/network/topology/topicBasedTopology.go +++ b/network/topology/topicBasedTopology.go @@ -5,11 +5,10 
@@ import ( "github.com/rs/zerolog" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/state/protocol" ) diff --git a/network/topology/topicBasedTopology_test.go b/network/topology/topicBasedTopology_test.go index 61b8c69bb9a..240428d24a7 100644 --- a/network/topology/topicBasedTopology_test.go +++ b/network/topology/topicBasedTopology_test.go @@ -9,10 +9,9 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/utils/unittest" ) diff --git a/utils/unittest/network/network.go b/utils/unittest/network/network.go index 8b8b40eb525..aa9541e57de 100644 --- a/utils/unittest/network/network.go +++ b/utils/unittest/network/network.go @@ -5,10 +5,9 @@ import ( "github.com/stretchr/testify/mock" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/mocknetwork" ) From 0f547e06d43097d5899d0359675a8ab783f8736b Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Wed, 29 Jun 2022 10:04:41 -0400 Subject: [PATCH 058/223] fix grpc insecure lint error --- apiproxy/access_api_proxy_test.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/apiproxy/access_api_proxy_test.go b/apiproxy/access_api_proxy_test.go index e8ee18f9258..85be5054c09 100644 --- a/apiproxy/access_api_proxy_test.go +++ b/apiproxy/access_api_proxy_test.go @@ -9,6 +9,7 @@ import ( "github.com/onflow/flow/protobuf/go/flow/access" "google.golang.org/grpc" + grpcinsecure 
"google.golang.org/grpc/credentials/insecure" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/utils/grpcutils" @@ -245,7 +246,7 @@ func openFlowLite(address string) error { c, err := grpc.Dial( "unix://"+address, grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(grpcutils.DefaultMaxMsgSize)), - grpc.WithInsecure()) //nolint:staticcheck + grpc.WithTransportCredentials(grpcinsecure.NewCredentials())) if err != nil { return err } From 4abf3d7225a386ffa9e828162914ae8189f68830 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Wed, 29 Jun 2022 10:16:54 -0400 Subject: [PATCH 059/223] sort channels package imports --- engine/common/synchronization/request_handler_engine.go | 3 +-- insecure/attacknetwork/attackNetwork.go | 3 +-- insecure/corruptible/conduit_test.go | 3 +-- insecure/corruptible/factory.go | 3 +-- insecure/corruptible/factory_test.go | 3 +-- insecure/fixtures.go | 3 +-- insecure/integration/test/composability_test.go | 3 +-- insecure/wintermute/attackOrchestrator_test.go | 3 +-- insecure/wintermute/helpers.go | 3 +-- network/network.go | 3 +-- network/p2p/libp2pNodeBuilder.go | 3 +-- network/p2p/subscription_filter.go | 3 +-- network/p2p/subscription_filter_test.go | 3 +-- network/relay/network.go | 3 +-- network/relay/relayer.go | 3 +-- network/test/blob_service_test.go | 6 ++---- network/validator/pubsub/authorized_sender_validator.go | 6 ++---- .../validator/pubsub/authorized_sender_validator_test.go | 6 ++---- 18 files changed, 21 insertions(+), 42 deletions(-) diff --git a/engine/common/synchronization/request_handler_engine.go b/engine/common/synchronization/request_handler_engine.go index 1f1de64f4ce..95c49fd4442 100644 --- a/engine/common/synchronization/request_handler_engine.go +++ b/engine/common/synchronization/request_handler_engine.go @@ -5,12 +5,11 @@ import ( "github.com/rs/zerolog" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/messages" 
"github.com/onflow/flow-go/module" "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/storage" ) diff --git a/insecure/attacknetwork/attackNetwork.go b/insecure/attacknetwork/attackNetwork.go index 7eccff3bd6c..c9eeb45e5ab 100644 --- a/insecure/attacknetwork/attackNetwork.go +++ b/insecure/attacknetwork/attackNetwork.go @@ -7,13 +7,12 @@ import ( "github.com/hashicorp/go-multierror" "github.com/rs/zerolog" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/insecure" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/component" "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/utils/logging" ) diff --git a/insecure/corruptible/conduit_test.go b/insecure/corruptible/conduit_test.go index f9b08b92415..da3e32dfaec 100644 --- a/insecure/corruptible/conduit_test.go +++ b/insecure/corruptible/conduit_test.go @@ -8,10 +8,9 @@ import ( "github.com/stretchr/testify/require" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/insecure" mockinsecure "github.com/onflow/flow-go/insecure/mock" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/utils/unittest" ) diff --git a/insecure/corruptible/factory.go b/insecure/corruptible/factory.go index 5aa25c3beb8..53953cb3e4b 100644 --- a/insecure/corruptible/factory.go +++ b/insecure/corruptible/factory.go @@ -12,8 +12,6 @@ import ( "github.com/rs/zerolog" "google.golang.org/grpc" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/crypto/hash" "github.com/onflow/flow-go/engine/execution/ingestion" "github.com/onflow/flow-go/engine/execution/state/delta" @@ -26,6 +24,7 @@ import ( "github.com/onflow/flow-go/module/component" "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" 
"github.com/onflow/flow-go/utils/logging" ) diff --git a/insecure/corruptible/factory_test.go b/insecure/corruptible/factory_test.go index fa222f2e5ea..30812ff516e 100644 --- a/insecure/corruptible/factory_test.go +++ b/insecure/corruptible/factory_test.go @@ -15,13 +15,12 @@ import ( "google.golang.org/grpc" grpcinsecure "google.golang.org/grpc/credentials/insecure" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/engine/testutil" "github.com/onflow/flow-go/insecure" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/libp2p/message" "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/codec/cbor" "github.com/onflow/flow-go/network/mocknetwork" "github.com/onflow/flow-go/utils/unittest" diff --git a/insecure/fixtures.go b/insecure/fixtures.go index 89d39d5b99c..8b5372bf5bc 100644 --- a/insecure/fixtures.go +++ b/insecure/fixtures.go @@ -7,11 +7,10 @@ import ( "github.com/stretchr/testify/require" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/libp2p/message" "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/utils/unittest" ) diff --git a/insecure/integration/test/composability_test.go b/insecure/integration/test/composability_test.go index effec56b39a..6f9bfb9e862 100644 --- a/insecure/integration/test/composability_test.go +++ b/insecure/integration/test/composability_test.go @@ -9,8 +9,6 @@ import ( "github.com/stretchr/testify/require" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/engine/testutil" "github.com/onflow/flow-go/insecure" "github.com/onflow/flow-go/insecure/attacknetwork" @@ -18,6 +16,7 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/libp2p/message" "github.com/onflow/flow-go/module/irrecoverable" + 
"github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/codec/cbor" "github.com/onflow/flow-go/network/stub" "github.com/onflow/flow-go/utils/unittest" diff --git a/insecure/wintermute/attackOrchestrator_test.go b/insecure/wintermute/attackOrchestrator_test.go index 573fde600a5..6e832818cab 100644 --- a/insecure/wintermute/attackOrchestrator_test.go +++ b/insecure/wintermute/attackOrchestrator_test.go @@ -8,13 +8,12 @@ import ( "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/insecure" mockinsecure "github.com/onflow/flow-go/insecure/mock" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" "github.com/onflow/flow-go/model/messages" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/utils/unittest" ) diff --git a/insecure/wintermute/helpers.go b/insecure/wintermute/helpers.go index 896dc2cebe8..eece69c90d3 100644 --- a/insecure/wintermute/helpers.go +++ b/insecure/wintermute/helpers.go @@ -5,8 +5,6 @@ import ( "github.com/stretchr/testify/require" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/engine/testutil" enginemock "github.com/onflow/flow-go/engine/testutil/mock" "github.com/onflow/flow-go/insecure" @@ -14,6 +12,7 @@ import ( "github.com/onflow/flow-go/model/messages" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/module/trace" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/utils/unittest" ) diff --git a/network/network.go b/network/network.go index c087031c285..dc6a0ce7335 100644 --- a/network/network.go +++ b/network/network.go @@ -4,10 +4,9 @@ import ( "github.com/ipfs/go-datastore" "github.com/libp2p/go-libp2p-core/protocol" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/component" + "github.com/onflow/flow-go/network/channels" 
) // Network represents the network layer of the node. It allows processes that diff --git a/network/p2p/libp2pNodeBuilder.go b/network/p2p/libp2pNodeBuilder.go index ecb6891c69c..d581305bb26 100644 --- a/network/p2p/libp2pNodeBuilder.go +++ b/network/p2p/libp2pNodeBuilder.go @@ -21,13 +21,12 @@ import ( madns "github.com/multiformats/go-multiaddr-dns" "github.com/rs/zerolog" - "github.com/onflow/flow-go/network/channels" - fcrypto "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/crypto/hash" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/id" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/p2p/keyutils" "github.com/onflow/flow-go/network/p2p/unicast" ) diff --git a/network/p2p/subscription_filter.go b/network/p2p/subscription_filter.go index bba023afbda..c44bff568b3 100644 --- a/network/p2p/subscription_filter.go +++ b/network/p2p/subscription_filter.go @@ -4,10 +4,9 @@ import ( "github.com/libp2p/go-libp2p-core/peer" pb "github.com/libp2p/go-libp2p-pubsub/pb" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/id" + "github.com/onflow/flow-go/network/channels" ) // RoleBasedFilter implements a subscription filter that filters subscriptions based on a node's role. 
diff --git a/network/p2p/subscription_filter_test.go b/network/p2p/subscription_filter_test.go index 9095634f10f..6fa33b60589 100644 --- a/network/p2p/subscription_filter_test.go +++ b/network/p2p/subscription_filter_test.go @@ -11,10 +11,9 @@ import ( "github.com/rs/zerolog" "github.com/stretchr/testify/require" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/id" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/p2p" "github.com/onflow/flow-go/utils/unittest" ) diff --git a/network/relay/network.go b/network/relay/network.go index 37b1b6f208a..7fc155d55f6 100644 --- a/network/relay/network.go +++ b/network/relay/network.go @@ -7,11 +7,10 @@ import ( "github.com/libp2p/go-libp2p-core/protocol" "github.com/rs/zerolog" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/util" "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" ) type RelayNetwork struct { diff --git a/network/relay/relayer.go b/network/relay/relayer.go index d64028ffae6..01ddb48f9ef 100644 --- a/network/relay/relayer.go +++ b/network/relay/relayer.go @@ -5,10 +5,9 @@ import ( "golang.org/x/sync/errgroup" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" ) type Relayer struct { diff --git a/network/test/blob_service_test.go b/network/test/blob_service_test.go index 39db5c449db..d0938856089 100644 --- a/network/test/blob_service_test.go +++ b/network/test/blob_service_test.go @@ -15,16 +15,14 @@ import ( "github.com/stretchr/testify/suite" "go.uber.org/atomic" - "github.com/onflow/flow-go/network/channels" - - "github.com/onflow/flow-go/utils/unittest" - "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/blobs" 
"github.com/onflow/flow-go/module/util" "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/p2p" "github.com/onflow/flow-go/network/topology" + "github.com/onflow/flow-go/utils/unittest" ) // conditionalTopology is a topology that behaves like the underlying topology when the condition is true, diff --git a/network/validator/pubsub/authorized_sender_validator.go b/network/validator/pubsub/authorized_sender_validator.go index 299e202efd5..399b3d41f31 100644 --- a/network/validator/pubsub/authorized_sender_validator.go +++ b/network/validator/pubsub/authorized_sender_validator.go @@ -11,12 +11,10 @@ import ( pubsub "github.com/libp2p/go-libp2p-pubsub" "github.com/rs/zerolog" - "github.com/onflow/flow-go/network/slashing" - + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/message" - - "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/network/slashing" ) var ( diff --git a/network/validator/pubsub/authorized_sender_validator_test.go b/network/validator/pubsub/authorized_sender_validator_test.go index d6362bb1f58..55bfacc9f38 100644 --- a/network/validator/pubsub/authorized_sender_validator_test.go +++ b/network/validator/pubsub/authorized_sender_validator_test.go @@ -6,12 +6,10 @@ import ( "github.com/stretchr/testify/suite" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/messages" "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/message" - - "github.com/onflow/flow-go/model/messages" - - "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/utils/unittest" ) From 5ec414291867e014c10e7f1c97f98300023f4c4e Mon Sep 17 00:00:00 2001 From: danielholmes839 Date: Wed, 29 Jun 2022 11:20:39 -0400 Subject: [PATCH 060/223] update to read only channel --- consensus/hotstuff/follower_loop.go | 2 +- module/mock/hot_stuff_follower.go | 8 ++++---- 2 files 
changed, 5 insertions(+), 5 deletions(-) diff --git a/consensus/hotstuff/follower_loop.go b/consensus/hotstuff/follower_loop.go index ad46de4d11f..3c9cdffe358 100644 --- a/consensus/hotstuff/follower_loop.go +++ b/consensus/hotstuff/follower_loop.go @@ -40,7 +40,7 @@ func NewFollowerLoop(log zerolog.Logger, followerLogic FollowerLogic) (*Follower // // Block proposals must be submitted in order, i.e. a proposal's parent must // have been previously processed by the FollowerLoop. -func (fl *FollowerLoop) SubmitProposal(proposalHeader *flow.Header, parentView uint64) chan struct{} { +func (fl *FollowerLoop) SubmitProposal(proposalHeader *flow.Header, parentView uint64) <-chan struct{} { received := time.Now() proposal := proposalTask{ Proposal: model.ProposalFromFlow(proposalHeader, parentView), diff --git a/module/mock/hot_stuff_follower.go b/module/mock/hot_stuff_follower.go index 02838ee8136..6a779e4e8c7 100644 --- a/module/mock/hot_stuff_follower.go +++ b/module/mock/hot_stuff_follower.go @@ -45,15 +45,15 @@ func (_m *HotStuffFollower) Ready() <-chan struct{} { } // SubmitProposal provides a mock function with given fields: proposal, parentView -func (_m *HotStuffFollower) SubmitProposal(proposal *flow.Header, parentView uint64) chan struct{} { +func (_m *HotStuffFollower) SubmitProposal(proposal *flow.Header, parentView uint64) <-chan struct{} { ret := _m.Called(proposal, parentView) - var r0 chan struct{} - if rf, ok := ret.Get(0).(func(*flow.Header, uint64) chan struct{}); ok { + var r0 <-chan struct{} + if rf, ok := ret.Get(0).(func(*flow.Header, uint64) <-chan struct{}); ok { r0 = rf(proposal, parentView) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(chan struct{}) + r0 = ret.Get(0).(<-chan struct{}) } } From 777af0f8a6975261a521009d5aba110cb21d4f26 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Wed, 29 Jun 2022 11:50:01 -0400 Subject: [PATCH 061/223] improve error documentation for middleware - add BenignNetworkingError sentinel that covers the 
entire class of benign networking errors - add MiddlewareStartError sentinel that covers the entire class of errors returned from Middleware.start - return appropriate sentinel errors and update godocs --- network/errors.go | 27 ++++++++++++- network/p2p/errors.go | 28 ++++++++++++++ network/p2p/middleware.go | 79 ++++++++++++++++++++++++++++++--------- 3 files changed, 115 insertions(+), 19 deletions(-) create mode 100644 network/p2p/errors.go diff --git a/network/errors.go b/network/errors.go index 5eb23592c3c..84bd25a0f7a 100644 --- a/network/errors.go +++ b/network/errors.go @@ -1,7 +1,32 @@ package network -import "errors" +import ( + "errors" + "fmt" +) var ( EmptyTargetList = errors.New("target list empty") ) + +// BenignNetworkingError covers the entire class of benign networking errors. +// This error is only returned, if the networking layer is still fully functional +// despite the encountering the error condition. +type BenignNetworkingError struct { + err error +} + +func NewBenignNetworkingErrorf(msg string, args ...interface{}) error { + return BenignNetworkingError{ + err: fmt.Errorf(msg, args...), + } +} + +func (e BenignNetworkingError) Error() string { return e.err.Error() } +func (e BenignNetworkingError) Unwrap() error { return e.err } + +// IsBenignNetworkingError returns whether err is an BenignNetworkingError +func IsBenignNetworkingError(err error) bool { + var e BenignNetworkingError + return errors.As(err, &e) +} diff --git a/network/p2p/errors.go b/network/p2p/errors.go new file mode 100644 index 00000000000..adf708ce174 --- /dev/null +++ b/network/p2p/errors.go @@ -0,0 +1,28 @@ +package p2p + +import ( + "errors" + "fmt" +) + +// MiddlewareStartError covers the entire class of errors returned from Middleware.start . +// This error is returned if the Middleware fails to start for any reason. +// The networking layer will not be functional if this error condition is encountered. 
+type MiddlewareStartError struct { + err error +} + +func NewMiddlewareStartErrorf(msg string, args ...interface{}) error { + return MiddlewareStartError{ + err: fmt.Errorf(msg, args...), + } +} + +func (e MiddlewareStartError) Error() string { return e.err.Error() } +func (e MiddlewareStartError) Unwrap() error { return e.err } + +// IsMiddlewareStartError returns whether err is an MiddlewareStartError +func IsMiddlewareStartError(err error) bool { + var e MiddlewareStartError + return errors.As(err, &e) +} diff --git a/network/p2p/middleware.go b/network/p2p/middleware.go index fbc9fa3ed9d..b9e404f0435 100644 --- a/network/p2p/middleware.go +++ b/network/p2p/middleware.go @@ -64,6 +64,8 @@ var ( // allowAll is a peerFilterFunc that will always return true for all peer ids. // This filter is used to allow communication by all roles on public network channels. allowAll = func(_ peer.ID) bool { return true } + + ErrMissingOverlay = errors.New("overlay must be configured by calling SetOverlay before middleware can be started") ) // peerFilterFunc is a func type that will be used in the TopicValidator to filter @@ -128,6 +130,8 @@ func WithPeerManager(peerManagerFunc PeerManagerFactoryFunc) MiddlewareOption { // connectionGating if set to True, restricts this node to only talk to other nodes which are part of the identity list // managePeerConnections if set to True, enables the default PeerManager which continuously updates the node's peer connections // validators are the set of the different message validators that each inbound messages is passed through +// During normal operations, the following errors are expected to be thrown by the irrecoverable.SignalerContext: +// * MiddlewareStartError if middleware fails to start for any reason. 
func NewMiddleware( log zerolog.Logger, libP2PNodeFactory LibP2PFactoryFunc, @@ -209,6 +213,8 @@ func (m *Middleware) NewPingService(pingProtocol protocol.ID, provider network.P return NewPingService(m.libP2PNode.Host(), pingProtocol, m.log, provider) } +// topologyPeers callback used by the peer manager that the list of peer ID's +// which this node should be directly connected to as peers. func (m *Middleware) topologyPeers() (peer.IDSlice, error) { identities, err := m.ov.Topology() if err != nil { @@ -242,8 +248,15 @@ func (m *Middleware) Me() flow.Identifier { } // GetIPPort returns the ip address and port number associated with the middleware +// During normal operations, the following benign errors are expected: +// * BenignNetworkingError if the libP2P node fails to return the IP and port. func (m *Middleware) GetIPPort() (string, string, error) { - return m.libP2PNode.GetIPPort() + ipOrHostname, port, err := m.libP2PNode.GetIPPort() + if err != nil { + return "", "", network.NewBenignNetworkingErrorf("failed to get ip and port from libP2P node: %w", err) + } + + return ipOrHostname, port, nil } func (m *Middleware) UpdateNodeAddresses() { @@ -276,20 +289,27 @@ func (m *Middleware) SetOverlay(ov network.Overlay) { } // start will start the middleware. +// During normal operations, the following errors are expected: +// * MiddlewareStartError if +// - the overlay is not set on the Middleware. +// - creating the libP2P node fails. +// - registering the preferred unicast protocols fails. +// - creating the peer manager fails. +// - starting the peer manager fails. 
func (m *Middleware) start(ctx context.Context) error { if m.ov == nil { - return errors.New("overlay must be configured by calling SetOverlay before middleware can be started") + return NewMiddlewareStartErrorf("could not start middleware: %w", ErrMissingOverlay) } libP2PNode, err := m.libP2PNodeFactory(ctx) if err != nil { - return fmt.Errorf("could not create libp2p node: %w", err) + return NewMiddlewareStartErrorf("could not create libp2p node: %w", err) } m.libP2PNode = libP2PNode err = m.libP2PNode.WithDefaultUnicastProtocol(m.handleIncomingStream, m.preferredUnicasts) if err != nil { - return fmt.Errorf("could not register preferred unicast protocols on libp2p node: %w", err) + return NewMiddlewareStartErrorf("could not register preferred unicast protocols on libp2p node: %w", err) } m.UpdateNodeAddresses() @@ -298,14 +318,14 @@ func (m *Middleware) start(ctx context.Context) error { if m.peerManagerFactory != nil { m.peerManager, err = m.peerManagerFactory(m.libP2PNode.host, m.topologyPeers, m.log) if err != nil { - return fmt.Errorf("failed to create peer manager: %w", err) + return NewMiddlewareStartErrorf("failed to create peer manager: %w", err) } select { case <-m.peerManager.Ready(): m.log.Debug().Msg("peer manager successfully started") case <-time.After(30 * time.Second): - return fmt.Errorf("could not start peer manager") + return NewMiddlewareStartErrorf("could not start peer manager") } } @@ -340,11 +360,21 @@ func (m *Middleware) stop() { // // Dispatch should be used whenever guaranteed delivery to a specific target is required. Otherwise, Publish is // a more efficient candidate. +// +// During normal operations, the following benign errors are expected: +// * BenignNetworkingError if +// - the peer ID for the target node ID cannot be found. +// - the msg size exceeds result returned from unicastMaxMsgSize(msg) +// - the libP2P node fails to publish the message. +// - the libP2P node fails to create the stream. 
+// - setting write deadline on the stream fails. +// - the gogo protobuf writer fails to write the message. +// - flushing the stream fails. func (m *Middleware) SendDirect(msg *message.Message, targetID flow.Identifier) (err error) { // translates identifier to peer id peerID, err := m.idTranslator.GetPeerID(targetID) if err != nil { - return fmt.Errorf("could not find peer id for target id: %w", err) + return network.NewBenignNetworkingErrorf("could not find peer id for target id: %w", err) } maxMsgSize := unicastMaxMsgSize(msg) @@ -352,7 +382,7 @@ func (m *Middleware) SendDirect(msg *message.Message, targetID flow.Identifier) // message size goes beyond maximum size that the serializer can handle. // proceeding with this message results in closing the connection by the target side, and // delivery failure. - return fmt.Errorf("message size %d exceeds configured max message size %d", msg.Size(), maxMsgSize) + return network.NewBenignNetworkingErrorf("message size %d exceeds configured max message size %d", msg.Size(), maxMsgSize) } maxTimeout := m.unicastMaxMsgDuration(msg) @@ -376,7 +406,7 @@ func (m *Middleware) SendDirect(msg *message.Message, targetID flow.Identifier) // sent out the receiver stream, err := m.libP2PNode.CreateStream(ctx, peerID) if err != nil { - return fmt.Errorf("failed to create stream for %s: %w", targetID, err) + return network.NewBenignNetworkingErrorf("failed to create stream for %s: %w", targetID, err) } success := false @@ -402,7 +432,7 @@ func (m *Middleware) SendDirect(msg *message.Message, targetID flow.Identifier) deadline, _ := ctx.Deadline() err = stream.SetWriteDeadline(deadline) if err != nil { - return fmt.Errorf("failed to set write deadline for stream: %w", err) + return network.NewBenignNetworkingErrorf("failed to set write deadline for stream: %w", err) } // create a gogo protobuf writer @@ -411,13 +441,13 @@ func (m *Middleware) SendDirect(msg *message.Message, targetID flow.Identifier) err = writer.WriteMsg(msg) if 
err != nil { - return fmt.Errorf("failed to send message to %s: %w", targetID, err) + return network.NewBenignNetworkingErrorf("failed to send message to %s: %w", targetID, err) } // flush the stream err = bufw.Flush() if err != nil { - return fmt.Errorf("failed to flush stream for %s: %w", targetID, err) + return network.NewBenignNetworkingErrorf("failed to flush stream for %s: %w", targetID, err) } success = true @@ -525,6 +555,9 @@ func (m *Middleware) handleIncomingStream(s libp2pnetwork.Stream) { } // Subscribe subscribes the middleware to a channel. +// During normal operations, the following benign errors are expected: +// * BenignNetworkingError if the libP2P node fails to subscribe to the topic +// created from the provided channel. func (m *Middleware) Subscribe(channel network.Channel) error { topic := network.TopicFromChannel(channel, m.rootBlockID) @@ -548,7 +581,7 @@ func (m *Middleware) Subscribe(channel network.Channel) error { s, err := m.libP2PNode.Subscribe(topic, m.codec, peerFilter, validators...) if err != nil { - return fmt.Errorf("failed to subscribe for channel %s: %w", channel, err) + return network.NewBenignNetworkingErrorf("could not subscribe to topic (%s): %w", topic, err) } // create a new readSubscription with the context of the middleware @@ -565,11 +598,14 @@ func (m *Middleware) Subscribe(channel network.Channel) error { } // Unsubscribe unsubscribes the middleware from a channel. +// During normal operations, the following benign errors are expected: +// * BenignNetworkingError if the libP2P node fails to unsubscribe to the topic +// created from the provided channel. 
func (m *Middleware) Unsubscribe(channel network.Channel) error { topic := network.TopicFromChannel(channel, m.rootBlockID) err := m.libP2PNode.UnSubscribe(topic) if err != nil { - return fmt.Errorf("failed to unsubscribe from channel %s: %w", channel, err) + return network.NewBenignNetworkingErrorf("failed to unsubscribe from channel (%s): %w", channel, err) } // update peers to remove nodes subscribed to channel @@ -622,6 +658,11 @@ func (m *Middleware) processMessage(msg *message.Message, decodedMsgPayload inte // Publish publishes a message on the channel. It models a distributed broadcast where the message is meant for all or // a many nodes subscribing to the channel. It does not guarantee the delivery though, and operates on a best // effort. +// During normal operations, the following benign errors are expected: +// * BenignNetworkingError if +// - the msg cannot be marshalled. +// - the msg size exceeds DefaultMaxPubSubMsgSize. +// - the libP2P node fails to publish the message. 
func (m *Middleware) Publish(msg *message.Message, channel network.Channel) error { m.log.Debug().Str("channel", channel.String()).Interface("msg", msg).Msg("publishing new message") @@ -630,14 +671,14 @@ func (m *Middleware) Publish(msg *message.Message, channel network.Channel) erro data, err := msg.Marshal() //binstat.LeaveVal(bs, int64(len(data))) if err != nil { - return fmt.Errorf("failed to marshal the message: %w", err) + return network.NewBenignNetworkingErrorf("failed to marshal the message: %w", err) } msgSize := len(data) if msgSize > DefaultMaxPubSubMsgSize { // libp2p pubsub will silently drop the message if its size is greater than the configured pubsub max message size // hence return an error as this message is undeliverable - return fmt.Errorf("message size %d exceeds configured max message size %d", msgSize, DefaultMaxPubSubMsgSize) + return network.NewBenignNetworkingErrorf("message size %d exceeds configured max message size %d", msgSize, DefaultMaxPubSubMsgSize) } topic := network.TopicFromChannel(channel, m.rootBlockID) @@ -645,7 +686,7 @@ func (m *Middleware) Publish(msg *message.Message, channel network.Channel) erro // publish the bytes on the topic err = m.libP2PNode.Publish(m.ctx, topic, data) if err != nil { - return fmt.Errorf("failed to publish the message: %w", err) + return network.NewBenignNetworkingErrorf("failed to publish the message: %w", err) } m.metrics.NetworkMessageSent(len(data), string(channel), msg.Type) @@ -654,10 +695,12 @@ func (m *Middleware) Publish(msg *message.Message, channel network.Channel) erro } // IsConnected returns true if this node is connected to the node with id nodeID. +// During normal operations, the following benign errors are expected: +// * BenignNetworkingError if the peer ID for the target node ID cannot be found. 
func (m *Middleware) IsConnected(nodeID flow.Identifier) (bool, error) { peerID, err := m.idTranslator.GetPeerID(nodeID) if err != nil { - return false, fmt.Errorf("could not find peer id for target id: %w", err) + return false, network.NewBenignNetworkingErrorf("could not find peer id for target id: %w", err) } return m.libP2PNode.IsConnected(peerID) } From 2e23a748482faebb5219966b8c3b48627a3e1025 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Wed, 29 Jun 2022 11:59:11 -0400 Subject: [PATCH 062/223] Update access_api_proxy_test.go --- apiproxy/access_api_proxy_test.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/apiproxy/access_api_proxy_test.go b/apiproxy/access_api_proxy_test.go index 3a2cf28bb17..85be5054c09 100644 --- a/apiproxy/access_api_proxy_test.go +++ b/apiproxy/access_api_proxy_test.go @@ -9,6 +9,7 @@ import ( "github.com/onflow/flow/protobuf/go/flow/access" "google.golang.org/grpc" + grpcinsecure "google.golang.org/grpc/credentials/insecure" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/utils/grpcutils" @@ -245,7 +246,7 @@ func openFlowLite(address string) error { c, err := grpc.Dial( "unix://"+address, grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(grpcutils.DefaultMaxMsgSize)), - grpc.WithInsecure()) + grpc.WithTransportCredentials(grpcinsecure.NewCredentials())) if err != nil { return err } From 0016bd836a8d0abdf57aa09ae9915be504627913 Mon Sep 17 00:00:00 2001 From: danielholmes839 Date: Wed, 29 Jun 2022 12:25:49 -0400 Subject: [PATCH 063/223] comments and warning log --- consensus/hotstuff/follower_loop.go | 3 ++- engine/common/follower/engine.go | 2 ++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/consensus/hotstuff/follower_loop.go b/consensus/hotstuff/follower_loop.go index 3c9cdffe358..894b75d5912 100644 --- a/consensus/hotstuff/follower_loop.go +++ b/consensus/hotstuff/follower_loop.go @@ -11,9 +11,10 @@ import ( "github.com/onflow/flow-go/utils/logging" ) +// proposalTask 
struct used to send a proposal and done channel in one message type proposalTask struct { *model.Proposal - done chan struct{} + done chan struct{} // closed when the proposal has finished being processed } // FollowerLoop implements interface FollowerLoop diff --git a/engine/common/follower/engine.go b/engine/common/follower/engine.go index 9377ec12ba6..83451664e4e 100644 --- a/engine/common/follower/engine.go +++ b/engine/common/follower/engine.go @@ -373,6 +373,8 @@ func (e *Engine) processBlockAndDescendants(ctx context.Context, proposal *messa case <-e.follower.SubmitProposal(header, parent.View): break case <-time.After(time.Millisecond * 200): + // this shouldn't happen very often. 99.8% of proposals are processed within 150ms + e.log.Warn().Msg("HotStuffFollower SubmitProposal timeout") break } // check for any descendants of the block to process From affd26bbc67efa1e1644db50c1ffbe37dca1620b Mon Sep 17 00:00:00 2001 From: danielholmes839 Date: Wed, 29 Jun 2022 12:47:43 -0400 Subject: [PATCH 064/223] fixed interface --- module/hotstuff.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/module/hotstuff.go b/module/hotstuff.go index 22ac9fdfa13..b2a52813b17 100644 --- a/module/hotstuff.go +++ b/module/hotstuff.go @@ -47,5 +47,5 @@ type HotStuffFollower interface { // // Block proposals must be submitted in order, i.e. a proposal's parent must // have been previously processed by the HotStuffFollower. 
- SubmitProposal(proposal *flow.Header, parentView uint64) (done chan struct{}) + SubmitProposal(proposal *flow.Header, parentView uint64) (done <-chan struct{}) } From 06597b1b94a96852bd13a8195cb2902b41be1d73 Mon Sep 17 00:00:00 2001 From: danielholmes839 Date: Wed, 29 Jun 2022 13:50:40 -0400 Subject: [PATCH 065/223] update to read only channel --- engine/common/follower/engine_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/engine/common/follower/engine_test.go b/engine/common/follower/engine_test.go index c119384569a..1cf55ce4bf0 100644 --- a/engine/common/follower/engine_test.go +++ b/engine/common/follower/engine_test.go @@ -207,8 +207,8 @@ func (suite *Suite) TestHandleProposalWithPendingChildren() { suite.headers.On("ByBlockID", parent.ID()).Return(parent.Header, nil) suite.headers.On("ByBlockID", block.ID()).Return(block.Header, nil).Once() // should submit to follower - suite.follower.On("SubmitProposal", block.Header, parent.Header.View).Once().Return(make(chan struct{})) - suite.follower.On("SubmitProposal", child.Header, block.Header.View).Once().Return(make(chan struct{})) + suite.follower.On("SubmitProposal", block.Header, parent.Header.View).Once().Return(make(<-chan struct{})) + suite.follower.On("SubmitProposal", child.Header, block.Header.View).Once().Return(make(<-chan struct{})) // we have one pending child cached pending := []*flow.PendingBlock{ From 07a99d624f5a89ebf1ec3e398df5465e2035c462 Mon Sep 17 00:00:00 2001 From: danielholmes839 Date: Wed, 29 Jun 2022 16:23:24 -0400 Subject: [PATCH 066/223] update to read only channel --- engine/common/follower/engine_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/engine/common/follower/engine_test.go b/engine/common/follower/engine_test.go index 1cf55ce4bf0..edf0fe3617a 100644 --- a/engine/common/follower/engine_test.go +++ b/engine/common/follower/engine_test.go @@ -141,7 +141,7 @@ func (suite *Suite) TestHandleProposal() { // we do not have any 
children cached suite.cache.On("ByParentID", block.ID()).Return(nil, false) // the proposal should be forwarded to the follower - suite.follower.On("SubmitProposal", block.Header, parent.Header.View).Once().Return(make(chan struct{})) + suite.follower.On("SubmitProposal", block.Header, parent.Header.View).Once().Return(make(<-chan struct{})) // submit the block proposal := unittest.ProposalFromBlock(&block) From 5e843e0bb79c2a3dddfcfb7b5afe1cdfa0eb7210 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Thu, 30 Jun 2022 10:24:52 -0400 Subject: [PATCH 067/223] Update network/message/authorization.go Co-authored-by: Peter Argue <89119817+peterargue@users.noreply.github.com> --- network/message/authorization.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/network/message/authorization.go b/network/message/authorization.go index cbf8e95179a..c369a4774f5 100644 --- a/network/message/authorization.go +++ b/network/message/authorization.go @@ -16,7 +16,7 @@ import ( type MsgAuthConfig struct { // Name is the string representation of the message type. Name string - // Type a func that returns a new instance of message type. + // Type is a func that returns a new instance of message type. Type func() interface{} // Config is the mapping of network channel to list of authorized flow roles. 
Config map[channels.Channel]flow.RoleList From 29196e61a34de8f4392c45068ca63d08c846f354 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Thu, 30 Jun 2022 10:44:46 -0400 Subject: [PATCH 068/223] use cancellable context when building node fixture --- network/p2p/topic_validator_test.go | 52 +++++++++++++++++++---------- 1 file changed, 34 insertions(+), 18 deletions(-) diff --git a/network/p2p/topic_validator_test.go b/network/p2p/topic_validator_test.go index 319cb45e3b4..b49c2636b50 100644 --- a/network/p2p/topic_validator_test.go +++ b/network/p2p/topic_validator_test.go @@ -36,8 +36,11 @@ func TestTopicValidator_Unstaked(t *testing.T) { sporkId := unittest.IdentifierFixture() - sn1, identity1 := nodeFixture(t, context.Background(), sporkId, "TestAuthorizedSenderValidator_Unauthorized", withRole(flow.RoleConsensus), withLogger(logger)) - sn2, _ := nodeFixture(t, context.Background(), sporkId, "TestAuthorizedSenderValidator_Unauthorized", withRole(flow.RoleConsensus), withLogger(logger)) + nodeFixtureCtx, nodeFixtureCtxCancel := context.WithCancel(context.Background()) + defer nodeFixtureCtxCancel() + + sn1, identity1 := nodeFixture(t, nodeFixtureCtx, sporkId, "TestAuthorizedSenderValidator_Unauthorized", withRole(flow.RoleConsensus), withLogger(logger)) + sn2, _ := nodeFixture(t, nodeFixtureCtx, sporkId, "TestAuthorizedSenderValidator_Unauthorized", withRole(flow.RoleConsensus), withLogger(logger)) channel := channels.ConsensusCommittee topic := channels.TopicFromChannel(channel, sporkId) @@ -47,7 +50,7 @@ func TestTopicValidator_Unstaked(t *testing.T) { translator, err := p2p.NewFixedTableIdentityTranslator(ids) require.NoError(t, err) - // callback used by the topic validator to check if node is staked + // peer filter used by the topic validator to check if node is staked isStaked := func(pid peer.ID) bool { fid, err := translator.GetFlowID(pid) if err != nil { @@ -109,11 +112,12 @@ func TestTopicValidator_PublicChannel(t *testing.T) { logger := 
zerolog.New(os.Stdout).Level(zerolog.WarnLevel).Hook(hook) sporkId := unittest.IdentifierFixture() - identity1, privateKey1 := unittest.IdentityWithNetworkingKeyFixture(unittest.WithRole(flow.RoleConsensus)) - sn1 := createNode(t, identity1.NodeID, privateKey1, sporkId, logger) - identity2, privateKey2 := unittest.IdentityWithNetworkingKeyFixture(unittest.WithRole(flow.RoleConsensus)) - sn2 := createNode(t, identity2.NodeID, privateKey2, sporkId, zerolog.Nop()) + nodeFixtureCtx, nodeFixtureCtxCancel := context.WithCancel(context.Background()) + defer nodeFixtureCtxCancel() + + sn1, _ := nodeFixture(t, nodeFixtureCtx, sporkId, "TestTopicValidator_PublicChannel", withRole(flow.RoleConsensus), withLogger(logger)) + sn2, _ := nodeFixture(t, nodeFixtureCtx, sporkId, "TestTopicValidator_PublicChannel", withRole(flow.RoleConsensus), withLogger(logger)) // unauthenticated messages should not be dropped on public channels channel := channels.PublicSyncCommittee @@ -174,9 +178,12 @@ func TestAuthorizedSenderValidator_Unauthorized(t *testing.T) { }) logger := zerolog.New(os.Stdout).Level(zerolog.ErrorLevel).Hook(hook) - sn1, identity1 := nodeFixture(t, context.Background(), sporkId, "TestAuthorizedSenderValidator_InvalidMsg", withRole(flow.RoleConsensus)) - sn2, identity2 := nodeFixture(t, context.Background(), sporkId, "TestAuthorizedSenderValidator_InvalidMsg", withRole(flow.RoleConsensus)) - an1, identity3 := nodeFixture(t, context.Background(), sporkId, "TestAuthorizedSenderValidator_InvalidMsg", withRole(flow.RoleAccess)) + nodeFixtureCtx, nodeFixtureCtxCancel := context.WithCancel(context.Background()) + defer nodeFixtureCtxCancel() + + sn1, identity1 := nodeFixture(t, nodeFixtureCtx, sporkId, "TestAuthorizedSenderValidator_InvalidMsg", withRole(flow.RoleConsensus)) + sn2, identity2 := nodeFixture(t, nodeFixtureCtx, sporkId, "TestAuthorizedSenderValidator_InvalidMsg", withRole(flow.RoleConsensus)) + an1, identity3 := nodeFixture(t, nodeFixtureCtx, sporkId, 
"TestAuthorizedSenderValidator_InvalidMsg", withRole(flow.RoleAccess)) channel := channels.ConsensusCommittee topic := channels.TopicFromChannel(channel, sporkId) @@ -270,8 +277,11 @@ func TestAuthorizedSenderValidator_Unauthorized(t *testing.T) { func TestAuthorizedSenderValidator_InvalidMsg(t *testing.T) { sporkId := unittest.IdentifierFixture() - sn1, identity1 := nodeFixture(t, context.Background(), sporkId, "consensus_1", withRole(flow.RoleConsensus)) - sn2, identity2 := nodeFixture(t, context.Background(), sporkId, "consensus_2", withRole(flow.RoleConsensus)) + nodeFixtureCtx, nodeFixtureCtxCancel := context.WithCancel(context.Background()) + defer nodeFixtureCtxCancel() + + sn1, identity1 := nodeFixture(t, nodeFixtureCtx, sporkId, "consensus_1", withRole(flow.RoleConsensus)) + sn2, identity2 := nodeFixture(t, nodeFixtureCtx, sporkId, "consensus_2", withRole(flow.RoleConsensus)) // try to publish BlockProposal on invalid SyncCommittee channel channel := channels.SyncCommittee @@ -341,9 +351,12 @@ func TestAuthorizedSenderValidator_InvalidMsg(t *testing.T) { func TestAuthorizedSenderValidator_Ejected(t *testing.T) { sporkId := unittest.IdentifierFixture() - sn1, identity1 := nodeFixture(t, context.Background(), sporkId, "consensus_1", withRole(flow.RoleConsensus)) - sn2, identity2 := nodeFixture(t, context.Background(), sporkId, "consensus_2", withRole(flow.RoleConsensus)) - an1, identity3 := nodeFixture(t, context.Background(), sporkId, "access_1", withRole(flow.RoleAccess)) + nodeFixtureCtx, nodeFixtureCtxCancel := context.WithCancel(context.Background()) + defer nodeFixtureCtxCancel() + + sn1, identity1 := nodeFixture(t, nodeFixtureCtx, sporkId, "consensus_1", withRole(flow.RoleConsensus)) + sn2, identity2 := nodeFixture(t, nodeFixtureCtx, sporkId, "consensus_2", withRole(flow.RoleConsensus)) + an1, identity3 := nodeFixture(t, nodeFixtureCtx, sporkId, "access_1", withRole(flow.RoleAccess)) channel := channels.ConsensusCommittee topic := 
channels.TopicFromChannel(channel, sporkId) @@ -434,9 +447,12 @@ func TestAuthorizedSenderValidator_Ejected(t *testing.T) { func TestAuthorizedSenderValidator_ClusterChannel(t *testing.T) { sporkId := unittest.IdentifierFixture() - ln1, identity1 := nodeFixture(t, context.Background(), sporkId, "collection_1", withRole(flow.RoleCollection)) - ln2, identity2 := nodeFixture(t, context.Background(), sporkId, "collection_2", withRole(flow.RoleCollection)) - ln3, identity3 := nodeFixture(t, context.Background(), sporkId, "collection_3", withRole(flow.RoleCollection)) + nodeFixtureCtx, nodeFixtureCtxCancel := context.WithCancel(context.Background()) + defer nodeFixtureCtxCancel() + + ln1, identity1 := nodeFixture(t, nodeFixtureCtx, sporkId, "collection_1", withRole(flow.RoleCollection)) + ln2, identity2 := nodeFixture(t, nodeFixtureCtx, sporkId, "collection_2", withRole(flow.RoleCollection)) + ln3, identity3 := nodeFixture(t, nodeFixtureCtx, sporkId, "collection_3", withRole(flow.RoleCollection)) channel := channels.SyncCluster(flow.Testnet) topic := channels.TopicFromChannel(channel, sporkId) From b80a114c37e23132edb7c09e82e753c00ff9dcbb Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Thu, 30 Jun 2022 10:46:37 -0400 Subject: [PATCH 069/223] update log statement remove tag not on master --- insecure/corruptible/factory.go | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/insecure/corruptible/factory.go b/insecure/corruptible/factory.go index 53953cb3e4b..2caafe2f1c8 100644 --- a/insecure/corruptible/factory.go +++ b/insecure/corruptible/factory.go @@ -239,11 +239,7 @@ func (c *ConduitFactory) processAttackerMessage(msg *insecure.Message) error { return fmt.Errorf("could not convert target ids from byte to identifiers: %w", err) } - lg = lg.With(). - Str("target_ids", fmt.Sprintf("%v", targetIds)). - Uint32("targets_num", msg.TargetNum). 
- Logger() - + lg = lg.With().Str("target_ids", fmt.Sprintf("%v", msg.TargetIDs)).Logger() err = c.sendOnNetwork(event, channels.Channel(msg.ChannelID), msg.Protocol, uint(msg.TargetNum), targetIds...) if err != nil { From ae051439548de59e6728230452e8ac379deb2a4b Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Thu, 30 Jun 2022 10:46:52 -0400 Subject: [PATCH 070/223] Update network/message/authorization.go Co-authored-by: Peter Argue <89119817+peterargue@users.noreply.github.com> --- network/message/authorization.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/network/message/authorization.go b/network/message/authorization.go index c369a4774f5..aff25b0dac3 100644 --- a/network/message/authorization.go +++ b/network/message/authorization.go @@ -25,8 +25,8 @@ type MsgAuthConfig struct { // IsAuthorized checks if the specified role is authorized to send the message on channel and // asserts that the message is authorized to be sent on channel. // Expected error returns during normal operations: -// * ErrUnauthorizedMessageOnChannel: if channel does not exist in message config -// * ErrUnauthorizedRole: if list of authorized roles for message config does not include role +// * ErrUnauthorizedMessageOnChannel: the channel is not included in the message's list of authorized channels +// * ErrUnauthorizedRole: the role is not included in the message's list of authorized roles for the provided channel func (m MsgAuthConfig) IsAuthorized(role flow.Role, channel channels.Channel) error { authorizedRoles, ok := m.Config[channel] if !ok { From a0a43205a76718b9e21f2ac4ec12d85171f82c6d Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Thu, 30 Jun 2022 10:47:08 -0400 Subject: [PATCH 071/223] Update network/validator/pubsub/authorized_sender_validator_test.go Co-authored-by: Peter Argue <89119817+peterargue@users.noreply.github.com> --- network/validator/pubsub/authorized_sender_validator_test.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git 
a/network/validator/pubsub/authorized_sender_validator_test.go b/network/validator/pubsub/authorized_sender_validator_test.go index 55bfacc9f38..483deeaa25f 100644 --- a/network/validator/pubsub/authorized_sender_validator_test.go +++ b/network/validator/pubsub/authorized_sender_validator_test.go @@ -144,10 +144,15 @@ func (s *TestIsAuthorizedSenderSuite) initializeAuthorizationTestCases() { // initializeInvalidMessageOnChannelTestCases initializes test cases for all possible combinations of invalid message types on channel. // NOTE: the role in the test case does not matter since ErrUnauthorizedMessageOnChannel will be returned before the role is checked. func (s *TestIsAuthorizedSenderSuite) initializeInvalidMessageOnChannelTestCases() { + // iterate all channels for _, c := range message.AuthorizationConfigs { for channel, authorizedRoles := range c.Config { identity := unittest.IdentityFixture(unittest.WithRole(authorizedRoles[0])) + + // iterate all message types for _, config := range message.AuthorizationConfigs { + + // include test if message type is not authorized on channel _, ok := config.Config[channel] if config.Name != c.Name && !ok { tc := TestCase{ From bd63592da1b7a0ed879c34d6a7f10a4386b0c1a2 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Thu, 30 Jun 2022 10:47:21 -0400 Subject: [PATCH 072/223] Update network/message/authorization.go Co-authored-by: Peter Argue <89119817+peterargue@users.noreply.github.com> --- network/message/authorization.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/network/message/authorization.go b/network/message/authorization.go index aff25b0dac3..40c6711cbfd 100644 --- a/network/message/authorization.go +++ b/network/message/authorization.go @@ -22,8 +22,8 @@ type MsgAuthConfig struct { Config map[channels.Channel]flow.RoleList } -// IsAuthorized checks if the specified role is authorized to send the message on channel and -// asserts that the message is authorized to be sent on channel. 
+// IsAuthorized checks if the specified role is authorized to send the message on the provided channel and +// asserts that the message is authorized to be sent on the channel. // Expected error returns during normal operations: // * ErrUnauthorizedMessageOnChannel: the channel is not included in the message's list of authorized channels // * ErrUnauthorizedRole: the role is not included in the message's list of authorized roles for the provided channel From 7d77421e4920107152920e5effeafd4821dc13fa Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Thu, 30 Jun 2022 11:14:57 -0400 Subject: [PATCH 073/223] restructure switch statement default case --- network/validator/pubsub/authorized_sender_validator.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/network/validator/pubsub/authorized_sender_validator.go b/network/validator/pubsub/authorized_sender_validator.go index 399b3d41f31..083544a6b75 100644 --- a/network/validator/pubsub/authorized_sender_validator.go +++ b/network/validator/pubsub/authorized_sender_validator.go @@ -45,6 +45,8 @@ func AuthorizedSenderValidator(log zerolog.Logger, channel channels.Channel, get msgType, err := IsAuthorizedSender(identity, channel, msg) switch { + case err == nil: + return pubsub.ValidationAccept case errors.Is(err, ErrUnauthorizedSender): slashingViolationsConsumer.OnUnAuthorizedSenderError(identity, from.String(), msgType, err) return pubsub.ValidationReject @@ -54,7 +56,7 @@ func AuthorizedSenderValidator(log zerolog.Logger, channel channels.Channel, get case errors.Is(err, ErrSenderEjected): slashingViolationsConsumer.OnSenderEjectedError(identity, from.String(), msgType, err) return pubsub.ValidationReject - case err != nil: + default: log.Error(). Err(err). Str("peer_id", from.String()). @@ -63,8 +65,6 @@ func AuthorizedSenderValidator(log zerolog.Logger, channel channels.Channel, get Str("message_type", msgType). 
Msg("unexpected error during message validation") return pubsub.ValidationReject - default: - return pubsub.ValidationAccept } } } From 4c28d10ac21d58f8184dff5e0799d6d85536a070 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Thu, 30 Jun 2022 11:15:30 -0400 Subject: [PATCH 074/223] log error if identity not found in authorized sender callback --- network/validator/pubsub/authorized_sender_validator.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/network/validator/pubsub/authorized_sender_validator.go b/network/validator/pubsub/authorized_sender_validator.go index 083544a6b75..f32d9256187 100644 --- a/network/validator/pubsub/authorized_sender_validator.go +++ b/network/validator/pubsub/authorized_sender_validator.go @@ -39,7 +39,7 @@ func AuthorizedSenderValidator(log zerolog.Logger, channel channels.Channel, get return func(ctx context.Context, from peer.ID, msg interface{}) pubsub.ValidationResult { identity, ok := getIdentity(from) if !ok { - log.Warn().Err(ErrIdentityUnverified).Str("peer_id", from.String()).Msg("rejecting message") + log.Error().Err(ErrIdentityUnverified).Str("peer_id", from.String()).Msg("rejecting message") return pubsub.ValidationReject } From a943bad7a4f65922a43841414b7e1f821261a6de Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Thu, 30 Jun 2022 11:16:15 -0400 Subject: [PATCH 075/223] Update network/validator/pubsub/authorized_sender_validator_test.go Co-authored-by: Peter Argue <89119817+peterargue@users.noreply.github.com> --- network/validator/pubsub/authorized_sender_validator_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/network/validator/pubsub/authorized_sender_validator_test.go b/network/validator/pubsub/authorized_sender_validator_test.go index 483deeaa25f..ad27120d7ec 100644 --- a/network/validator/pubsub/authorized_sender_validator_test.go +++ b/network/validator/pubsub/authorized_sender_validator_test.go @@ -101,6 +101,7 @@ func (s *TestIsAuthorizedSenderSuite) 
TestIsAuthorizedSender_ValidationFailure() *messages.BlockProposal } + // *validator.msg is not a known message type, but embeds *messages.BlockProposal which is m := &msg{&messages.BlockProposal{ Header: nil, Payload: nil, From 04f4ce7ab10f11e2e715a56d6dbf8ab79e9a6bda Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Thu, 30 Jun 2022 11:16:34 -0400 Subject: [PATCH 076/223] Update network/validator/pubsub/authorized_sender_validator_test.go Co-authored-by: Peter Argue <89119817+peterargue@users.noreply.github.com> --- network/validator/pubsub/authorized_sender_validator_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/network/validator/pubsub/authorized_sender_validator_test.go b/network/validator/pubsub/authorized_sender_validator_test.go index ad27120d7ec..f8e512d272e 100644 --- a/network/validator/pubsub/authorized_sender_validator_test.go +++ b/network/validator/pubsub/authorized_sender_validator_test.go @@ -107,6 +107,7 @@ func (s *TestIsAuthorizedSenderSuite) TestIsAuthorizedSender_ValidationFailure() Payload: nil, }} + // unknown message types are rejected msgType, err := IsAuthorizedSender(identity, channels.ConsensusCommittee, m) s.Require().ErrorIs(err, ErrUnknownMessageType) s.Require().Equal("", msgType) From 886cf3321d080b4fab27a73835357cacc3979b5f Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Thu, 30 Jun 2022 11:16:55 -0400 Subject: [PATCH 077/223] Update network/validator/pubsub/authorized_sender_validator_test.go Co-authored-by: Peter Argue <89119817+peterargue@users.noreply.github.com> --- network/validator/pubsub/authorized_sender_validator_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/network/validator/pubsub/authorized_sender_validator_test.go b/network/validator/pubsub/authorized_sender_validator_test.go index f8e512d272e..286505c8b5d 100644 --- a/network/validator/pubsub/authorized_sender_validator_test.go +++ b/network/validator/pubsub/authorized_sender_validator_test.go @@ -112,6 +112,7 @@ func (s 
*TestIsAuthorizedSenderSuite) TestIsAuthorizedSender_ValidationFailure() s.Require().ErrorIs(err, ErrUnknownMessageType) s.Require().Equal("", msgType) + // nil messages are rejected msgType, err = IsAuthorizedSender(identity, channels.ConsensusCommittee, nil) s.Require().ErrorIs(err, ErrUnknownMessageType) s.Require().Equal("", msgType) From 4635513f15788f57c074f8268e45b546c214093e Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Thu, 30 Jun 2022 11:17:23 -0400 Subject: [PATCH 078/223] Update network/validator/pubsub/authorized_sender_validator.go Co-authored-by: Peter Argue <89119817+peterargue@users.noreply.github.com> --- network/validator/pubsub/authorized_sender_validator.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/network/validator/pubsub/authorized_sender_validator.go b/network/validator/pubsub/authorized_sender_validator.go index 399b3d41f31..5497082ea7e 100644 --- a/network/validator/pubsub/authorized_sender_validator.go +++ b/network/validator/pubsub/authorized_sender_validator.go @@ -73,8 +73,7 @@ func AuthorizedSenderValidator(log zerolog.Logger, channel channels.Channel, get // 1. The node is not ejected. // 2. Using the message auth config // A. The message is authorized to be sent on channel. -// B. The sender role is authorized to send message channel. -// C. The sender role is authorized to participate on channel. +// B. The sender role is authorized to send message on channel. 
// Expected error returns during normal operations: // * ErrSenderEjected: if identity of sender is ejected // * ErrUnknownMessageType: if retrieving the message auth config for msg fails From 63d84c2d9abf3ddc45434b5dccdbc4aaffb1b13b Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Thu, 30 Jun 2022 11:18:07 -0400 Subject: [PATCH 079/223] Update network/validator/pubsub/authorized_sender_validator.go Co-authored-by: Peter Argue <89119817+peterargue@users.noreply.github.com> --- network/validator/pubsub/authorized_sender_validator.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/network/validator/pubsub/authorized_sender_validator.go b/network/validator/pubsub/authorized_sender_validator.go index 5497082ea7e..eeaf392c520 100644 --- a/network/validator/pubsub/authorized_sender_validator.go +++ b/network/validator/pubsub/authorized_sender_validator.go @@ -75,9 +75,9 @@ func AuthorizedSenderValidator(log zerolog.Logger, channel channels.Channel, get // A. The message is authorized to be sent on channel. // B. The sender role is authorized to send message on channel. 
// Expected error returns during normal operations: -// * ErrSenderEjected: if identity of sender is ejected -// * ErrUnknownMessageType: if retrieving the message auth config for msg fails -// * ErrUnauthorizedSender: if the message auth config validation for msg fails +// * ErrSenderEjected: if identity of sender is ejected from the network +// * ErrUnknownMessageType: if the message type does not have an auth config +// * ErrUnauthorizedSender: if the sender is not authorized to send message on the channel func IsAuthorizedSender(identity *flow.Identity, channel channels.Channel, msg interface{}) (string, error) { if identity.Ejected { return "", ErrSenderEjected From 8054d1db372caebe3dc452cd52af3beef8c7b51e Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Thu, 30 Jun 2022 11:20:20 -0400 Subject: [PATCH 080/223] Update network/validator/pubsub/authorized_sender_validator.go Co-authored-by: Peter Argue <89119817+peterargue@users.noreply.github.com> --- network/validator/pubsub/authorized_sender_validator.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/network/validator/pubsub/authorized_sender_validator.go b/network/validator/pubsub/authorized_sender_validator.go index eeaf392c520..6f903d8b874 100644 --- a/network/validator/pubsub/authorized_sender_validator.go +++ b/network/validator/pubsub/authorized_sender_validator.go @@ -25,9 +25,10 @@ var ( ) // AuthorizedSenderValidator returns a MessageValidator that will check if the sender of a message is authorized to send the message. -// The MessageValidator returned will use the getIdentity to get the flow identity for the sender, asserting that the sender is a staked node. -// If the sender is an unstaked node the message is rejected. IsAuthorizedSender is used to perform further message validation. If validation -// fails the message is rejected, if the validation error is an expected error slashing data is collected before the message is rejected. 
+// The MessageValidator returned will use the getIdentity to get the flow identity for the sender, asserting that the sender is a staked node and not ejected. Otherwise, the message is rejected. +// The message is also authorized by checking that the sender is allowed to send the message on the channel. +// If validation fails the message is rejected, and if the validation error is an expected error, slashing data is also collected. +// Authorization config is defined in message.MsgAuthConfig func AuthorizedSenderValidator(log zerolog.Logger, channel channels.Channel, getIdentity func(peer.ID) (*flow.Identity, bool)) MessageValidator { log = log.With(). Str("component", "authorized_sender_validator"). From 27263e86fd90a161e696c1890cd29f690ea21175 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Thu, 30 Jun 2022 11:22:45 -0400 Subject: [PATCH 081/223] add sync cluster test case to TestIsAuthorizedSender_ClusterPrefixedChannels --- .../validator/pubsub/authorized_sender_validator_test.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/network/validator/pubsub/authorized_sender_validator_test.go b/network/validator/pubsub/authorized_sender_validator_test.go index 55bfacc9f38..744171bb663 100644 --- a/network/validator/pubsub/authorized_sender_validator_test.go +++ b/network/validator/pubsub/authorized_sender_validator_test.go @@ -80,9 +80,16 @@ func (s *TestIsAuthorizedSenderSuite) TestIsAuthorizedSender_UnAuthorizedMessage func (s *TestIsAuthorizedSenderSuite) TestIsAuthorizedSender_ClusterPrefixedChannels() { identity := unittest.IdentityFixture(unittest.WithRole(flow.RoleCollection)) clusterID := flow.Localnet + + // collection consensus cluster msgType, err := IsAuthorizedSender(identity, channels.ConsensusCluster(clusterID), &messages.ClusterBlockResponse{}) s.Require().NoError(err) s.Require().Equal(message.ClusterBlockResponse, msgType) + + // collection sync cluster + msgType, err = IsAuthorizedSender(identity, channels.SyncCluster(clusterID), 
&messages.SyncRequest{}) + s.Require().NoError(err) + s.Require().Equal(message.SyncRequest, msgType) } // TestIsAuthorizedSender_ValidationFailure checks that IsAuthorizedSender returns the expected validation error. From 7e74be1ae9418926d1edd59cbf45b11edf8896bd Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Thu, 30 Jun 2022 11:32:21 -0400 Subject: [PATCH 082/223] update sync cluster config and entity request config --- network/message/authorization.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/network/message/authorization.go b/network/message/authorization.go index 40c6711cbfd..a0b0dd98d15 100644 --- a/network/message/authorization.go +++ b/network/message/authorization.go @@ -79,7 +79,7 @@ func initializeMessageAuthConfigsMap() { }, Config: map[channels.Channel]flow.RoleList{ channels.SyncCommittee: flow.Roles(), - channels.SyncClusterPrefix: flow.Roles(), + channels.SyncClusterPrefix: {flow.RoleCollection}, }, } AuthorizationConfigs[SyncResponse] = MsgAuthConfig{ @@ -89,7 +89,7 @@ func initializeMessageAuthConfigsMap() { }, Config: map[channels.Channel]flow.RoleList{ channels.SyncCommittee: flow.Roles(), - channels.SyncClusterPrefix: flow.Roles(), + channels.SyncClusterPrefix: {flow.RoleCollection}, }, } AuthorizationConfigs[RangeRequest] = MsgAuthConfig{ @@ -99,7 +99,7 @@ func initializeMessageAuthConfigsMap() { }, Config: map[channels.Channel]flow.RoleList{ channels.SyncCommittee: flow.Roles(), - channels.SyncClusterPrefix: flow.Roles(), + channels.SyncClusterPrefix: {flow.RoleCollection}, }, } AuthorizationConfigs[BatchRequest] = MsgAuthConfig{ @@ -109,7 +109,7 @@ func initializeMessageAuthConfigsMap() { }, Config: map[channels.Channel]flow.RoleList{ channels.SyncCommittee: flow.Roles(), - channels.SyncClusterPrefix: flow.Roles(), + channels.SyncClusterPrefix: {flow.RoleCollection}, }, } AuthorizationConfigs[BlockResponse] = MsgAuthConfig{ @@ -119,7 +119,7 @@ func initializeMessageAuthConfigsMap() { }, Config: 
map[channels.Channel]flow.RoleList{ channels.SyncCommittee: flow.Roles(), - channels.SyncClusterPrefix: flow.Roles(), + channels.SyncClusterPrefix: {flow.RoleCollection}, }, } @@ -248,10 +248,10 @@ func initializeMessageAuthConfigsMap() { return new(messages.EntityRequest) }, Config: map[channels.Channel]flow.RoleList{ - channels.RequestChunks: {flow.RoleAccess, flow.RoleConsensus, flow.RoleCollection}, + channels.RequestChunks: {flow.RoleConsensus, flow.RoleCollection}, + channels.RequestApprovalsByChunk: {flow.RoleConsensus, flow.RoleCollection}, + channels.RequestReceiptsByBlockID: {flow.RoleConsensus, flow.RoleCollection}, channels.RequestCollections: {flow.RoleAccess, flow.RoleConsensus, flow.RoleCollection}, - channels.RequestApprovalsByChunk: {flow.RoleAccess, flow.RoleConsensus, flow.RoleCollection}, - channels.RequestReceiptsByBlockID: {flow.RoleAccess, flow.RoleConsensus, flow.RoleCollection}, }, } AuthorizationConfigs[EntityResponse] = MsgAuthConfig{ From 5570fcc7e382852558fac11c06296f4d11c9f3a9 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Thu, 30 Jun 2022 11:34:41 -0400 Subject: [PATCH 083/223] Update authorization.go --- network/message/authorization.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/network/message/authorization.go b/network/message/authorization.go index a0b0dd98d15..cf356737b86 100644 --- a/network/message/authorization.go +++ b/network/message/authorization.go @@ -25,7 +25,7 @@ type MsgAuthConfig struct { // IsAuthorized checks if the specified role is authorized to send the message on the provided channel and // asserts that the message is authorized to be sent on the channel. 
// Expected error returns during normal operations: -// * ErrUnauthorizedMessageOnChannel: the channel is not included in the message's list of authorized channels +// * ErrUnauthorizedMessageOnChannel: the channel is not included in the message's list of authorized channels // * ErrUnauthorizedRole: the role is not included in the message's list of authorized roles for the provided channel func (m MsgAuthConfig) IsAuthorized(role flow.Role, channel channels.Channel) error { authorizedRoles, ok := m.Config[channel] From 1f894627163e96828853ddb9608412595041cf32 Mon Sep 17 00:00:00 2001 From: danielholmes839 Date: Mon, 4 Jul 2022 10:58:06 -0400 Subject: [PATCH 084/223] process block responses --- engine/common/follower/engine.go | 81 +++++++++++++++++++------ engine/common/synchronization/engine.go | 8 +-- 2 files changed, 65 insertions(+), 24 deletions(-) diff --git a/engine/common/follower/engine.go b/engine/common/follower/engine.go index 83451664e4e..d6248468313 100644 --- a/engine/common/follower/engine.go +++ b/engine/common/follower/engine.go @@ -24,6 +24,11 @@ import ( "github.com/onflow/flow-go/utils/logging" ) +type followerBlockProposal struct { + *messages.BlockProposal + Wait bool +} + type Engine struct { unit *engine.Unit log zerolog.Logger @@ -146,6 +151,12 @@ func (e *Engine) Process(channel network.Channel, originID flow.Identifier, even func (e *Engine) process(originID flow.Identifier, input interface{}) error { switch v := input.(type) { + case *messages.BlockResponse: + e.engMetrics.MessageReceived(metrics.EngineFollower, metrics.MessageBlockResponse) + defer e.engMetrics.MessageHandled(metrics.EngineFollower, metrics.MessageBlockResponse) + e.unit.Lock() + defer e.unit.Unlock() + return e.onBlockResponse(originID, v) case *events.SyncedBlock: e.engMetrics.MessageReceived(metrics.EngineFollower, metrics.MessageSyncedBlock) defer e.engMetrics.MessageHandled(metrics.EngineFollower, metrics.MessageSyncedBlock) @@ -157,7 +168,10 @@ func (e *Engine) 
process(originID flow.Identifier, input interface{}) error { defer e.engMetrics.MessageHandled(metrics.EngineFollower, metrics.MessageBlockProposal) e.unit.Lock() defer e.unit.Unlock() - return e.onBlockProposal(originID, v) + return e.onBlockProposal(originID, &followerBlockProposal{ + BlockProposal: v, + Wait: false, + }) default: return fmt.Errorf("invalid event type (%T)", input) } @@ -172,15 +186,37 @@ func (e *Engine) onSyncedBlock(originID flow.Identifier, synced *events.SyncedBl } // process as proposal - proposal := &messages.BlockProposal{ - Header: synced.Block.Header, - Payload: synced.Block.Payload, + proposal := &followerBlockProposal{ + BlockProposal: &messages.BlockProposal{ + Header: synced.Block.Header, + Payload: synced.Block.Payload, + }, + Wait: false, } return e.onBlockProposal(originID, proposal) } +func (e *Engine) onBlockResponse(originID flow.Identifier, res *messages.BlockResponse) error { + for _, block := range res.Blocks { + proposal := &followerBlockProposal{ + BlockProposal: &messages.BlockProposal{ + Header: block.Header, + Payload: block.Payload, + }, + Wait: true, + } + + // process block proposal with a wait + if err := e.onBlockProposal(originID, proposal); err != nil { + return err + } + } + + return nil +} + // onBlockProposal handles incoming block proposals. 
-func (e *Engine) onBlockProposal(originID flow.Identifier, proposal *messages.BlockProposal) error { +func (e *Engine) onBlockProposal(originID flow.Identifier, proposal *followerBlockProposal) error { span, ctx, _ := e.tracer.StartBlockSpan(context.Background(), proposal.Header.ID(), trace.FollowerOnBlockProposal) defer span.Finish() @@ -251,7 +287,7 @@ func (e *Engine) onBlockProposal(originID flow.Identifier, proposal *messages.Bl if found { // add the block to the cache - _ = e.pending.Add(originID, proposal) + _ = e.pending.Add(originID, proposal.BlockProposal) // go to the first missing ancestor ancestorID := ancestor.Header.ParentID @@ -281,7 +317,7 @@ func (e *Engine) onBlockProposal(originID flow.Identifier, proposal *messages.Bl _, err = e.headers.ByBlockID(header.ParentID) if errors.Is(err, storage.ErrNotFound) { - _ = e.pending.Add(originID, proposal) + _ = e.pending.Add(originID, proposal.BlockProposal) log.Debug().Msg("requesting missing parent for proposal") @@ -313,7 +349,7 @@ func (e *Engine) onBlockProposal(originID flow.Identifier, proposal *messages.Bl // The function assumes that `proposal` is connected to the finalized state. By induction, // any children are therefore also connected to the finalized state and can be processed as well. // No errors are expected during normal operations. 
-func (e *Engine) processBlockAndDescendants(ctx context.Context, proposal *messages.BlockProposal) error { +func (e *Engine) processBlockAndDescendants(ctx context.Context, proposal *followerBlockProposal) error { span, ctx := e.tracer.StartSpanFromContext(ctx, trace.FollowerProcessBlockProposal) defer span.Finish() @@ -369,14 +405,20 @@ func (e *Engine) processBlockAndDescendants(ctx context.Context, proposal *messa log.Info().Msg("forwarding block proposal to hotstuff") // submit the model to follower for processing - select { - case <-e.follower.SubmitProposal(header, parent.View): - break - case <-time.After(time.Millisecond * 200): - // this shouldn't happen very often. 99.8% of proposals are processed within 150ms - e.log.Warn().Msg("HotStuffFollower SubmitProposal timeout") - break + if proposal.Wait { + select { + case <-e.follower.SubmitProposal(header, parent.View): + // wait until the block is processed when the proposal is coming from a range response + break + case <-time.After(time.Millisecond * 200): + // this shouldn't happen very often. 
99.8% of proposals are processed within 150ms + e.log.Warn().Msg("HotStuffFollower SubmitProposal timeout") + break + } + } else { + e.follower.SubmitProposal(header, parent.View) } + // check for any descendants of the block to process err = e.processPendingChildren(ctx, header) if err != nil { @@ -405,9 +447,12 @@ func (e *Engine) processPendingChildren(ctx context.Context, header *flow.Header // then try to process children only this once var result *multierror.Error for _, child := range children { - proposal := &messages.BlockProposal{ - Header: child.Header, - Payload: child.Payload, + proposal := &followerBlockProposal{ + BlockProposal: &messages.BlockProposal{ + Header: child.Header, + Payload: child.Payload, + }, + Wait: false, } err := e.processBlockAndDescendants(ctx, proposal) if err != nil { diff --git a/engine/common/synchronization/engine.go b/engine/common/synchronization/engine.go index c3ae630788d..5759818dd23 100644 --- a/engine/common/synchronization/engine.go +++ b/engine/common/synchronization/engine.go @@ -307,13 +307,9 @@ func (e *Engine) onBlockResponse(originID flow.Identifier, res *messages.BlockRe e.log.Debug().Uint64("height", block.Header.Height).Msg("block handler rejected") continue } - synced := &events.SyncedBlock{ - OriginID: originID, - Block: block, - } - - e.comp.SubmitLocal(synced) } + + e.comp.SubmitLocal(res) } // checkLoop will regularly scan for items that need requesting. 
From 21cdd6c09f9fab663e5b2fa55b3f9a5b0c87402f Mon Sep 17 00:00:00 2001 From: danielholmes839 Date: Mon, 4 Jul 2022 11:27:55 -0400 Subject: [PATCH 085/223] remove unused import --- engine/common/synchronization/engine.go | 1 - 1 file changed, 1 deletion(-) diff --git a/engine/common/synchronization/engine.go b/engine/common/synchronization/engine.go index 5759818dd23..1ad2c1b7805 100644 --- a/engine/common/synchronization/engine.go +++ b/engine/common/synchronization/engine.go @@ -13,7 +13,6 @@ import ( "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/engine/common/fifoqueue" "github.com/onflow/flow-go/model/chainsync" - "github.com/onflow/flow-go/model/events" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/messages" "github.com/onflow/flow-go/module" From 979ae68e8bd901f23ee9efb55590c8d64ff16842 Mon Sep 17 00:00:00 2001 From: Daniel Holmes <43529937+danielholmes839@users.noreply.github.com> Date: Mon, 4 Jul 2022 12:14:08 -0400 Subject: [PATCH 086/223] Update consensus/hotstuff/follower_loop.go Co-authored-by: Leo Zhang --- consensus/hotstuff/follower_loop.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/consensus/hotstuff/follower_loop.go b/consensus/hotstuff/follower_loop.go index 894b75d5912..c51cec20089 100644 --- a/consensus/hotstuff/follower_loop.go +++ b/consensus/hotstuff/follower_loop.go @@ -31,7 +31,7 @@ func NewFollowerLoop(log zerolog.Logger, followerLogic FollowerLogic) (*Follower return &FollowerLoop{ log: log, followerLogic: followerLogic, - proposals: make(chan proposalTask), + proposals: make(chan *proposalTask), runner: runner.NewSingleRunner(), }, nil } From 01b762c3bd10767601e75d71cf5dd2d83a410c81 Mon Sep 17 00:00:00 2001 From: danielholmes839 Date: Mon, 4 Jul 2022 14:24:34 -0400 Subject: [PATCH 087/223] send proposalTask by pointer --- consensus/hotstuff/follower_loop.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/consensus/hotstuff/follower_loop.go 
b/consensus/hotstuff/follower_loop.go index c51cec20089..b92581279ce 100644 --- a/consensus/hotstuff/follower_loop.go +++ b/consensus/hotstuff/follower_loop.go @@ -21,7 +21,7 @@ type proposalTask struct { type FollowerLoop struct { log zerolog.Logger followerLogic FollowerLogic - proposals chan proposalTask + proposals chan *proposalTask runner runner.SingleRunner // lock for preventing concurrent state transitions } @@ -43,7 +43,7 @@ func NewFollowerLoop(log zerolog.Logger, followerLogic FollowerLogic) (*Follower // have been previously processed by the FollowerLoop. func (fl *FollowerLoop) SubmitProposal(proposalHeader *flow.Header, parentView uint64) <-chan struct{} { received := time.Now() - proposal := proposalTask{ + proposal := &proposalTask{ Proposal: model.ProposalFromFlow(proposalHeader, parentView), done: make(chan struct{}), } From 94a749dae58a0557112e7caca8410ac55e312ba0 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Mon, 4 Jul 2022 20:26:56 -0400 Subject: [PATCH 088/223] return a validator callback that can be used for p2p or unicast validation - update AuthorizedSenderValidator to return (string, error) instead of pubsub validation result - Add simple wrapper func AuthorizedSenderMessageValidator that returns pubsub validation result allowing the callback returned by AuthorizedSenderValidator to be used for pubsub - update all test usages - unexport isAuthorizedSender --- network/p2p/middleware.go | 2 +- network/p2p/topic_validator_test.go | 8 +- .../pubsub/authorized_sender_validator.go | 37 +++-- .../authorized_sender_validator_test.go | 144 ++++++++++++------ utils/unittest/fixtures.go | 18 ++- 5 files changed, 150 insertions(+), 59 deletions(-) diff --git a/network/p2p/middleware.go b/network/p2p/middleware.go index 7297d9b7218..781ab501689 100644 --- a/network/p2p/middleware.go +++ b/network/p2p/middleware.go @@ -539,7 +539,7 @@ func (m *Middleware) Subscribe(channel channels.Channel) error { } else { // for channels used by the staked 
nodes, add the topic validator to filter out messages from non-staked nodes validators = append(validators, - psValidator.AuthorizedSenderValidator(m.log, channel, m.ov.Identity), + psValidator.AuthorizedSenderMessageValidator(m.log, channel, m.ov.Identity), ) // NOTE: For non-public channels the libP2P node topic validator will reject diff --git a/network/p2p/topic_validator_test.go b/network/p2p/topic_validator_test.go index b49c2636b50..f7cd0a17841 100644 --- a/network/p2p/topic_validator_test.go +++ b/network/p2p/topic_validator_test.go @@ -193,7 +193,7 @@ func TestAuthorizedSenderValidator_Unauthorized(t *testing.T) { translator, err := p2p.NewFixedTableIdentityTranslator(ids) require.NoError(t, err) - authorizedSenderValidator := validator.AuthorizedSenderValidator(logger, channel, func(pid peer.ID) (*flow.Identity, bool) { + authorizedSenderValidator := validator.AuthorizedSenderMessageValidator(logger, channel, func(pid peer.ID) (*flow.Identity, bool) { fid, err := translator.GetFlowID(pid) if err != nil { return &flow.Identity{}, false @@ -300,7 +300,7 @@ func TestAuthorizedSenderValidator_InvalidMsg(t *testing.T) { }) logger := zerolog.New(os.Stdout).Level(zerolog.ErrorLevel).Hook(hook) - authorizedSenderValidator := validator.AuthorizedSenderValidator(logger, channel, func(pid peer.ID) (*flow.Identity, bool) { + authorizedSenderValidator := validator.AuthorizedSenderMessageValidator(logger, channel, func(pid peer.ID) (*flow.Identity, bool) { fid, err := translator.GetFlowID(pid) if err != nil { return &flow.Identity{}, false @@ -374,7 +374,7 @@ func TestAuthorizedSenderValidator_Ejected(t *testing.T) { }) logger := zerolog.New(os.Stdout).Level(zerolog.ErrorLevel).Hook(hook) - authorizedSenderValidator := validator.AuthorizedSenderValidator(logger, channel, func(pid peer.ID) (*flow.Identity, bool) { + authorizedSenderValidator := validator.AuthorizedSenderMessageValidator(logger, channel, func(pid peer.ID) (*flow.Identity, bool) { fid, err := 
translator.GetFlowID(pid) if err != nil { return &flow.Identity{}, false @@ -461,7 +461,7 @@ func TestAuthorizedSenderValidator_ClusterChannel(t *testing.T) { translator, err := p2p.NewFixedTableIdentityTranslator(ids) require.NoError(t, err) - authorizedSenderValidator := validator.AuthorizedSenderValidator(zerolog.Nop(), channel, func(pid peer.ID) (*flow.Identity, bool) { + authorizedSenderValidator := validator.AuthorizedSenderMessageValidator(zerolog.Nop(), channel, func(pid peer.ID) (*flow.Identity, bool) { fid, err := translator.GetFlowID(pid) if err != nil { return &flow.Identity{}, false diff --git a/network/validator/pubsub/authorized_sender_validator.go b/network/validator/pubsub/authorized_sender_validator.go index ac6a8ce5691..4d881c612fe 100644 --- a/network/validator/pubsub/authorized_sender_validator.go +++ b/network/validator/pubsub/authorized_sender_validator.go @@ -24,12 +24,14 @@ var ( ErrIdentityUnverified = errors.New("validation failed: could not verify identity of sender") ) +type validateFunc func(ctx context.Context, from peer.ID, msg interface{}) (string, error) + // AuthorizedSenderValidator returns a MessageValidator that will check if the sender of a message is authorized to send the message. // The MessageValidator returned will use the getIdentity to get the flow identity for the sender, asserting that the sender is a staked node and not ejected. Otherwise, the message is rejected. // The message is also authorized by checking that the sender is allowed to send the message on the channel. // If validation fails the message is rejected, and if the validation error is an expected error, slashing data is also collected. 
// Authorization config is defined in message.MsgAuthConfig -func AuthorizedSenderValidator(log zerolog.Logger, channel channels.Channel, getIdentity func(peer.ID) (*flow.Identity, bool)) MessageValidator { +func AuthorizedSenderValidator(log zerolog.Logger, channel channels.Channel, getIdentity func(peer.ID) (*flow.Identity, bool)) validateFunc { log = log.With(). Str("component", "authorized_sender_validator"). Str("network_channel", channel.String()). @@ -37,26 +39,26 @@ func AuthorizedSenderValidator(log zerolog.Logger, channel channels.Channel, get slashingViolationsConsumer := slashing.NewSlashingViolationsConsumer(log) - return func(ctx context.Context, from peer.ID, msg interface{}) pubsub.ValidationResult { + return func(ctx context.Context, from peer.ID, msg interface{}) (string, error) { identity, ok := getIdentity(from) if !ok { log.Error().Err(ErrIdentityUnverified).Str("peer_id", from.String()).Msg("rejecting message") - return pubsub.ValidationReject + return "", ErrUnauthorizedSender } - msgType, err := IsAuthorizedSender(identity, channel, msg) + msgType, err := isAuthorizedSender(identity, channel, msg) switch { case err == nil: - return pubsub.ValidationAccept + return msgType, nil case errors.Is(err, ErrUnauthorizedSender): slashingViolationsConsumer.OnUnAuthorizedSenderError(identity, from.String(), msgType, err) - return pubsub.ValidationReject + return msgType, ErrUnauthorizedSender case errors.Is(err, ErrUnknownMessageType): slashingViolationsConsumer.OnUnknownMsgTypeError(identity, from.String(), msgType, err) - return pubsub.ValidationReject + return msgType, ErrUnknownMessageType case errors.Is(err, ErrSenderEjected): slashingViolationsConsumer.OnSenderEjectedError(identity, from.String(), msgType, err) - return pubsub.ValidationReject + return msgType, ErrSenderEjected default: log.Error(). Err(err). 
@@ -65,12 +67,27 @@ func AuthorizedSenderValidator(log zerolog.Logger, channel channels.Channel, get Str("peer_node_id", identity.NodeID.String()). Str("message_type", msgType). Msg("unexpected error during message validation") + return msgType, err + } + } +} + +// AuthorizedSenderMessageValidator wraps the callback returned by AuthorizedSenderValidator and returns +// MessageValidator callback that returns pubsub.ValidationReject if validation fails and pubsub.ValidationAccept if validation passes. +func AuthorizedSenderMessageValidator(log zerolog.Logger, channel channels.Channel, getIdentity func(peer.ID) (*flow.Identity, bool)) MessageValidator { + return func(ctx context.Context, from peer.ID, msg interface{}) pubsub.ValidationResult { + validate := AuthorizedSenderValidator(log, channel, getIdentity) + + _, err := validate(ctx, from, msg) + if err != nil { return pubsub.ValidationReject } + + return pubsub.ValidationAccept } } -// IsAuthorizedSender performs network authorization validation. This func will assert the following; +// isAuthorizedSender performs network authorization validation. This func will assert the following; // 1. The node is not ejected. // 2. Using the message auth config // A. The message is authorized to be sent on channel. 
@@ -79,7 +96,7 @@ func AuthorizedSenderValidator(log zerolog.Logger, channel channels.Channel, get // * ErrSenderEjected: if identity of sender is ejected from the network // * ErrUnknownMessageType: if the message type does not have an auth config // * ErrUnauthorizedSender: if the sender is not authorized to send message on the channel -func IsAuthorizedSender(identity *flow.Identity, channel channels.Channel, msg interface{}) (string, error) { +func isAuthorizedSender(identity *flow.Identity, channel channels.Channel, msg interface{}) (string, error) { if identity.Ejected { return "", ErrSenderEjected } diff --git a/network/validator/pubsub/authorized_sender_validator_test.go b/network/validator/pubsub/authorized_sender_validator_test.go index 15be810a629..b66bbd95319 100644 --- a/network/validator/pubsub/authorized_sender_validator_test.go +++ b/network/validator/pubsub/authorized_sender_validator_test.go @@ -1,9 +1,12 @@ package validator import ( + "context" "fmt" "testing" + "github.com/libp2p/go-libp2p-core/peer" + "github.com/rs/zerolog" "github.com/stretchr/testify/suite" "github.com/onflow/flow-go/model/flow" @@ -14,96 +17,121 @@ import ( ) type TestCase struct { - Identity *flow.Identity - Channel channels.Channel - Message interface{} - MessageStr string + Identity *flow.Identity + GetIdentity func(pid peer.ID) (*flow.Identity, bool) + Channel channels.Channel + Message interface{} + MessageStr string } func TestIsAuthorizedSender(t *testing.T) { - suite.Run(t, new(TestIsAuthorizedSenderSuite)) + suite.Run(t, new(TestAuthorizedSenderValidatorSuite)) } -type TestIsAuthorizedSenderSuite struct { +type TestAuthorizedSenderValidatorSuite struct { suite.Suite authorizedSenderTestCases []TestCase unauthorizedSenderTestCases []TestCase unauthorizedMessageOnChannelTestCases []TestCase } -func (s *TestIsAuthorizedSenderSuite) SetupTest() { +func (s *TestAuthorizedSenderValidatorSuite) SetupTest() { s.initializeAuthorizationTestCases() 
s.initializeInvalidMessageOnChannelTestCases() } -// TestIsAuthorizedSender_AuthorizedSender checks that IsAuthorizedSender does not return false positive +// TestValidatorCallback_AuthorizedSender checks that the call back returned from AuthorizedSenderValidator does not return false positive // validation errors for all possible valid combinations (authorized sender role, message type). -func (s *TestIsAuthorizedSenderSuite) TestIsAuthorizedSender_AuthorizedSender() { +func (s *TestAuthorizedSenderValidatorSuite) TestValidatorCallback_AuthorizedSender() { for _, c := range s.authorizedSenderTestCases { str := fmt.Sprintf("role (%s) should be authorized to send message type (%s) on channel (%s)", c.Identity.Role, c.MessageStr, c.Channel) s.Run(str, func() { - msgType, err := IsAuthorizedSender(c.Identity, c.Channel, c.Message) + validate := AuthorizedSenderValidator(zerolog.Nop(), c.Channel, c.GetIdentity) + + pid, err := unittest.PeerIDFromFlowID(c.Identity) + s.Require().NoError(err) + + msgType, err := validate(context.Background(), pid, c.Message) s.Require().NoError(err) s.Require().Equal(c.MessageStr, msgType) }) } } -// TestIsAuthorizedSender_UnAuthorizedSender checks that IsAuthorizedSender return's ErrUnauthorizedSender +// TestValidatorCallback_UnAuthorizedSender checks that the call back returned from AuthorizedSenderValidator return's ErrUnauthorizedSender // validation error for all possible invalid combinations (unauthorized sender role, message type). 
-func (s *TestIsAuthorizedSenderSuite) TestIsAuthorizedSender_UnAuthorizedSender() { +func (s *TestAuthorizedSenderValidatorSuite) TestValidatorCallback_UnAuthorizedSender() { for _, c := range s.unauthorizedSenderTestCases { str := fmt.Sprintf("role (%s) should not be authorized to send message type (%s) on channel (%s)", c.Identity.Role, c.MessageStr, c.Channel) s.Run(str, func() { - msgType, err := IsAuthorizedSender(c.Identity, c.Channel, c.Message) + validate := AuthorizedSenderValidator(zerolog.Nop(), c.Channel, c.GetIdentity) + + pid, err := unittest.PeerIDFromFlowID(c.Identity) + s.Require().NoError(err) + msgType, err := validate(context.Background(), pid, c.Message) s.Require().ErrorIs(err, message.ErrUnauthorizedRole) s.Require().Equal(c.MessageStr, msgType) }) } } -// TestIsAuthorizedSender_UnAuthorizedSender for each invalid combination of message type and channel -// an appropriate error message.ErrUnauthorizedMessageOnChannel is returned. -func (s *TestIsAuthorizedSenderSuite) TestIsAuthorizedSender_UnAuthorizedMessageOnChannel() { +// TestValidatorCallback_UnAuthorizedMessageOnChannel for each invalid combination of message type and channel +// the call back returned from AuthorizedSenderValidator returns the appropriate error message.ErrUnauthorizedMessageOnChannel. 
+func (s *TestAuthorizedSenderValidatorSuite) TestValidatorCallback_UnAuthorizedMessageOnChannel() { for _, c := range s.unauthorizedMessageOnChannelTestCases { str := fmt.Sprintf("message type (%s) should not be authorized to be sent on channel (%s)", c.MessageStr, c.Channel) s.Run(str, func() { - msgType, err := IsAuthorizedSender(c.Identity, c.Channel, c.Message) + validate := AuthorizedSenderValidator(zerolog.Nop(), c.Channel, c.GetIdentity) + + pid, err := unittest.PeerIDFromFlowID(c.Identity) + s.Require().NoError(err) + + msgType, err := validate(context.Background(), pid, c.Message) s.Require().ErrorIs(err, message.ErrUnauthorizedMessageOnChannel) s.Require().Equal(c.MessageStr, msgType) }) } } -// TestIsAuthorizedSender_ClusterPrefixedChannels checks that IsAuthorizedSender correctly +// TestValidatorCallback_ClusterPrefixedChannels checks that the call back returned from AuthorizedSenderValidator correctly // handles cluster prefixed channels during validation. -func (s *TestIsAuthorizedSenderSuite) TestIsAuthorizedSender_ClusterPrefixedChannels() { - identity := unittest.IdentityFixture(unittest.WithRole(flow.RoleCollection)) +func (s *TestAuthorizedSenderValidatorSuite) TestValidatorCallback_ClusterPrefixedChannels() { + identity, _ := unittest.IdentityWithNetworkingKeyFixture(unittest.WithRole(flow.RoleCollection)) clusterID := flow.Localnet - // collection consensus cluster - msgType, err := IsAuthorizedSender(identity, channels.ConsensusCluster(clusterID), &messages.ClusterBlockResponse{}) + getIdentityFunc := s.getIdentity(identity) + pid, err := unittest.PeerIDFromFlowID(identity) + s.Require().NoError(err) + + // validate collection consensus cluster + validateCollConsensus := AuthorizedSenderValidator(zerolog.Nop(), channels.ConsensusCluster(clusterID), getIdentityFunc) + msgType, err := validateCollConsensus(context.Background(), pid, &messages.ClusterBlockResponse{}) s.Require().NoError(err) s.Require().Equal(message.ClusterBlockResponse, 
msgType) - // collection sync cluster - msgType, err = IsAuthorizedSender(identity, channels.SyncCluster(clusterID), &messages.SyncRequest{}) + // validate collection sync cluster + validateSyncCluster := AuthorizedSenderValidator(zerolog.Nop(), channels.SyncCluster(clusterID), getIdentityFunc) + msgType, err = validateSyncCluster(context.Background(), pid, &messages.SyncRequest{}) s.Require().NoError(err) s.Require().Equal(message.SyncRequest, msgType) } -// TestIsAuthorizedSender_ValidationFailure checks that IsAuthorizedSender returns the expected validation error. -func (s *TestIsAuthorizedSenderSuite) TestIsAuthorizedSender_ValidationFailure() { +// TestValidatorCallback_ValidationFailure checks that the call back returned from AuthorizedSenderValidator returns the expected validation error. +func (s *TestAuthorizedSenderValidatorSuite) TestValidatorCallback_ValidationFailure() { s.Run("sender is ejected", func() { - identity := unittest.IdentityFixture() + identity, _ := unittest.IdentityWithNetworkingKeyFixture() identity.Ejected = true - msgType, err := IsAuthorizedSender(identity, channels.SyncCommittee, &messages.SyncRequest{}) + getIdentityFunc := s.getIdentity(identity) + pid, err := unittest.PeerIDFromFlowID(identity) + s.Require().NoError(err) + validate := AuthorizedSenderValidator(zerolog.Nop(), channels.SyncCommittee, getIdentityFunc) + msgType, err := validate(context.Background(), pid, &messages.SyncRequest{}) s.Require().ErrorIs(err, ErrSenderEjected) s.Require().Equal("", msgType) }) s.Run("unknown message type", func() { - identity := unittest.IdentityFixture(unittest.WithRole(flow.RoleConsensus)) + identity, _ := unittest.IdentityWithNetworkingKeyFixture(unittest.WithRole(flow.RoleConsensus)) type msg struct { *messages.BlockProposal } @@ -114,29 +142,51 @@ func (s *TestIsAuthorizedSenderSuite) TestIsAuthorizedSender_ValidationFailure() Payload: nil, }} + getIdentityFunc := s.getIdentity(identity) + pid, err := 
unittest.PeerIDFromFlowID(identity) + s.Require().NoError(err) + validate := AuthorizedSenderValidator(zerolog.Nop(), channels.ConsensusCommittee, getIdentityFunc) + // unknown message types are rejected - msgType, err := IsAuthorizedSender(identity, channels.ConsensusCommittee, m) + msgType, err := validate(context.Background(), pid, m) + s.Require().ErrorIs(err, ErrUnknownMessageType) s.Require().Equal("", msgType) // nil messages are rejected - msgType, err = IsAuthorizedSender(identity, channels.ConsensusCommittee, nil) + msgType, err = validate(context.Background(), pid, nil) s.Require().ErrorIs(err, ErrUnknownMessageType) s.Require().Equal("", msgType) }) + + s.Run("sender is not staked getIdentityFunc does not return identity ", func() { + identity, _ := unittest.IdentityWithNetworkingKeyFixture() + + // getIdentityFunc simulates unstaked node not found in participant list + getIdentityFunc := func(id peer.ID) (*flow.Identity, bool) { return nil, false } + + pid, err := unittest.PeerIDFromFlowID(identity) + s.Require().NoError(err) + + validate := AuthorizedSenderValidator(zerolog.Nop(), channels.SyncCommittee, getIdentityFunc) + msgType, err := validate(context.Background(), pid, &messages.SyncRequest{}) + s.Require().ErrorIs(err, ErrUnauthorizedSender) + s.Require().Equal("", msgType) + }) } // initializeAuthorizationTestCases initializes happy and sad path test cases for checking authorized and unauthorized role message combinations. 
-func (s *TestIsAuthorizedSenderSuite) initializeAuthorizationTestCases() { +func (s *TestAuthorizedSenderValidatorSuite) initializeAuthorizationTestCases() { for _, c := range message.AuthorizationConfigs { for channel, authorizedRoles := range c.Config { for _, role := range flow.Roles() { - identity := unittest.IdentityFixture(unittest.WithRole(role)) + identity, _ := unittest.IdentityWithNetworkingKeyFixture(unittest.WithRole(role)) tc := TestCase{ - Identity: identity, - Channel: channel, - Message: c.Type(), - MessageStr: c.Name, + Identity: identity, + GetIdentity: s.getIdentity(identity), + Channel: channel, + Message: c.Type(), + MessageStr: c.Name, } if authorizedRoles.Contains(role) { @@ -153,11 +203,11 @@ func (s *TestIsAuthorizedSenderSuite) initializeAuthorizationTestCases() { // initializeInvalidMessageOnChannelTestCases initializes test cases for all possible combinations of invalid message types on channel. // NOTE: the role in the test case does not matter since ErrUnauthorizedMessageOnChannel will be returned before the role is checked. 
-func (s *TestIsAuthorizedSenderSuite) initializeInvalidMessageOnChannelTestCases() { +func (s *TestAuthorizedSenderValidatorSuite) initializeInvalidMessageOnChannelTestCases() { // iterate all channels for _, c := range message.AuthorizationConfigs { for channel, authorizedRoles := range c.Config { - identity := unittest.IdentityFixture(unittest.WithRole(authorizedRoles[0])) + identity, _ := unittest.IdentityWithNetworkingKeyFixture(unittest.WithRole(authorizedRoles[0])) // iterate all message types for _, config := range message.AuthorizationConfigs { @@ -166,10 +216,11 @@ func (s *TestIsAuthorizedSenderSuite) initializeInvalidMessageOnChannelTestCases _, ok := config.Config[channel] if config.Name != c.Name && !ok { tc := TestCase{ - Identity: identity, - Channel: channel, - Message: config.Type(), - MessageStr: config.Name, + Identity: identity, + GetIdentity: s.getIdentity(identity), + Channel: channel, + Message: config.Type(), + MessageStr: config.Name, } s.unauthorizedMessageOnChannelTestCases = append(s.unauthorizedMessageOnChannelTestCases, tc) } @@ -177,3 +228,10 @@ func (s *TestIsAuthorizedSenderSuite) initializeInvalidMessageOnChannelTestCases } } } + +// getIdentity returns a callback that simply returns the provided identity. 
+func (s *TestAuthorizedSenderValidatorSuite) getIdentity(id *flow.Identity) func(pid peer.ID) (*flow.Identity, bool) { + return func(pid peer.ID) (*flow.Identity, bool) { + return id, true + } +} diff --git a/utils/unittest/fixtures.go b/utils/unittest/fixtures.go index b254c1563e9..46fe088eaeb 100644 --- a/utils/unittest/fixtures.go +++ b/utils/unittest/fixtures.go @@ -11,6 +11,7 @@ import ( "github.com/ipfs/go-cid" "github.com/libp2p/go-libp2p-core/peer" "github.com/onflow/cadence" + "github.com/onflow/flow-go/network/p2p/keyutils" "github.com/stretchr/testify/require" sdk "github.com/onflow/flow-go-sdk" @@ -1413,7 +1414,7 @@ func WithChunkID(chunkID flow.Identifier) func(*verification.ChunkDataPackReques // and height of zero. // Use options to customize the request. func ChunkDataPackRequestFixture(opts ...func(*verification.ChunkDataPackRequest)) *verification. - ChunkDataPackRequest { +ChunkDataPackRequest { req := &verification.ChunkDataPackRequest{ Locator: chunks.Locator{ @@ -2106,3 +2107,18 @@ func NewSealingConfigs(val uint) module.SealingConfigsSetter { } return instance } + +func PeerIDFromFlowID(identity *flow.Identity) (peer.ID, error) { + networkKey := identity.NetworkPubKey + peerPK, err := keyutils.LibP2PPublicKeyFromFlow(networkKey) + if err != nil { + return peer.ID(0), err + } + + peerID, err := peer.IDFromPublicKey(peerPK) + if err != nil { + return peer.ID(0), err + } + + return peerID, nil +} From dfd1ec76b910c1a73563b90fae5e9f60cfd702ac Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Mon, 4 Jul 2022 20:29:24 -0400 Subject: [PATCH 089/223] Update fixtures.go --- utils/unittest/fixtures.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/utils/unittest/fixtures.go b/utils/unittest/fixtures.go index 46fe088eaeb..adef106f9ca 100644 --- a/utils/unittest/fixtures.go +++ b/utils/unittest/fixtures.go @@ -10,10 +10,9 @@ import ( blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" 
"github.com/libp2p/go-libp2p-core/peer" - "github.com/onflow/cadence" - "github.com/onflow/flow-go/network/p2p/keyutils" "github.com/stretchr/testify/require" + "github.com/onflow/cadence" sdk "github.com/onflow/flow-go-sdk" hotstuff "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/crypto" @@ -33,6 +32,7 @@ import ( "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/mempool/entity" "github.com/onflow/flow-go/module/updatable_configs" + "github.com/onflow/flow-go/network/p2p/keyutils" "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/state/protocol/inmem" "github.com/onflow/flow-go/utils/dsl" @@ -1414,7 +1414,7 @@ func WithChunkID(chunkID flow.Identifier) func(*verification.ChunkDataPackReques // and height of zero. // Use options to customize the request. func ChunkDataPackRequestFixture(opts ...func(*verification.ChunkDataPackRequest)) *verification. -ChunkDataPackRequest { + ChunkDataPackRequest { req := &verification.ChunkDataPackRequest{ Locator: chunks.Locator{ @@ -2112,12 +2112,12 @@ func PeerIDFromFlowID(identity *flow.Identity) (peer.ID, error) { networkKey := identity.NetworkPubKey peerPK, err := keyutils.LibP2PPublicKeyFromFlow(networkKey) if err != nil { - return peer.ID(0), err + return "", err } peerID, err := peer.IDFromPublicKey(peerPK) if err != nil { - return peer.ID(0), err + return "", err } return peerID, nil From 35d4105ffa4baea049910c81c6901c78e8fc14f6 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Mon, 4 Jul 2022 20:31:13 -0400 Subject: [PATCH 090/223] Update fixtures.go --- utils/unittest/fixtures.go | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/unittest/fixtures.go b/utils/unittest/fixtures.go index adef106f9ca..0dda6064ddf 100644 --- a/utils/unittest/fixtures.go +++ b/utils/unittest/fixtures.go @@ -13,6 +13,7 @@ import ( "github.com/stretchr/testify/require" "github.com/onflow/cadence" + sdk "github.com/onflow/flow-go-sdk" hotstuff 
"github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/crypto" From 815356e361a4a3804e28605c1e2a15714bd22066 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Wed, 6 Jul 2022 10:38:31 -0400 Subject: [PATCH 091/223] remove sentinels, instead document benign errors --- network/errors.go | 23 ----------- network/middleware.go | 7 ++++ network/p2p/errors.go | 28 ------------- network/p2p/middleware.go | 87 ++++++++++++++++++--------------------- 4 files changed, 47 insertions(+), 98 deletions(-) delete mode 100644 network/p2p/errors.go diff --git a/network/errors.go b/network/errors.go index 84bd25a0f7a..07ecbc23aa1 100644 --- a/network/errors.go +++ b/network/errors.go @@ -2,31 +2,8 @@ package network import ( "errors" - "fmt" ) var ( EmptyTargetList = errors.New("target list empty") ) - -// BenignNetworkingError covers the entire class of benign networking errors. -// This error is only returned, if the networking layer is still fully functional -// despite the encountering the error condition. -type BenignNetworkingError struct { - err error -} - -func NewBenignNetworkingErrorf(msg string, args ...interface{}) error { - return BenignNetworkingError{ - err: fmt.Errorf(msg, args...), - } -} - -func (e BenignNetworkingError) Error() string { return e.err.Error() } -func (e BenignNetworkingError) Unwrap() error { return e.err } - -// IsBenignNetworkingError returns whether err is an BenignNetworkingError -func IsBenignNetworkingError(err error) bool { - var e BenignNetworkingError - return errors.As(err, &e) -} diff --git a/network/middleware.go b/network/middleware.go index f6282b2f3a8..d9830f41864 100644 --- a/network/middleware.go +++ b/network/middleware.go @@ -35,17 +35,24 @@ type Middleware interface { // // Dispatch should be used whenever guaranteed delivery to a specific target is required. Otherwise, Publish is // a more efficient candidate. + // During normal operations any error returned is expected to be benign. 
SendDirect(msg *message.Message, targetID flow.Identifier) error // Publish publishes a message on the channel. It models a distributed broadcast where the message is meant for all or // a many nodes subscribing to the channel. It does not guarantee the delivery though, and operates on a best // effort. + // During normal operations any error returned is expected to be benign. Publish(msg *message.Message, channel Channel) error // Subscribe subscribes the middleware to a channel. + // During normal operations no errors are expected to be returned. + // If the libP2P node fails to subscribe to the topic created from + // the provided channel and returns an error this error is considered + // catastrophic as the node would not be able to operate correctly. Subscribe(channel Channel) error // Unsubscribe unsubscribes the middleware from a channel. + // During normal operations any error returned is expected to be benign. Unsubscribe(channel Channel) error // UpdateNodeAddresses fetches and updates the addresses of all the authorized participants diff --git a/network/p2p/errors.go b/network/p2p/errors.go deleted file mode 100644 index adf708ce174..00000000000 --- a/network/p2p/errors.go +++ /dev/null @@ -1,28 +0,0 @@ -package p2p - -import ( - "errors" - "fmt" -) - -// MiddlewareStartError covers the entire class of errors returned from Middleware.start . -// This error is returned if the Middleware fails to start for any reason. -// The networking layer will not be functional if this error condition is encountered. 
-type MiddlewareStartError struct { - err error -} - -func NewMiddlewareStartErrorf(msg string, args ...interface{}) error { - return MiddlewareStartError{ - err: fmt.Errorf(msg, args...), - } -} - -func (e MiddlewareStartError) Error() string { return e.err.Error() } -func (e MiddlewareStartError) Unwrap() error { return e.err } - -// IsMiddlewareStartError returns whether err is an MiddlewareStartError -func IsMiddlewareStartError(err error) bool { - var e MiddlewareStartError - return errors.As(err, &e) -} diff --git a/network/p2p/middleware.go b/network/p2p/middleware.go index b9e404f0435..a43c33fe699 100644 --- a/network/p2p/middleware.go +++ b/network/p2p/middleware.go @@ -130,8 +130,8 @@ func WithPeerManager(peerManagerFunc PeerManagerFactoryFunc) MiddlewareOption { // connectionGating if set to True, restricts this node to only talk to other nodes which are part of the identity list // managePeerConnections if set to True, enables the default PeerManager which continuously updates the node's peer connections // validators are the set of the different message validators that each inbound messages is passed through -// During normal operations, the following errors are expected to be thrown by the irrecoverable.SignalerContext: -// * MiddlewareStartError if middleware fails to start for any reason. +// During normal operations any error returned by Middleware.start is considered to be catastrophic +// and will be thrown by the irrecoverable.SignalerContext causing the node to crash. func NewMiddleware( log zerolog.Logger, libP2PNodeFactory LibP2PFactoryFunc, @@ -248,12 +248,12 @@ func (m *Middleware) Me() flow.Identifier { } // GetIPPort returns the ip address and port number associated with the middleware -// During normal operations, the following benign errors are expected: -// * BenignNetworkingError if the libP2P node fails to return the IP and port. 
+// During normal operations a benign error is expected +// if the libP2P node fails to return the IP and port. func (m *Middleware) GetIPPort() (string, string, error) { ipOrHostname, port, err := m.libP2PNode.GetIPPort() if err != nil { - return "", "", network.NewBenignNetworkingErrorf("failed to get ip and port from libP2P node: %w", err) + return "", "", fmt.Errorf("failed to get ip and port from libP2P node: %w", err) } return ipOrHostname, port, nil @@ -289,27 +289,22 @@ func (m *Middleware) SetOverlay(ov network.Overlay) { } // start will start the middleware. -// During normal operations, the following errors are expected: -// * MiddlewareStartError if -// - the overlay is not set on the Middleware. -// - creating the libP2P node fails. -// - registering the preferred unicast protocols fails. -// - creating the peer manager fails. -// - starting the peer manager fails. +// During normal operations if any error is returned it +// is considered catastrophic and the node should crash. 
func (m *Middleware) start(ctx context.Context) error { if m.ov == nil { - return NewMiddlewareStartErrorf("could not start middleware: %w", ErrMissingOverlay) + return fmt.Errorf("could not start middleware: %w", ErrMissingOverlay) } libP2PNode, err := m.libP2PNodeFactory(ctx) if err != nil { - return NewMiddlewareStartErrorf("could not create libp2p node: %w", err) + return fmt.Errorf("could not create libp2p node: %w", err) } m.libP2PNode = libP2PNode err = m.libP2PNode.WithDefaultUnicastProtocol(m.handleIncomingStream, m.preferredUnicasts) if err != nil { - return NewMiddlewareStartErrorf("could not register preferred unicast protocols on libp2p node: %w", err) + return fmt.Errorf("could not register preferred unicast protocols on libp2p node: %w", err) } m.UpdateNodeAddresses() @@ -318,14 +313,14 @@ func (m *Middleware) start(ctx context.Context) error { if m.peerManagerFactory != nil { m.peerManager, err = m.peerManagerFactory(m.libP2PNode.host, m.topologyPeers, m.log) if err != nil { - return NewMiddlewareStartErrorf("failed to create peer manager: %w", err) + return fmt.Errorf("failed to create peer manager: %w", err) } select { case <-m.peerManager.Ready(): m.log.Debug().Msg("peer manager successfully started") case <-time.After(30 * time.Second): - return NewMiddlewareStartErrorf("could not start peer manager") + return fmt.Errorf("could not start peer manager") } } @@ -362,19 +357,18 @@ func (m *Middleware) stop() { // a more efficient candidate. // // During normal operations, the following benign errors are expected: -// * BenignNetworkingError if -// - the peer ID for the target node ID cannot be found. -// - the msg size exceeds result returned from unicastMaxMsgSize(msg) -// - the libP2P node fails to publish the message. -// - the libP2P node fails to create the stream. -// - setting write deadline on the stream fails. -// - the gogo protobuf writer fails to write the message. -// - flushing the stream fails. 
+// - the peer ID for the target node ID cannot be found. +// - the msg size exceeds result returned from unicastMaxMsgSize(msg) +// - the libP2P node fails to publish the message. +// - the libP2P node fails to create the stream. +// - setting write deadline on the stream fails. +// - the gogo protobuf writer fails to write the message. +// - flushing the stream fails. func (m *Middleware) SendDirect(msg *message.Message, targetID flow.Identifier) (err error) { // translates identifier to peer id peerID, err := m.idTranslator.GetPeerID(targetID) if err != nil { - return network.NewBenignNetworkingErrorf("could not find peer id for target id: %w", err) + return fmt.Errorf("could not find peer id for target id: %w", err) } maxMsgSize := unicastMaxMsgSize(msg) @@ -382,7 +376,7 @@ func (m *Middleware) SendDirect(msg *message.Message, targetID flow.Identifier) // message size goes beyond maximum size that the serializer can handle. // proceeding with this message results in closing the connection by the target side, and // delivery failure. 
- return network.NewBenignNetworkingErrorf("message size %d exceeds configured max message size %d", msg.Size(), maxMsgSize) + return fmt.Errorf("message size %d exceeds configured max message size %d", msg.Size(), maxMsgSize) } maxTimeout := m.unicastMaxMsgDuration(msg) @@ -406,7 +400,7 @@ func (m *Middleware) SendDirect(msg *message.Message, targetID flow.Identifier) // sent out the receiver stream, err := m.libP2PNode.CreateStream(ctx, peerID) if err != nil { - return network.NewBenignNetworkingErrorf("failed to create stream for %s: %w", targetID, err) + return fmt.Errorf("failed to create stream for %s: %w", targetID, err) } success := false @@ -432,7 +426,7 @@ func (m *Middleware) SendDirect(msg *message.Message, targetID flow.Identifier) deadline, _ := ctx.Deadline() err = stream.SetWriteDeadline(deadline) if err != nil { - return network.NewBenignNetworkingErrorf("failed to set write deadline for stream: %w", err) + return fmt.Errorf("failed to set write deadline for stream: %w", err) } // create a gogo protobuf writer @@ -441,13 +435,13 @@ func (m *Middleware) SendDirect(msg *message.Message, targetID flow.Identifier) err = writer.WriteMsg(msg) if err != nil { - return network.NewBenignNetworkingErrorf("failed to send message to %s: %w", targetID, err) + return fmt.Errorf("failed to send message to %s: %w", targetID, err) } // flush the stream err = bufw.Flush() if err != nil { - return network.NewBenignNetworkingErrorf("failed to flush stream for %s: %w", targetID, err) + return fmt.Errorf("failed to flush stream for %s: %w", targetID, err) } success = true @@ -555,9 +549,10 @@ func (m *Middleware) handleIncomingStream(s libp2pnetwork.Stream) { } // Subscribe subscribes the middleware to a channel. -// During normal operations, the following benign errors are expected: -// * BenignNetworkingError if the libP2P node fails to subscribe to the topic -// created from the provided channel. +// During normal operations no errors are expected to be returned. 
+// If the libP2P node fails to subscribe to the topic created from +// the provided channel and returns an error this error is considered +// catastrophic as the node would not be able to operate correctly. func (m *Middleware) Subscribe(channel network.Channel) error { topic := network.TopicFromChannel(channel, m.rootBlockID) @@ -581,7 +576,7 @@ func (m *Middleware) Subscribe(channel network.Channel) error { s, err := m.libP2PNode.Subscribe(topic, m.codec, peerFilter, validators...) if err != nil { - return network.NewBenignNetworkingErrorf("could not subscribe to topic (%s): %w", topic, err) + return fmt.Errorf("could not subscribe to topic (%s): %w", topic, err) } // create a new readSubscription with the context of the middleware @@ -599,13 +594,12 @@ func (m *Middleware) Subscribe(channel network.Channel) error { // Unsubscribe unsubscribes the middleware from a channel. // During normal operations, the following benign errors are expected: -// * BenignNetworkingError if the libP2P node fails to unsubscribe to the topic -// created from the provided channel. +// - the libP2P node fails to unsubscribe to the topic created from the provided channel. func (m *Middleware) Unsubscribe(channel network.Channel) error { topic := network.TopicFromChannel(channel, m.rootBlockID) err := m.libP2PNode.UnSubscribe(topic) if err != nil { - return network.NewBenignNetworkingErrorf("failed to unsubscribe from channel (%s): %w", channel, err) + return fmt.Errorf("failed to unsubscribe from channel (%s): %w", channel, err) } // update peers to remove nodes subscribed to channel @@ -659,10 +653,9 @@ func (m *Middleware) processMessage(msg *message.Message, decodedMsgPayload inte // a many nodes subscribing to the channel. It does not guarantee the delivery though, and operates on a best // effort. // During normal operations, the following benign errors are expected: -// * BenignNetworkingError if -// - the msg cannot be marshalled. 
-// - the msg size exceeds DefaultMaxPubSubMsgSize. -// - the libP2P node fails to publish the message. +// - the msg cannot be marshalled. +// - the msg size exceeds DefaultMaxPubSubMsgSize. +// - the libP2P node fails to publish the message. func (m *Middleware) Publish(msg *message.Message, channel network.Channel) error { m.log.Debug().Str("channel", channel.String()).Interface("msg", msg).Msg("publishing new message") @@ -671,14 +664,14 @@ func (m *Middleware) Publish(msg *message.Message, channel network.Channel) erro data, err := msg.Marshal() //binstat.LeaveVal(bs, int64(len(data))) if err != nil { - return network.NewBenignNetworkingErrorf("failed to marshal the message: %w", err) + return fmt.Errorf("failed to marshal the message: %w", err) } msgSize := len(data) if msgSize > DefaultMaxPubSubMsgSize { // libp2p pubsub will silently drop the message if its size is greater than the configured pubsub max message size // hence return an error as this message is undeliverable - return network.NewBenignNetworkingErrorf("message size %d exceeds configured max message size %d", msgSize, DefaultMaxPubSubMsgSize) + return fmt.Errorf("message size %d exceeds configured max message size %d", msgSize, DefaultMaxPubSubMsgSize) } topic := network.TopicFromChannel(channel, m.rootBlockID) @@ -686,7 +679,7 @@ func (m *Middleware) Publish(msg *message.Message, channel network.Channel) erro // publish the bytes on the topic err = m.libP2PNode.Publish(m.ctx, topic, data) if err != nil { - return network.NewBenignNetworkingErrorf("failed to publish the message: %w", err) + return fmt.Errorf("failed to publish the message: %w", err) } m.metrics.NetworkMessageSent(len(data), string(channel), msg.Type) @@ -696,11 +689,11 @@ func (m *Middleware) Publish(msg *message.Message, channel network.Channel) erro // IsConnected returns true if this node is connected to the node with id nodeID. 
// During normal operations, the following benign errors are expected: -// * BenignNetworkingError if the peer ID for the target node ID cannot be found. +// - the peer ID for the target node ID cannot be found. func (m *Middleware) IsConnected(nodeID flow.Identifier) (bool, error) { peerID, err := m.idTranslator.GetPeerID(nodeID) if err != nil { - return false, network.NewBenignNetworkingErrorf("could not find peer id for target id: %w", err) + return false, fmt.Errorf("could not find peer id for target id: %w", err) } return m.libP2PNode.IsConnected(peerID) } From ad03342c94c6797a059d3b6b99945b2a8886f59a Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Wed, 6 Jul 2022 15:03:24 -0400 Subject: [PATCH 092/223] add consistency to error docs --- network/middleware.go | 11 ++++------- network/p2p/middleware.go | 34 ++++++++++++++++------------------ 2 files changed, 20 insertions(+), 25 deletions(-) diff --git a/network/middleware.go b/network/middleware.go index d9830f41864..5c201f5adad 100644 --- a/network/middleware.go +++ b/network/middleware.go @@ -35,24 +35,21 @@ type Middleware interface { // // Dispatch should be used whenever guaranteed delivery to a specific target is required. Otherwise, Publish is // a more efficient candidate. - // During normal operations any error returned is expected to be benign. + // All errors returned from this function can be considered benign. SendDirect(msg *message.Message, targetID flow.Identifier) error // Publish publishes a message on the channel. It models a distributed broadcast where the message is meant for all or // a many nodes subscribing to the channel. It does not guarantee the delivery though, and operates on a best // effort. - // During normal operations any error returned is expected to be benign. + // All errors returned from this function can be considered benign. Publish(msg *message.Message, channel Channel) error // Subscribe subscribes the middleware to a channel. 
- // During normal operations no errors are expected to be returned. - // If the libP2P node fails to subscribe to the topic created from - // the provided channel and returns an error this error is considered - // catastrophic as the node would not be able to operate correctly. + // No errors are expected during normal operation. Subscribe(channel Channel) error // Unsubscribe unsubscribes the middleware from a channel. - // During normal operations any error returned is expected to be benign. + // All errors returned from this function can be considered benign. Unsubscribe(channel Channel) error // UpdateNodeAddresses fetches and updates the addresses of all the authorized participants diff --git a/network/p2p/middleware.go b/network/p2p/middleware.go index a43c33fe699..7c3aac104cf 100644 --- a/network/p2p/middleware.go +++ b/network/p2p/middleware.go @@ -5,7 +5,6 @@ package p2p import ( "bufio" "context" - "errors" "fmt" "io" "sync" @@ -64,8 +63,6 @@ var ( // allowAll is a peerFilterFunc that will always return true for all peer ids. // This filter is used to allow communication by all roles on public network channels. allowAll = func(_ peer.ID) bool { return true } - - ErrMissingOverlay = errors.New("overlay must be configured by calling SetOverlay before middleware can be started") ) // peerFilterFunc is a func type that will be used in the TopicValidator to filter @@ -213,8 +210,9 @@ func (m *Middleware) NewPingService(pingProtocol protocol.ID, provider network.P return NewPingService(m.libP2PNode.Host(), pingProtocol, m.log, provider) } -// topologyPeers callback used by the peer manager that the list of peer ID's +// topologyPeers callback used by the peer manager to get the list of peer ID's // which this node should be directly connected to as peers. +// No errors are expected during normal operation. 
func (m *Middleware) topologyPeers() (peer.IDSlice, error) { identities, err := m.ov.Topology() if err != nil { @@ -248,8 +246,7 @@ func (m *Middleware) Me() flow.Identifier { } // GetIPPort returns the ip address and port number associated with the middleware -// During normal operations a benign error is expected -// if the libP2P node fails to return the IP and port. +// All errors returned from this function can be considered benign. func (m *Middleware) GetIPPort() (string, string, error) { ipOrHostname, port, err := m.libP2PNode.GetIPPort() if err != nil { @@ -289,11 +286,10 @@ func (m *Middleware) SetOverlay(ov network.Overlay) { } // start will start the middleware. -// During normal operations if any error is returned it -// is considered catastrophic and the node should crash. +// No errors are expected during normal operation. func (m *Middleware) start(ctx context.Context) error { if m.ov == nil { - return fmt.Errorf("could not start middleware: %w", ErrMissingOverlay) + return fmt.Errorf("could not start middleware: overlay must be configured by calling SetOverlay before middleware can be started") } libP2PNode, err := m.libP2PNodeFactory(ctx) @@ -356,7 +352,7 @@ func (m *Middleware) stop() { // Dispatch should be used whenever guaranteed delivery to a specific target is required. Otherwise, Publish is // a more efficient candidate. // -// During normal operations, the following benign errors are expected: +// The following benign errors can be returned from libp2p: // - the peer ID for the target node ID cannot be found. // - the msg size exceeds result returned from unicastMaxMsgSize(msg) // - the libP2P node fails to publish the message. @@ -364,6 +360,8 @@ func (m *Middleware) stop() { // - setting write deadline on the stream fails. // - the gogo protobuf writer fails to write the message. // - flushing the stream fails. +// +// All errors returned from this function can be considered benign. 
func (m *Middleware) SendDirect(msg *message.Message, targetID flow.Identifier) (err error) { // translates identifier to peer id peerID, err := m.idTranslator.GetPeerID(targetID) @@ -549,10 +547,7 @@ func (m *Middleware) handleIncomingStream(s libp2pnetwork.Stream) { } // Subscribe subscribes the middleware to a channel. -// During normal operations no errors are expected to be returned. -// If the libP2P node fails to subscribe to the topic created from -// the provided channel and returns an error this error is considered -// catastrophic as the node would not be able to operate correctly. +// No errors are expected during normal operation. func (m *Middleware) Subscribe(channel network.Channel) error { topic := network.TopicFromChannel(channel, m.rootBlockID) @@ -593,8 +588,10 @@ func (m *Middleware) Subscribe(channel network.Channel) error { } // Unsubscribe unsubscribes the middleware from a channel. -// During normal operations, the following benign errors are expected: +// The following benign errors are expected during normal operations from libP2P: // - the libP2P node fails to unsubscribe to the topic created from the provided channel. +// +// All errors returned from this function can be considered benign. func (m *Middleware) Unsubscribe(channel network.Channel) error { topic := network.TopicFromChannel(channel, m.rootBlockID) err := m.libP2PNode.UnSubscribe(topic) @@ -652,10 +649,12 @@ func (m *Middleware) processMessage(msg *message.Message, decodedMsgPayload inte // Publish publishes a message on the channel. It models a distributed broadcast where the message is meant for all or // a many nodes subscribing to the channel. It does not guarantee the delivery though, and operates on a best // effort. -// During normal operations, the following benign errors are expected: +// The following benign errors are expected during normal operations: // - the msg cannot be marshalled. // - the msg size exceeds DefaultMaxPubSubMsgSize. 
// - the libP2P node fails to publish the message. +// +// All errors returned from this function can be considered benign. func (m *Middleware) Publish(msg *message.Message, channel network.Channel) error { m.log.Debug().Str("channel", channel.String()).Interface("msg", msg).Msg("publishing new message") @@ -688,8 +687,7 @@ func (m *Middleware) Publish(msg *message.Message, channel network.Channel) erro } // IsConnected returns true if this node is connected to the node with id nodeID. -// During normal operations, the following benign errors are expected: -// - the peer ID for the target node ID cannot be found. +// All errors returned from this function can be considered benign. func (m *Middleware) IsConnected(nodeID flow.Identifier) (bool, error) { peerID, err := m.idTranslator.GetPeerID(nodeID) if err != nil { From bbe8d5bda754e224335fa2dd20896853c8061a1e Mon Sep 17 00:00:00 2001 From: danielholmes839 Date: Wed, 6 Jul 2022 17:11:39 -0400 Subject: [PATCH 093/223] comments and switch followerBlockProposal struct for inRangeBlockResponse argument --- engine/common/follower/engine.go | 63 ++++++++++++-------------------- 1 file changed, 24 insertions(+), 39 deletions(-) diff --git a/engine/common/follower/engine.go b/engine/common/follower/engine.go index d6248468313..f9c9d05c1c6 100644 --- a/engine/common/follower/engine.go +++ b/engine/common/follower/engine.go @@ -24,11 +24,6 @@ import ( "github.com/onflow/flow-go/utils/logging" ) -type followerBlockProposal struct { - *messages.BlockProposal - Wait bool -} - type Engine struct { unit *engine.Unit log zerolog.Logger @@ -168,10 +163,7 @@ func (e *Engine) process(originID flow.Identifier, input interface{}) error { defer e.engMetrics.MessageHandled(metrics.EngineFollower, metrics.MessageBlockProposal) e.unit.Lock() defer e.unit.Unlock() - return e.onBlockProposal(originID, &followerBlockProposal{ - BlockProposal: v, - Wait: false, - }) + return e.onBlockProposal(originID, v, false) default: return 
fmt.Errorf("invalid event type (%T)", input) } @@ -186,28 +178,22 @@ func (e *Engine) onSyncedBlock(originID flow.Identifier, synced *events.SyncedBl } // process as proposal - proposal := &followerBlockProposal{ - BlockProposal: &messages.BlockProposal{ - Header: synced.Block.Header, - Payload: synced.Block.Payload, - }, - Wait: false, + proposal := &messages.BlockProposal{ + Header: synced.Block.Header, + Payload: synced.Block.Payload, } - return e.onBlockProposal(originID, proposal) + return e.onBlockProposal(originID, proposal, false) } func (e *Engine) onBlockResponse(originID flow.Identifier, res *messages.BlockResponse) error { for _, block := range res.Blocks { - proposal := &followerBlockProposal{ - BlockProposal: &messages.BlockProposal{ - Header: block.Header, - Payload: block.Payload, - }, - Wait: true, + proposal := &messages.BlockProposal{ + Header: block.Header, + Payload: block.Payload, } // process block proposal with a wait - if err := e.onBlockProposal(originID, proposal); err != nil { + if err := e.onBlockProposal(originID, proposal, true); err != nil { return err } } @@ -216,7 +202,7 @@ func (e *Engine) onBlockResponse(originID flow.Identifier, res *messages.BlockRe } // onBlockProposal handles incoming block proposals. 
-func (e *Engine) onBlockProposal(originID flow.Identifier, proposal *followerBlockProposal) error { +func (e *Engine) onBlockProposal(originID flow.Identifier, proposal *messages.BlockProposal, inRangeBlockResponse bool) error { span, ctx, _ := e.tracer.StartBlockSpan(context.Background(), proposal.Header.ID(), trace.FollowerOnBlockProposal) defer span.Finish() @@ -287,7 +273,7 @@ func (e *Engine) onBlockProposal(originID flow.Identifier, proposal *followerBlo if found { // add the block to the cache - _ = e.pending.Add(originID, proposal.BlockProposal) + _ = e.pending.Add(originID, proposal) // go to the first missing ancestor ancestorID := ancestor.Header.ParentID @@ -317,7 +303,7 @@ func (e *Engine) onBlockProposal(originID flow.Identifier, proposal *followerBlo _, err = e.headers.ByBlockID(header.ParentID) if errors.Is(err, storage.ErrNotFound) { - _ = e.pending.Add(originID, proposal.BlockProposal) + _ = e.pending.Add(originID, proposal) log.Debug().Msg("requesting missing parent for proposal") @@ -331,7 +317,7 @@ func (e *Engine) onBlockProposal(originID flow.Identifier, proposal *followerBlo // at this point, we should be able to connect the proposal to the finalized // state and should process it to see whether to forward to hotstuff or not - err = e.processBlockAndDescendants(ctx, proposal) + err = e.processBlockAndDescendants(ctx, proposal, inRangeBlockResponse) if err != nil { return fmt.Errorf("could not process block proposal: %w", err) } @@ -349,7 +335,7 @@ func (e *Engine) onBlockProposal(originID flow.Identifier, proposal *followerBlo // The function assumes that `proposal` is connected to the finalized state. By induction, // any children are therefore also connected to the finalized state and can be processed as well. // No errors are expected during normal operations. 
-func (e *Engine) processBlockAndDescendants(ctx context.Context, proposal *followerBlockProposal) error { +func (e *Engine) processBlockAndDescendants(ctx context.Context, proposal *messages.BlockProposal, inRangeBlockResponse bool) error { span, ctx := e.tracer.StartSpanFromContext(ctx, trace.FollowerProcessBlockProposal) defer span.Finish() @@ -405,10 +391,11 @@ func (e *Engine) processBlockAndDescendants(ctx context.Context, proposal *follo log.Info().Msg("forwarding block proposal to hotstuff") // submit the model to follower for processing - if proposal.Wait { + if inRangeBlockResponse { select { case <-e.follower.SubmitProposal(header, parent.View): - // wait until the block is processed when the proposal is coming from a range response + // we wait if the block proposal was part of a range block response since + // processing block n+1 before block n has finished will fail break case <-time.After(time.Millisecond * 200): // this shouldn't happen very often. 99.8% of proposals are processed within 150ms @@ -416,11 +403,12 @@ func (e *Engine) processBlockAndDescendants(ctx context.Context, proposal *follo break } } else { + // ignore returned channel to avoid waiting e.follower.SubmitProposal(header, parent.View) } // check for any descendants of the block to process - err = e.processPendingChildren(ctx, header) + err = e.processPendingChildren(ctx, header, inRangeBlockResponse) if err != nil { return fmt.Errorf("could not process pending children: %w", err) } @@ -431,7 +419,7 @@ func (e *Engine) processBlockAndDescendants(ctx context.Context, proposal *follo // processPendingChildren checks if there are proposals connected to the given // parent block that was just processed; if this is the case, they should now // all be validly connected to the finalized state and we should process them. 
-func (e *Engine) processPendingChildren(ctx context.Context, header *flow.Header) error { +func (e *Engine) processPendingChildren(ctx context.Context, header *flow.Header, inRangeBlockResponse bool) error { span, ctx := e.tracer.StartSpanFromContext(ctx, trace.FollowerProcessPendingChildren) defer span.Finish() @@ -447,14 +435,11 @@ func (e *Engine) processPendingChildren(ctx context.Context, header *flow.Header // then try to process children only this once var result *multierror.Error for _, child := range children { - proposal := &followerBlockProposal{ - BlockProposal: &messages.BlockProposal{ - Header: child.Header, - Payload: child.Payload, - }, - Wait: false, + proposal := &messages.BlockProposal{ + Header: child.Header, + Payload: child.Payload, } - err := e.processBlockAndDescendants(ctx, proposal) + err := e.processBlockAndDescendants(ctx, proposal, inRangeBlockResponse) if err != nil { result = multierror.Append(result, err) } From 5377200f84715577a3650b1d453d2848a98fbf67 Mon Sep 17 00:00:00 2001 From: Daniel Holmes <43529937+danielholmes839@users.noreply.github.com> Date: Thu, 7 Jul 2022 13:06:30 -0400 Subject: [PATCH 094/223] wrap error message Co-authored-by: Leo Zhang --- engine/common/follower/engine.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/engine/common/follower/engine.go b/engine/common/follower/engine.go index f9c9d05c1c6..97fd00a41ad 100644 --- a/engine/common/follower/engine.go +++ b/engine/common/follower/engine.go @@ -186,7 +186,7 @@ func (e *Engine) onSyncedBlock(originID flow.Identifier, synced *events.SyncedBl } func (e *Engine) onBlockResponse(originID flow.Identifier, res *messages.BlockResponse) error { - for _, block := range res.Blocks { + for i, block := range res.Blocks { proposal := &messages.BlockProposal{ Header: block.Header, Payload: block.Payload, @@ -194,7 +194,7 @@ func (e *Engine) onBlockResponse(originID flow.Identifier, res *messages.BlockRe // process block proposal with a wait if err := 
e.onBlockProposal(originID, proposal, true); err != nil { - return err + return fmt.Errorf("fail to process the block at index %v in a range block response that contains %v blocks: %w", i, len(res.Blocks), err) } } From 4c333280c36c4c44f1e3cf23fbfdc3286ac0bc92 Mon Sep 17 00:00:00 2001 From: Daniel Holmes <43529937+danielholmes839@users.noreply.github.com> Date: Thu, 7 Jul 2022 13:07:28 -0400 Subject: [PATCH 095/223] comments Co-authored-by: Leo Zhang --- engine/common/follower/engine.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/engine/common/follower/engine.go b/engine/common/follower/engine.go index 97fd00a41ad..f4035a0c45a 100644 --- a/engine/common/follower/engine.go +++ b/engine/common/follower/engine.go @@ -398,7 +398,7 @@ func (e *Engine) processBlockAndDescendants(ctx context.Context, proposal *messa // processing block n+1 before block n has finished will fail break case <-time.After(time.Millisecond * 200): - // this shouldn't happen very often. 99.8% of proposals are processed within 150ms + // this shouldn't happen very often. 
99.8% of proposals are processed within 200ms e.log.Warn().Msg("HotStuffFollower SubmitProposal timeout") break } From 50114c694c366c68a49600938bac9b17d4e6b122 Mon Sep 17 00:00:00 2001 From: Daniel Holmes <43529937+danielholmes839@users.noreply.github.com> Date: Thu, 7 Jul 2022 13:07:44 -0400 Subject: [PATCH 096/223] Update engine/common/follower/engine.go Co-authored-by: Leo Zhang --- engine/common/follower/engine.go | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/engine/common/follower/engine.go b/engine/common/follower/engine.go index f4035a0c45a..f1b768f0a0a 100644 --- a/engine/common/follower/engine.go +++ b/engine/common/follower/engine.go @@ -394,8 +394,13 @@ func (e *Engine) processBlockAndDescendants(ctx context.Context, proposal *messa if inRangeBlockResponse { select { case <-e.follower.SubmitProposal(header, parent.View): - // we wait if the block proposal was part of a range block response since - // processing block n+1 before block n has finished will fail + // after submitting proposal to hotstuff, then hotstuff will start processing block n, and follower + // engine is concurrently processing block n + 1. + // however follower engine will fail to process block n + 1 if block n is not saved in protocol state. + // Block n is only saved in protocol state when hotstuff finishes processing block n. + // In order to ensure follower engine don't process block n + 1 too early, we wait until hotstuff finish + // processing block n. + // this wait is only needed when processing range block response, since blocks are processed in order. break case <-time.After(time.Millisecond * 200): // this shouldn't happen very often. 
99.8% of proposals are processed within 200ms From 4bf7a2ac719fa320a809c30c5ef09104a8511219 Mon Sep 17 00:00:00 2001 From: danielholmes839 Date: Thu, 7 Jul 2022 13:31:48 -0400 Subject: [PATCH 097/223] comments and go fmt --- engine/common/follower/engine.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/engine/common/follower/engine.go b/engine/common/follower/engine.go index f1b768f0a0a..d0795cdaa7b 100644 --- a/engine/common/follower/engine.go +++ b/engine/common/follower/engine.go @@ -201,7 +201,7 @@ func (e *Engine) onBlockResponse(originID flow.Identifier, res *messages.BlockRe return nil } -// onBlockProposal handles incoming block proposals. +// onBlockProposal handles incoming block proposals. inRangeBlockResponse will determine whether or not we should wait in processBlockAndDescendants func (e *Engine) onBlockProposal(originID flow.Identifier, proposal *messages.BlockProposal, inRangeBlockResponse bool) error { span, ctx, _ := e.tracer.StartBlockSpan(context.Background(), proposal.Header.ID(), trace.FollowerOnBlockProposal) @@ -395,7 +395,7 @@ func (e *Engine) processBlockAndDescendants(ctx context.Context, proposal *messa select { case <-e.follower.SubmitProposal(header, parent.View): // after submitting proposal to hotstuff, then hotstuff will start processing block n, and follower - // engine is concurrently processing block n + 1. + // engine is concurrently processing block n + 1. // however follower engine will fail to process block n + 1 if block n is not saved in protocol state. // Block n is only saved in protocol state when hotstuff finishes processing block n. 
// In order to ensure follower engine don't process block n + 1 too early, we wait until hotstuff finish From 19f6a66905d97c29dfbb71e11ad1e63331f54886 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Thu, 7 Jul 2022 17:01:22 -0700 Subject: [PATCH 098/223] add signer ids to access API fix access/handler fix tests fix tests --- access/handler.go | 55 ++++++++++++++++++++++------ engine/access/access_test.go | 35 ++++++++++++++---- engine/access/rpc/engine.go | 2 +- engine/access/rpc/engine_builder.go | 5 ++- engine/common/rpc/convert/convert.go | 8 ++-- engine/execution/rpc/engine.go | 19 ++++++++-- state/protocol/util.go | 23 ++++++++++++ 7 files changed, 119 insertions(+), 28 deletions(-) diff --git a/access/handler.go b/access/handler.go index 1b5b42e21c2..79ba31996c6 100644 --- a/access/handler.go +++ b/access/handler.go @@ -11,17 +11,20 @@ import ( "github.com/onflow/flow-go/engine/common/rpc/convert" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/state/protocol" ) type Handler struct { api API chain flow.Chain + state protocol.State } -func NewHandler(api API, chain flow.Chain) *Handler { +func NewHandler(api API, chain flow.Chain, state protocol.State) *Handler { return &Handler{ api: api, chain: chain, + state: state, } } @@ -56,7 +59,12 @@ func (h *Handler) GetLatestBlockHeader( return nil, err } - return blockHeaderResponse(header) + signerIDs, err := protocol.DecodeSignerIDs(h.state, header) + if err != nil { + return nil, err + } + + return blockHeaderResponse(header, signerIDs) } // GetBlockHeaderByHeight gets a block header by height. @@ -69,7 +77,12 @@ func (h *Handler) GetBlockHeaderByHeight( return nil, err } - return blockHeaderResponse(header) + signerIDs, err := protocol.DecodeSignerIDs(h.state, header) + if err != nil { + return nil, err + } + + return blockHeaderResponse(header, signerIDs) } // GetBlockHeaderByID gets a block header by ID. 
@@ -87,7 +100,12 @@ func (h *Handler) GetBlockHeaderByID( return nil, err } - return blockHeaderResponse(header) + signerIDs, err := protocol.DecodeSignerIDs(h.state, header) + if err != nil { + return nil, err + } + + return blockHeaderResponse(header, signerIDs) } // GetLatestBlock gets the latest sealed block. @@ -100,7 +118,12 @@ func (h *Handler) GetLatestBlock( return nil, err } - return blockResponse(block, req.GetFullBlockResponse()) + signerIDs, err := protocol.DecodeSignerIDs(h.state, block.Header) + if err != nil { + return nil, err + } + + return blockResponse(block, signerIDs, req.GetFullBlockResponse()) } // GetBlockByHeight gets a block by height. @@ -113,7 +136,12 @@ func (h *Handler) GetBlockByHeight( return nil, err } - return blockResponse(block, req.GetFullBlockResponse()) + signerIDs, err := protocol.DecodeSignerIDs(h.state, block.Header) + if err != nil { + return nil, err + } + + return blockResponse(block, signerIDs, req.GetFullBlockResponse()) } // GetBlockByID gets a block by ID. @@ -131,7 +159,12 @@ func (h *Handler) GetBlockByID( return nil, err } - return blockResponse(block, req.GetFullBlockResponse()) + signerIDs, err := protocol.DecodeSignerIDs(h.state, block.Header) + if err != nil { + return nil, err + } + + return blockResponse(block, signerIDs, req.GetFullBlockResponse()) } // GetCollectionByID gets a collection by ID. @@ -486,11 +519,11 @@ func (h *Handler) GetExecutionResultForBlockID(ctx context.Context, req *access. 
return executionResultToMessages(result) } -func blockResponse(block *flow.Block, fullResponse bool) (*access.BlockResponse, error) { +func blockResponse(block *flow.Block, signerIDs []flow.Identifier, fullResponse bool) (*access.BlockResponse, error) { var msg *entities.Block var err error if fullResponse { - msg, err = convert.BlockToMessage(block) + msg, err = convert.BlockToMessage(block, signerIDs) if err != nil { return nil, err } @@ -502,8 +535,8 @@ func blockResponse(block *flow.Block, fullResponse bool) (*access.BlockResponse, }, nil } -func blockHeaderResponse(header *flow.Header) (*access.BlockHeaderResponse, error) { - msg, err := convert.BlockHeaderToMessage(header) +func blockHeaderResponse(header *flow.Header, signerIDs []flow.Identifier) (*access.BlockHeaderResponse, error) { + msg, err := convert.BlockHeaderToMessage(header, signerIDs) if err != nil { return nil, err } diff --git a/engine/access/access_test.go b/engine/access/access_test.go index 8b7c2dca759..ed4dfe236d7 100644 --- a/engine/access/access_test.go +++ b/engine/access/access_test.go @@ -35,6 +35,7 @@ import ( "github.com/onflow/flow-go/module/signature" "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/mocknetwork" + protocolInterface "github.com/onflow/flow-go/state/protocol" protocol "github.com/onflow/flow-go/state/protocol/mock" storage "github.com/onflow/flow-go/storage/badger" "github.com/onflow/flow-go/storage/badger/operation" @@ -125,7 +126,7 @@ func (suite *Suite) RunTest( backend.DefaultSnapshotHistoryLimit, ) - handler := access.NewHandler(suite.backend, suite.chainID.Chain()) + handler := access.NewHandler(suite.backend, suite.chainID.Chain(), suite.state) f(handler, db, blocks, headers, results) }) @@ -301,7 +302,7 @@ func (suite *Suite) TestSendTransactionToRandomCollectionNode() { backend.DefaultSnapshotHistoryLimit, ) - handler := access.NewHandler(backend, suite.chainID.Chain()) + handler := access.NewHandler(backend, suite.chainID.Chain(), 
suite.state) // Send transaction 1 resp, err := handler.SendTransaction(context.Background(), sendReq1) @@ -340,24 +341,40 @@ func (suite *Suite) TestSendTransactionToRandomCollectionNode() { func (suite *Suite) TestGetBlockByIDAndHeight() { suite.RunTest(func(handler *access.Handler, db *badger.DB, blocks *storage.Blocks, _ *storage.Headers, _ *storage.ExecutionResults) { + // the default header fixture creates a signerIDs out of 10 nodes committee, so we prepare a committee same as that + allConsensus := unittest.IdentityListFixture(40, unittest.WithRole(flow.RoleConsensus)) + + voterIndices, err := signature.EncodeSignersToIndices(allConsensus.NodeIDs(), allConsensus.NodeIDs()) + require.NoError(suite.T(), err) // test block1 get by ID block1 := unittest.BlockFixture() + block1.Header.ParentVoterIndices = voterIndices // test block2 get by height block2 := unittest.BlockFixture() block2.Header.Height = 2 + block2.Header.ParentVoterIndices = voterIndices require.NoError(suite.T(), blocks.Store(&block1)) require.NoError(suite.T(), blocks.Store(&block2)) // the follower logic should update height index on the block storage when a block is finalized - err := db.Update(operation.IndexBlockHeight(block2.Header.Height, block2.ID())) + err = db.Update(operation.IndexBlockHeight(block2.Header.Height, block2.ID())) require.NoError(suite.T(), err) + snapshotForSignerIDs := new(protocol.Snapshot) + snapshotForSignerIDs.On("Identities", mock.Anything).Return(allConsensus, nil) + suite.state.On("AtBlockID", block1.Header.ID()).Return(snapshotForSignerIDs, nil) + suite.state.On("AtBlockID", block2.Header.ID()).Return(snapshotForSignerIDs, nil) + assertHeaderResp := func(resp *accessproto.BlockHeaderResponse, err error, header *flow.Header) { + suite.state.On("AtBlockID", header.ID()).Return(snapshotForSignerIDs, nil) + expectedSignerIDs, err := protocolInterface.DecodeSignerIDs(suite.state, header) + require.NoError(suite.T(), err) + require.NoError(suite.T(), err) 
require.NotNil(suite.T(), resp) actual := *resp.Block - expectedMessage, err := convert.BlockHeaderToMessage(header) + expectedMessage, err := convert.BlockHeaderToMessage(header, expectedSignerIDs) require.NoError(suite.T(), err) require.Equal(suite.T(), *expectedMessage, actual) expectedBlockHeader, err := convert.MessageToBlockHeader(&actual) @@ -369,7 +386,10 @@ func (suite *Suite) TestGetBlockByIDAndHeight() { require.NoError(suite.T(), err) require.NotNil(suite.T(), resp) actual := resp.Block - expectedMessage, err := convert.BlockToMessage(block) + suite.state.On("AtBlockID", block.Header.ID()).Return(snapshotForSignerIDs, nil) + expectedSignerIDs, err := protocolInterface.DecodeSignerIDs(suite.state, block.Header) + require.NoError(suite.T(), err) + expectedMessage, err := convert.BlockToMessage(block, expectedSignerIDs) require.NoError(suite.T(), err) require.Equal(suite.T(), expectedMessage, actual) expectedBlock, err := convert.MessageToBlock(resp.Block) @@ -388,6 +408,7 @@ func (suite *Suite) TestGetBlockByIDAndHeight() { suite.Run("get header 1 by ID", func() { // get header by ID id := block1.ID() + suite.state.On("AtBlockID", block1.Header.ID()).Return(snapshotForSignerIDs, nil) req := &accessproto.GetBlockHeaderByIDRequest{ Id: id[:], } @@ -611,7 +632,7 @@ func (suite *Suite) TestGetSealedTransaction() { backend.DefaultSnapshotHistoryLimit, ) - handler := access.NewHandler(backend, suite.chainID.Chain()) + handler := access.NewHandler(backend, suite.chainID.Chain(), suite.state) rpcEngBuilder, err := rpc.NewBuilder(suite.log, suite.state, rpc.Config{}, nil, nil, blocks, headers, collections, transactions, receipts, results, suite.chainID, metrics, metrics, 0, 0, false, false, nil, nil) @@ -704,7 +725,7 @@ func (suite *Suite) TestExecuteScript() { backend.DefaultSnapshotHistoryLimit, ) - handler := access.NewHandler(suite.backend, suite.chainID.Chain()) + handler := access.NewHandler(suite.backend, suite.chainID.Chain(), suite.state) // initialize 
metrics related storage metrics := metrics.NewNoopCollector() diff --git a/engine/access/rpc/engine.go b/engine/access/rpc/engine.go index a4a45ff1a0a..ba3a967ecf8 100644 --- a/engine/access/rpc/engine.go +++ b/engine/access/rpc/engine.go @@ -184,7 +184,7 @@ func NewBuilder(log zerolog.Logger, chain: chainID.Chain(), } - builder := NewRPCEngineBuilder(eng) + builder := NewRPCEngineBuilder(eng, state) if rpcMetricsEnabled { builder.WithMetrics() } diff --git a/engine/access/rpc/engine_builder.go b/engine/access/rpc/engine_builder.go index c7987d292f2..636fe72c67e 100644 --- a/engine/access/rpc/engine_builder.go +++ b/engine/access/rpc/engine_builder.go @@ -6,16 +6,17 @@ import ( legacyaccessproto "github.com/onflow/flow/protobuf/go/flow/legacy/access" "github.com/onflow/flow-go/access" + "github.com/onflow/flow-go/state/protocol" legacyaccess "github.com/onflow/flow-go/access/legacy" "github.com/onflow/flow-go/apiproxy" ) // NewRPCEngineBuilder helps to build a new RPC engine. -func NewRPCEngineBuilder(engine *Engine) *RPCEngineBuilder { +func NewRPCEngineBuilder(engine *Engine, state protocol.State) *RPCEngineBuilder { builder := &RPCEngineBuilder{} builder.Engine = engine - builder.localAPIServer = access.NewHandler(builder.backend, builder.chain) + builder.localAPIServer = access.NewHandler(builder.backend, builder.chain, state) return builder } diff --git a/engine/common/rpc/convert/convert.go b/engine/common/rpc/convert/convert.go index d8dfa090d06..4f506ead943 100644 --- a/engine/common/rpc/convert/convert.go +++ b/engine/common/rpc/convert/convert.go @@ -137,10 +137,11 @@ func TransactionToMessage(tb flow.TransactionBody) *entities.Transaction { } } -func BlockHeaderToMessage(h *flow.Header) (*entities.BlockHeader, error) { +func BlockHeaderToMessage(h *flow.Header, signerIDs []flow.Identifier) (*entities.BlockHeader, error) { id := h.ID() t := timestamppb.New(h.Timestamp) + parentVoterIds := IdentifiersToMessages(signerIDs) return &entities.BlockHeader{ Id: 
id[:], @@ -150,6 +151,7 @@ func BlockHeaderToMessage(h *flow.Header) (*entities.BlockHeader, error) { Timestamp: t, View: h.View, ParentVoterIndices: h.ParentVoterIndices, + ParentVoterIds: parentVoterIds, ParentVoterSigData: h.ParentVoterSigData, ProposerId: h.ProposerID[:], ProposerSigData: h.ProposerSigData, @@ -245,7 +247,7 @@ func MessagesToExecutionResults(m []*entities.ExecutionResult) ([]*flow.Executio return execResults, nil } -func BlockToMessage(h *flow.Block) (*entities.Block, error) { +func BlockToMessage(h *flow.Block, signerIDs []flow.Identifier) (*entities.Block, error) { id := h.ID() @@ -260,7 +262,7 @@ func BlockToMessage(h *flow.Block) (*entities.Block, error) { return nil, err } - blockHeader, err := BlockHeaderToMessage(h.Header) + blockHeader, err := BlockHeaderToMessage(h.Header, signerIDs) if err != nil { return nil, err } diff --git a/engine/execution/rpc/engine.go b/engine/execution/rpc/engine.go index 14f45f33d06..d951cb7d4b9 100644 --- a/engine/execution/rpc/engine.go +++ b/engine/execution/rpc/engine.go @@ -535,7 +535,13 @@ func (h *handler) GetLatestBlockHeader( if err != nil { return nil, status.Errorf(codes.NotFound, "not found: %v", err) } - return blockHeaderResponse(header) + + signerIDs, err := protocol.DecodeSignerIDs(h.state, header) + if err != nil { + return nil, err + } + + return blockHeaderResponse(header, signerIDs) } // GetBlockHeaderByID gets a block header by ID. 
@@ -552,11 +558,16 @@ func (h *handler) GetBlockHeaderByID( return nil, status.Errorf(codes.NotFound, "not found: %v", err) } - return blockHeaderResponse(header) + signerIDs, err := protocol.DecodeSignerIDs(h.state, header) + if err != nil { + return nil, err + } + + return blockHeaderResponse(header, signerIDs) } -func blockHeaderResponse(header *flow.Header) (*execution.BlockHeaderResponse, error) { - msg, err := convert.BlockHeaderToMessage(header) +func blockHeaderResponse(header *flow.Header, signerIDs []flow.Identifier) (*execution.BlockHeaderResponse, error) { + msg, err := convert.BlockHeaderToMessage(header, signerIDs) if err != nil { return nil, err } diff --git a/state/protocol/util.go b/state/protocol/util.go index 0c6392ebaf2..03c87655e52 100644 --- a/state/protocol/util.go +++ b/state/protocol/util.go @@ -96,3 +96,26 @@ func FindGuarantors(state State, guarantee *flow.CollectionGuarantee) ([]flow.Id return guarantorIDs, nil } + +// DecodeSignerIDs decodes the signer indices from the given block header, and finds the signer identifiers from protocol state +// Expected Error returns during normal operations: +// * storage.ErrNotFound if block not found for the given header +// * signature.InvalidSignerIndicesError if `signerIndices` does not encode a valid set of consensus committiee +func DecodeSignerIDs(state State, header *flow.Header) ([]flow.Identifier, error) { + // root block does not have signer indices + if header.ParentVoterIndices == nil && header.View == 0 { + return []flow.Identifier{}, nil + } + snapshot := state.AtBlockID(header.ID()) + members, err := snapshot.Identities(filter.HasRole(flow.RoleConsensus)) + if err != nil { + return nil, fmt.Errorf("fail to retrieve identities for block %v: %w", header.ID(), err) + } + + signerIDs, err := signature.DecodeSignerIndicesToIdentifiers(members.NodeIDs(), header.ParentVoterIndices) + if err != nil { + return nil, fmt.Errorf("could not decode signer indices for block %v: %w", header.ID(), err) 
+ } + + return signerIDs, nil +} From 240917cecd10f4e1bb276ebcca9b78b177e7e86b Mon Sep 17 00:00:00 2001 From: Andrew Meyer Date: Fri, 8 Jul 2022 07:46:16 -0700 Subject: [PATCH 099/223] Add Andrew Meyer to performance stream Adding myself to the performance stream as suggested by @SaveTheRbtz --- CODEOWNERS | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/CODEOWNERS b/CODEOWNERS index c3182f0de2e..51b04cbcbd1 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -37,7 +37,7 @@ /cmd/bootstrap/** @vishalchangrani # Performance Stream -/integration/localnet/** @SaveTheRbtz @simonhf @Kay-Zee @zhangchiqing @pattyshack -/integration/loader/** @SaveTheRbtz @simonhf @Kay-Zee @pattyshack -/integration/benchmark/** @SaveTheRbtz @simonhf @Kay-Zee @pattyshack -/integration/utils/** @SaveTheRbtz @simonhf @Kay-Zee @pattyshack +/integration/localnet/** @SaveTheRbtz @simonhf @Kay-Zee @zhangchiqing @pattyshack @AndrewM-SDET +/integration/loader/** @SaveTheRbtz @simonhf @Kay-Zee @pattyshack @AndrewM-SDET +/integration/benchmark/** @SaveTheRbtz @simonhf @Kay-Zee @pattyshack @AndrewM-SDET +/integration/utils/** @SaveTheRbtz @simonhf @Kay-Zee @pattyshack @AndrewM-SDET From ab3035aa99d331521e42470ee650bf1ed7e89767 Mon Sep 17 00:00:00 2001 From: Janez Podhostnik Date: Fri, 8 Jul 2022 17:43:15 +0100 Subject: [PATCH 100/223] add tests and test fix --- engine/execution/computation/manager_test.go | 54 ++++++++++++++++ fvm/accounts_test.go | 66 ++++++++++++++++++-- module/mock/wal_metrics.go | 25 ++++++++ 3 files changed, 141 insertions(+), 4 deletions(-) create mode 100644 module/mock/wal_metrics.go diff --git a/engine/execution/computation/manager_test.go b/engine/execution/computation/manager_test.go index 03481d134ff..7f569bb7e26 100644 --- a/engine/execution/computation/manager_test.go +++ b/engine/execution/computation/manager_test.go @@ -250,6 +250,60 @@ func TestExecuteScript(t *testing.T) { require.NoError(t, err) } +// Balance script used to swallow errors, which 
meant that even if the view was empty, a script that did nothing but get +// the balance of an account would succeed and return 0. +func TestExecuteScript_BalanceScriptFailsIfViewIsEmpty(t *testing.T) { + + logger := zerolog.Nop() + + execCtx := fvm.NewContext(logger) + + me := new(module.Local) + me.On("NodeID").Return(flow.ZeroID) + + rt := fvm.NewInterpreterRuntime() + + vm := fvm.NewVirtualMachine(rt) + + view := delta.NewView(func(owner, controller, key string) (flow.RegisterValue, error) { + return nil, fmt.Errorf("error getting register") + }) + + scriptView := view.NewChild() + + script := []byte(fmt.Sprintf( + ` + pub fun main(): UFix64 { + return getAccount(%s).balance + } + `, + fvm.FungibleTokenAddress(execCtx.Chain).HexWithPrefix(), + )) + + eds := new(state_synchronization.ExecutionDataService) + edCache := new(state_synchronization.ExecutionDataCIDCache) + + engine, err := New(logger, + metrics.NewNoopCollector(), + trace.NewNoopTracer(), + me, + nil, + vm, + execCtx, + DefaultProgramsCacheSize, + committer.NewNoopViewCommitter(), + scriptLogThreshold, + DefaultScriptExecutionTimeLimit, + nil, + eds, + edCache) + require.NoError(t, err) + + header := unittest.BlockHeaderFixture() + _, err = engine.ExecuteScript(context.Background(), script, nil, &header, scriptView) + require.ErrorContains(t, err, "error getting register") +} + func TestExecuteScripPanicsAreHandled(t *testing.T) { ctx := fvm.NewContext(zerolog.Nop()) diff --git a/fvm/accounts_test.go b/fvm/accounts_test.go index 5c3c41a1581..7201842ec1d 100644 --- a/fvm/accounts_test.go +++ b/fvm/accounts_test.go @@ -2,6 +2,7 @@ package fvm_test import ( "fmt" + "github.com/onflow/flow-go/engine/execution/state/delta" "strconv" "testing" @@ -1252,7 +1253,10 @@ func TestAccountBalanceFields(t *testing.T) { }), ) - t.Run("Get balance fails for accounts that dont exist", + // TODO - this is because get account + borrow returns + // empty values instead of failing for an account that doesnt exist + // 
this behavior needs to addressed on Cadence side + t.Run("Get balance returns 0 for accounts that don't exist", newVMTest().withContextOptions( fvm.WithTransactionProcessors(fvm.NewTransactionInvoker(zerolog.Nop())), fvm.WithCadenceLogging(true), @@ -1271,7 +1275,32 @@ func TestAccountBalanceFields(t *testing.T) { err = vm.Run(ctx, script, view, programs) require.NoError(t, err) - require.Error(t, script.Err) + require.NoError(t, script.Err) + require.Equal(t, cadence.UFix64(0), script.Value) + }), + ) + + t.Run("Get balance fails if view returns an error", + newVMTest().withContextOptions( + fvm.WithTransactionProcessors(fvm.NewTransactionInvoker(zerolog.Nop())), + fvm.WithCadenceLogging(true), + ). + run(func(t *testing.T, vm *fvm.VirtualMachine, chain flow.Chain, ctx fvm.Context, view state.View, programs *programs.Programs) { + address := chain.ServiceAddress() + + script := fvm.Script([]byte(fmt.Sprintf(` + pub fun main(): UFix64 { + let acc = getAccount(0x%s) + return acc.balance + } + `, address))) + + view = delta.NewView(func(owner, controller, key string) (flow.RegisterValue, error) { + return nil, fmt.Errorf("error getting register %s, %s", flow.BytesToAddress([]byte(owner)).Hex(), key) + }) + + err := vm.Run(ctx, script, view, programs) + require.ErrorContains(t, err, fmt.Sprintf("error getting register %s, %s", address.Hex(), state.KeyAccountStatus)) }), ) @@ -1417,7 +1446,7 @@ func TestGetStorageCapacity(t *testing.T) { require.Equal(t, cadence.UInt64(10_010_000), script.Value) }), ) - t.Run("Get storage capacity fails for accounts that don't exist", + t.Run("Get storage capacity returns 0 for accounts that don't exist", newVMTest().withContextOptions( fvm.WithTransactionProcessors(fvm.NewTransactionInvoker(zerolog.Nop())), fvm.WithCadenceLogging(true), @@ -1441,7 +1470,36 @@ func TestGetStorageCapacity(t *testing.T) { err = vm.Run(ctx, script, view, programs) require.NoError(t, err) - require.Error(t, script.Err) + require.NoError(t, script.Err) + 
require.Equal(t, cadence.UInt64(0), script.Value) + }), + ) + t.Run("Get storage capacity fails if view returns an error", + newVMTest().withContextOptions( + fvm.WithTransactionProcessors(fvm.NewTransactionInvoker(zerolog.Nop())), + fvm.WithCadenceLogging(true), + fvm.WithAccountStorageLimit(false), + ).withBootstrapProcedureOptions( + fvm.WithStorageMBPerFLOW(1_000_000_000), + fvm.WithAccountCreationFee(100_000), + fvm.WithMinimumStorageReservation(100_000), + ). + run(func(t *testing.T, vm *fvm.VirtualMachine, chain flow.Chain, ctx fvm.Context, view state.View, programs *programs.Programs) { + address := chain.ServiceAddress() + + script := fvm.Script([]byte(fmt.Sprintf(` + pub fun main(): UInt64 { + let acc = getAccount(0x%s) + return acc.storageCapacity + } + `, address))) + + view = delta.NewView(func(owner, controller, key string) (flow.RegisterValue, error) { + return nil, fmt.Errorf("error getting register %s, %s", flow.BytesToAddress([]byte(owner)).Hex(), key) + }) + + err := vm.Run(ctx, script, view, programs) + require.ErrorContains(t, err, fmt.Sprintf("error getting register %s, %s", address.Hex(), state.KeyAccountStatus)) }), ) } diff --git a/module/mock/wal_metrics.go b/module/mock/wal_metrics.go new file mode 100644 index 00000000000..960a1d5f354 --- /dev/null +++ b/module/mock/wal_metrics.go @@ -0,0 +1,25 @@ +// Code generated by mockery v2.13.0. DO NOT EDIT. + +package mock + +import mock "github.com/stretchr/testify/mock" + +// WALMetrics is an autogenerated mock type for the WALMetrics type +type WALMetrics struct { + mock.Mock +} + +type NewWALMetricsT interface { + mock.TestingT + Cleanup(func()) +} + +// NewWALMetrics creates a new instance of WALMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
+func NewWALMetrics(t NewWALMetricsT) *WALMetrics { + mock := &WALMetrics{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} From f27af77b31dabd14c8e9571663f5a1d2a952aaf6 Mon Sep 17 00:00:00 2001 From: Janez Podhostnik Date: Fri, 8 Jul 2022 18:41:08 +0100 Subject: [PATCH 101/223] lint fix --- engine/execution/computation/manager_test.go | 2 +- fvm/accounts_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/engine/execution/computation/manager_test.go b/engine/execution/computation/manager_test.go index 7f569bb7e26..58565b7b9a3 100644 --- a/engine/execution/computation/manager_test.go +++ b/engine/execution/computation/manager_test.go @@ -253,7 +253,7 @@ func TestExecuteScript(t *testing.T) { // Balance script used to swallow errors, which meant that even if the view was empty, a script that did nothing but get // the balance of an account would succeed and return 0. func TestExecuteScript_BalanceScriptFailsIfViewIsEmpty(t *testing.T) { - + logger := zerolog.Nop() execCtx := fvm.NewContext(logger) diff --git a/fvm/accounts_test.go b/fvm/accounts_test.go index 7201842ec1d..6fc75847062 100644 --- a/fvm/accounts_test.go +++ b/fvm/accounts_test.go @@ -2,7 +2,6 @@ package fvm_test import ( "fmt" - "github.com/onflow/flow-go/engine/execution/state/delta" "strconv" "testing" @@ -13,6 +12,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/onflow/flow-go/engine/execution/state/delta" "github.com/onflow/flow-go/engine/execution/testutil" "github.com/onflow/flow-go/fvm" "github.com/onflow/flow-go/fvm/programs" From f567d37fd2c208bbf6f10a2f42b2fe234a01c440 Mon Sep 17 00:00:00 2001 From: Janez Podhostnik Date: Fri, 8 Jul 2022 18:43:17 +0100 Subject: [PATCH 102/223] remove wal_metrics --- module/mock/wal_metrics.go | 25 ------------------------- 1 file changed, 25 deletions(-) delete mode 100644 module/mock/wal_metrics.go diff --git a/module/mock/wal_metrics.go 
b/module/mock/wal_metrics.go deleted file mode 100644 index 960a1d5f354..00000000000 --- a/module/mock/wal_metrics.go +++ /dev/null @@ -1,25 +0,0 @@ -// Code generated by mockery v2.13.0. DO NOT EDIT. - -package mock - -import mock "github.com/stretchr/testify/mock" - -// WALMetrics is an autogenerated mock type for the WALMetrics type -type WALMetrics struct { - mock.Mock -} - -type NewWALMetricsT interface { - mock.TestingT - Cleanup(func()) -} - -// NewWALMetrics creates a new instance of WALMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewWALMetrics(t NewWALMetricsT) *WALMetrics { - mock := &WALMetrics{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} From f79a23302520b8716850badb846ef4ef02b279dd Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Fri, 8 Jul 2022 15:38:49 -0400 Subject: [PATCH 103/223] Update network/message/authorization.go Co-authored-by: Peter Argue <89119817+peterargue@users.noreply.github.com> --- network/message/authorization.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/network/message/authorization.go b/network/message/authorization.go index cf356737b86..997b3c6cab3 100644 --- a/network/message/authorization.go +++ b/network/message/authorization.go @@ -248,10 +248,10 @@ func initializeMessageAuthConfigsMap() { return new(messages.EntityRequest) }, Config: map[channels.Channel]flow.RoleList{ - channels.RequestChunks: {flow.RoleConsensus, flow.RoleCollection}, - channels.RequestApprovalsByChunk: {flow.RoleConsensus, flow.RoleCollection}, - channels.RequestReceiptsByBlockID: {flow.RoleConsensus, flow.RoleCollection}, - channels.RequestCollections: {flow.RoleAccess, flow.RoleConsensus, flow.RoleCollection}, + channels.RequestChunks: {flow.RoleVerification}, + channels.RequestApprovalsByChunk: {flow.RoleConsensus}, + channels.RequestReceiptsByBlockID: {flow.RoleConsensus}, + 
channels.RequestCollections: {flow.RoleAccess, flow.RoleExecution}, }, } AuthorizationConfigs[EntityResponse] = MsgAuthConfig{ From 2ff4a50b4d14adbc9e11f82691f452f777784caf Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Fri, 8 Jul 2022 15:39:04 -0400 Subject: [PATCH 104/223] Update network/message/authorization.go Co-authored-by: Peter Argue <89119817+peterargue@users.noreply.github.com> --- network/message/authorization.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/network/message/authorization.go b/network/message/authorization.go index 997b3c6cab3..c7bea451646 100644 --- a/network/message/authorization.go +++ b/network/message/authorization.go @@ -260,10 +260,10 @@ func initializeMessageAuthConfigsMap() { return new(messages.EntityResponse) }, Config: map[channels.Channel]flow.RoleList{ - channels.RequestChunks: {flow.RoleCollection, flow.RoleExecution}, - channels.RequestCollections: {flow.RoleCollection, flow.RoleExecution}, - channels.RequestApprovalsByChunk: {flow.RoleCollection, flow.RoleExecution}, - channels.RequestReceiptsByBlockID: {flow.RoleCollection, flow.RoleExecution}, + channels.ProvideChunks: {flow.RoleExecution}, + channels.ProvideCollections: {flow.RoleCollection}, + channels.ProvideApprovalsByChunk: {flow.RoleVerification}, + channels.ProvideReceiptsByBlockID: {flow.RoleExecution}, }, } From a70c5ba60bd8abd32e51a9c8a8d8808dabaab290 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Fri, 8 Jul 2022 16:03:35 -0400 Subject: [PATCH 105/223] Update network/message/authorization.go Co-authored-by: Peter Argue <89119817+peterargue@users.noreply.github.com> --- network/message/authorization.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/network/message/authorization.go b/network/message/authorization.go index c7bea451646..010b5a327f3 100644 --- a/network/message/authorization.go +++ b/network/message/authorization.go @@ -118,7 +118,7 @@ func initializeMessageAuthConfigsMap() { return 
new(messages.BlockResponse) }, Config: map[channels.Channel]flow.RoleList{ - channels.SyncCommittee: flow.Roles(), + channels.SyncCommittee: {flow.RoleConsensus}, channels.SyncClusterPrefix: {flow.RoleCollection}, }, } From 02ab39384cfefba3aacd64ba2ed61dade2160189 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Fri, 8 Jul 2022 16:03:52 -0400 Subject: [PATCH 106/223] Update network/message/authorization.go Co-authored-by: Peter Argue <89119817+peterargue@users.noreply.github.com> --- network/message/authorization.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/network/message/authorization.go b/network/message/authorization.go index 010b5a327f3..e7423cf0c9c 100644 --- a/network/message/authorization.go +++ b/network/message/authorization.go @@ -88,7 +88,7 @@ func initializeMessageAuthConfigsMap() { return new(messages.SyncResponse) }, Config: map[channels.Channel]flow.RoleList{ - channels.SyncCommittee: flow.Roles(), + channels.SyncCommittee: {flow.RoleConsensus}, channels.SyncClusterPrefix: {flow.RoleCollection}, }, } From e7e98e0fdd474c2bf0e73f823564f2cd9a3acdf5 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Fri, 8 Jul 2022 16:18:22 -0400 Subject: [PATCH 107/223] add pubsub message validator check to all test --- .../authorized_sender_validator_test.go | 36 ++++++++++++++++++- 1 file changed, 35 insertions(+), 1 deletion(-) diff --git a/network/validator/pubsub/authorized_sender_validator_test.go b/network/validator/pubsub/authorized_sender_validator_test.go index b66bbd95319..ec9fc2a53a0 100644 --- a/network/validator/pubsub/authorized_sender_validator_test.go +++ b/network/validator/pubsub/authorized_sender_validator_test.go @@ -6,6 +6,7 @@ import ( "testing" "github.com/libp2p/go-libp2p-core/peer" + pubsub "github.com/libp2p/go-libp2p-pubsub" "github.com/rs/zerolog" "github.com/stretchr/testify/suite" @@ -54,6 +55,10 @@ func (s *TestAuthorizedSenderValidatorSuite) TestValidatorCallback_AuthorizedSen msgType, err := 
validate(context.Background(), pid, c.Message) s.Require().NoError(err) s.Require().Equal(c.MessageStr, msgType) + + validatePubsub := AuthorizedSenderMessageValidator(zerolog.Nop(), c.Channel, c.GetIdentity) + pubsubResult := validatePubsub(context.Background(), pid, c.Message) + s.Require().Equal(pubsub.ValidationAccept, pubsubResult) }) } } @@ -71,6 +76,10 @@ func (s *TestAuthorizedSenderValidatorSuite) TestValidatorCallback_UnAuthorizedS msgType, err := validate(context.Background(), pid, c.Message) s.Require().ErrorIs(err, message.ErrUnauthorizedRole) s.Require().Equal(c.MessageStr, msgType) + + validatePubsub := AuthorizedSenderMessageValidator(zerolog.Nop(), c.Channel, c.GetIdentity) + pubsubResult := validatePubsub(context.Background(), pid, c.Message) + s.Require().Equal(pubsub.ValidationReject, pubsubResult) }) } } @@ -89,6 +98,10 @@ func (s *TestAuthorizedSenderValidatorSuite) TestValidatorCallback_UnAuthorizedM msgType, err := validate(context.Background(), pid, c.Message) s.Require().ErrorIs(err, message.ErrUnauthorizedMessageOnChannel) s.Require().Equal(c.MessageStr, msgType) + + validatePubsub := AuthorizedSenderMessageValidator(zerolog.Nop(), c.Channel, c.GetIdentity) + pubsubResult := validatePubsub(context.Background(), pid, c.Message) + s.Require().Equal(pubsub.ValidationReject, pubsubResult) }) } } @@ -109,11 +122,19 @@ func (s *TestAuthorizedSenderValidatorSuite) TestValidatorCallback_ClusterPrefix s.Require().NoError(err) s.Require().Equal(message.ClusterBlockResponse, msgType) + validateCollConsensusPubsub := AuthorizedSenderMessageValidator(zerolog.Nop(), channels.ConsensusCluster(clusterID), getIdentityFunc) + pubsubResult := validateCollConsensusPubsub(context.Background(), pid, &messages.ClusterBlockResponse{}) + s.Require().Equal(pubsub.ValidationAccept, pubsubResult) + // validate collection sync cluster validateSyncCluster := AuthorizedSenderValidator(zerolog.Nop(), channels.SyncCluster(clusterID), getIdentityFunc) msgType, err = 
validateSyncCluster(context.Background(), pid, &messages.SyncRequest{}) s.Require().NoError(err) s.Require().Equal(message.SyncRequest, msgType) + + validateSyncClusterPubsub := AuthorizedSenderMessageValidator(zerolog.Nop(), channels.SyncCluster(clusterID), getIdentityFunc) + pubsubResult = validateSyncClusterPubsub(context.Background(), pid, &messages.SyncRequest{}) + s.Require().Equal(pubsub.ValidationAccept, pubsubResult) } // TestValidatorCallback_ValidationFailure checks that the call back returned from AuthorizedSenderValidator returns the expected validation error. @@ -124,10 +145,15 @@ func (s *TestAuthorizedSenderValidatorSuite) TestValidatorCallback_ValidationFai getIdentityFunc := s.getIdentity(identity) pid, err := unittest.PeerIDFromFlowID(identity) s.Require().NoError(err) + validate := AuthorizedSenderValidator(zerolog.Nop(), channels.SyncCommittee, getIdentityFunc) msgType, err := validate(context.Background(), pid, &messages.SyncRequest{}) s.Require().ErrorIs(err, ErrSenderEjected) s.Require().Equal("", msgType) + + validatePubsub := AuthorizedSenderMessageValidator(zerolog.Nop(), channels.SyncCommittee, getIdentityFunc) + pubsubResult := validatePubsub(context.Background(), pid, &messages.SyncRequest{}) + s.Require().Equal(pubsub.ValidationReject, pubsubResult) }) s.Run("unknown message type", func() { @@ -146,17 +172,21 @@ func (s *TestAuthorizedSenderValidatorSuite) TestValidatorCallback_ValidationFai pid, err := unittest.PeerIDFromFlowID(identity) s.Require().NoError(err) validate := AuthorizedSenderValidator(zerolog.Nop(), channels.ConsensusCommittee, getIdentityFunc) + validatePubsub := AuthorizedSenderMessageValidator(zerolog.Nop(), channels.ConsensusCommittee, getIdentityFunc) // unknown message types are rejected msgType, err := validate(context.Background(), pid, m) - s.Require().ErrorIs(err, ErrUnknownMessageType) s.Require().Equal("", msgType) + pubsubResult := validatePubsub(context.Background(), pid, m) + 
s.Require().Equal(pubsub.ValidationReject, pubsubResult) // nil messages are rejected msgType, err = validate(context.Background(), pid, nil) s.Require().ErrorIs(err, ErrUnknownMessageType) s.Require().Equal("", msgType) + pubsubResult = validatePubsub(context.Background(), pid, nil) + s.Require().Equal(pubsub.ValidationReject, pubsubResult) }) s.Run("sender is not staked getIdentityFunc does not return identity ", func() { @@ -172,6 +202,10 @@ func (s *TestAuthorizedSenderValidatorSuite) TestValidatorCallback_ValidationFai msgType, err := validate(context.Background(), pid, &messages.SyncRequest{}) s.Require().ErrorIs(err, ErrUnauthorizedSender) s.Require().Equal("", msgType) + + validatePubsub := AuthorizedSenderMessageValidator(zerolog.Nop(), channels.SyncCommittee, getIdentityFunc) + pubsubResult := validatePubsub(context.Background(), pid, &messages.SyncRequest{}) + s.Require().Equal(pubsub.ValidationReject, pubsubResult) }) } From bfb75e6f5f88e73eb28494b92b60d35ea6bef246 Mon Sep 17 00:00:00 2001 From: Alexey Ivanov Date: Thu, 7 Jul 2022 21:51:04 -0700 Subject: [PATCH 108/223] chore(loader): cleanup follower --- integration/utils/contLoadGenerator.go | 2 +- integration/utils/follower.go | 91 +++++++++++++++----------- integration/utils/follower_test.go | 20 +++++- 3 files changed, 73 insertions(+), 40 deletions(-) diff --git a/integration/utils/contLoadGenerator.go b/integration/utils/contLoadGenerator.go index 350dee99029..385b9d2bb8d 100644 --- a/integration/utils/contLoadGenerator.go +++ b/integration/utils/contLoadGenerator.go @@ -510,7 +510,7 @@ func (lg *ContLoadGenerator) sendTx(workerID int, tx *flowsdk.Transaction) (<-ch log.Trace().Msg("sending transaction") // Add watcher before sending the transaction to avoid race condition - ch := lg.follower.CompleteChanByID(tx.ID()) + ch := lg.follower.Follow(tx.ID()) err := lg.flowClient.SendTransaction(context.Background(), *tx) if err != nil { diff --git a/integration/utils/follower.go 
b/integration/utils/follower.go index a312b9f8698..6f2b0b5f33a 100644 --- a/integration/utils/follower.go +++ b/integration/utils/follower.go @@ -5,8 +5,6 @@ import ( "sync" "time" - "go.uber.org/atomic" - flowsdk "github.com/onflow/flow-go-sdk" "github.com/onflow/flow-go-sdk/access" "github.com/onflow/flow-go/module/metrics" @@ -15,8 +13,8 @@ import ( ) type TxFollower interface { - // CompleteChanByID returns a channel that is closed when the transaction is complete. - CompleteChanByID(ID flowsdk.Identifier) <-chan struct{} + // Follow returns a channel that is closed when the transaction is complete. + Follow(ID flowsdk.Identifier) <-chan struct{} // Height returns the last acted upon block height. Height() uint64 @@ -56,13 +54,10 @@ type txFollowerImpl struct { interval time.Duration - inprogress *atomic.Int64 - - mu *sync.RWMutex - height uint64 - blockID flowsdk.Identifier - - txToChan sync.Map + mu *sync.RWMutex + height uint64 + blockID flowsdk.Identifier + txToChan map[flowsdk.Identifier]txInfo stopped chan struct{} } @@ -83,11 +78,11 @@ func NewTxFollower(ctx context.Context, client access.Client, opts ...followerOp ctx: newCtx, cancel: cancel, logger: zerolog.Nop(), - mu: &sync.RWMutex{}, - inprogress: atomic.NewInt64(0), + mu: &sync.RWMutex{}, + txToChan: make(map[flowsdk.Identifier]txInfo), - stopped: make(chan struct{}), + stopped: make(chan struct{}, 1), interval: 100 * time.Millisecond, } @@ -104,12 +99,12 @@ func NewTxFollower(ctx context.Context, client access.Client, opts ...followerOp f.blockID = hdr.ID } - go f.follow() + go f.run() return f, nil } -func (f *txFollowerImpl) follow() { +func (f *txFollowerImpl) run() { t := time.NewTicker(f.interval) defer t.Stop() defer close(f.stopped) @@ -140,16 +135,21 @@ Loop: } for _, tx := range col.TransactionIDs { blockTxs++ - if ch, loaded := f.txToChan.LoadAndDelete(tx.Hex()); loaded { - txi := ch.(txInfo) + f.mu.Lock() + txi, ok := f.txToChan[tx] + if ok { + delete(f.txToChan, tx) + } + f.mu.Unlock() 
+ + if ok { duration := time.Since(txi.submisionTime) f.logger.Trace(). Dur("durationInMS", duration). Hex("txID", tx.Bytes()). Msg("returned account to the pool") close(txi.C) - f.inprogress.Dec() if f.metrics != nil { f.metrics.TransactionExecuted(duration) } @@ -159,6 +159,10 @@ Loop: } } + f.mu.RLock() + inProgress := len(f.txToChan) + f.mu.RUnlock() + totalTxs += blockTxs totalUnknownTxs += blockUnknownTxs @@ -174,7 +178,7 @@ Loop: Uint64("txsTotalUnknown", totalUnknownTxs). Uint64("txsInBlock", blockTxs). Uint64("txsInBlockUnknown", blockUnknownTxs). - Int64("txsInProgress", f.inprogress.Load()). + Int("txsInProgress", inProgress). Msg("new block parsed") f.mu.Lock() @@ -186,12 +190,27 @@ Loop: } } -func (f *txFollowerImpl) CompleteChanByID(ID flowsdk.Identifier) <-chan struct{} { - txi, loaded := f.txToChan.LoadOrStore(ID.Hex(), txInfo{submisionTime: time.Now(), C: make(chan struct{})}) - if !loaded { - f.inprogress.Inc() +// Follow returns a channel that will be closed when the transaction is completed. +func (f *txFollowerImpl) Follow(ID flowsdk.Identifier) <-chan struct{} { + f.mu.Lock() + defer f.mu.Unlock() + + select { + case <-f.ctx.Done(): + // This channel is closed when the follower is stopped. + return f.stopped + default: + } + + // Return existing follower if exists. + if txi, ok := f.txToChan[ID]; ok { + return txi.C } - return txi.(txInfo).C + + // Create new one. 
+ ch := make(chan struct{}) + f.txToChan[ID] = txInfo{submisionTime: time.Now(), C: ch} + return ch } func (f *txFollowerImpl) Height() uint64 { @@ -212,21 +231,17 @@ func (f *txFollowerImpl) Stop() { f.cancel() <-f.stopped - var toDelete []string - f.txToChan.Range( - func(key, value interface{}) bool { - close(value.(txInfo).C) - toDelete = append(toDelete, key.(string)) - return true - }, - ) - for _, val := range toDelete { - f.txToChan.Delete(val) + f.mu.Lock() + defer f.mu.Unlock() + + for k, v := range f.txToChan { + close(v.C) + delete(f.txToChan, k) } } type nopTxFollower struct { - txFollowerImpl + *txFollowerImpl closedCh chan struct{} } @@ -243,13 +258,13 @@ func NewNopTxFollower(ctx context.Context, client access.Client, opts ...followe close(closedCh) nop := &nopTxFollower{ - txFollowerImpl: *impl, + txFollowerImpl: impl, closedCh: closedCh, } return nop, nil } // CompleteChanByID always returns a closed channel. -func (nop *nopTxFollower) CompleteChanByID(ID flowsdk.Identifier) <-chan struct{} { +func (nop *nopTxFollower) Follow(ID flowsdk.Identifier) <-chan struct{} { return nop.closedCh } diff --git a/integration/utils/follower_test.go b/integration/utils/follower_test.go index a49eee62b18..9c8b6ca50be 100644 --- a/integration/utils/follower_test.go +++ b/integration/utils/follower_test.go @@ -28,6 +28,24 @@ func TestTxFollower(t *testing.T) { f.Stop() } +// TestTxFollowerFollowAfterStop creates new follower with a fixed block height and stops it. +func TestTxFollowerFollowAfterStop(t *testing.T) { + // TODO(rbtz): test against a mock client, but for now we just expire + // the context so that the followere wont be able to progress. 
+ ctx, cancel := context.WithCancel(context.Background()) + cancel() + + f, err := NewTxFollower( + ctx, + nil, + WithBlockHeight(2), + WithInteval(1*time.Hour), + ) + require.NoError(t, err) + f.Stop() + unittest.AssertClosesBefore(t, f.Follow(flowsdk.Identifier{}), 1*time.Second) +} + // TestNopTxFollower creates a new follower with a fixed block height and // verifies that it does not block. func TestNopTxFollower(t *testing.T) { @@ -43,6 +61,6 @@ func TestNopTxFollower(t *testing.T) { WithInteval(1*time.Hour), ) require.NoError(t, err) - unittest.AssertClosesBefore(t, f.CompleteChanByID(flowsdk.Identifier{}), 1*time.Second) + unittest.AssertClosesBefore(t, f.Follow(flowsdk.Identifier{}), 1*time.Second) f.Stop() } From 37c0aa578c4748a1d785ce92a9d6b2dedf66f59c Mon Sep 17 00:00:00 2001 From: Alexey Ivanov Date: Fri, 8 Jul 2022 13:47:59 -0700 Subject: [PATCH 109/223] address PR comments --- integration/utils/follower.go | 65 +++++++++++++++++++++-------------- 1 file changed, 39 insertions(+), 26 deletions(-) diff --git a/integration/utils/follower.go b/integration/utils/follower.go index 6f2b0b5f33a..f79e085780e 100644 --- a/integration/utils/follower.go +++ b/integration/utils/follower.go @@ -54,12 +54,13 @@ type txFollowerImpl struct { interval time.Duration + stopped chan struct{} + + // Following fields are protected by mu. 
mu *sync.RWMutex height uint64 blockID flowsdk.Identifier txToChan map[flowsdk.Identifier]txInfo - - stopped chan struct{} } type txInfo struct { @@ -79,11 +80,11 @@ func NewTxFollower(ctx context.Context, client access.Client, opts ...followerOp cancel: cancel, logger: zerolog.Nop(), + stopped: make(chan struct{}), + interval: 100 * time.Millisecond, + mu: &sync.RWMutex{}, txToChan: make(map[flowsdk.Identifier]txInfo), - - stopped: make(chan struct{}, 1), - interval: 100 * time.Millisecond, } for _, opt := range opts { @@ -95,8 +96,7 @@ func NewTxFollower(ctx context.Context, client access.Client, opts ...followerOp if err != nil { return nil, err } - f.height = hdr.Height - f.blockID = hdr.ID + f.updateFromBlockHeader(*hdr) } go f.run() @@ -136,14 +136,7 @@ Loop: for _, tx := range col.TransactionIDs { blockTxs++ - f.mu.Lock() - txi, ok := f.txToChan[tx] - if ok { - delete(f.txToChan, tx) - } - f.mu.Unlock() - - if ok { + if txi, loaded := f.loadAndDelete(tx); loaded { duration := time.Since(txi.submisionTime) f.logger.Trace(). Dur("durationInMS", duration). @@ -159,10 +152,6 @@ Loop: } } - f.mu.RLock() - inProgress := len(f.txToChan) - f.mu.RUnlock() - totalTxs += blockTxs totalUnknownTxs += blockUnknownTxs @@ -178,13 +167,10 @@ Loop: Uint64("txsTotalUnknown", totalUnknownTxs). Uint64("txsInBlock", blockTxs). Uint64("txsInBlockUnknown", blockUnknownTxs). - Int("txsInProgress", inProgress). + Int("txsInProgress", f.InProgress()). 
Msg("new block parsed") - f.mu.Lock() - f.height = block.Height - f.blockID = block.ID - f.mu.Unlock() + f.updateFromBlockHeader(block.BlockHeader) lastBlockTime = time.Now() } @@ -213,6 +199,33 @@ func (f *txFollowerImpl) Follow(ID flowsdk.Identifier) <-chan struct{} { return ch } +func (f *txFollowerImpl) loadAndDelete(tx flowsdk.Identifier) (txInfo, bool) { + f.mu.Lock() + defer f.mu.Unlock() + + txi, ok := f.txToChan[tx] + if ok { + delete(f.txToChan, tx) + } + return txi, ok +} + +func (f *txFollowerImpl) updateFromBlockHeader(block flowsdk.BlockHeader) { + f.mu.Lock() + defer f.mu.Unlock() + + f.height = block.Height + f.blockID = block.ID +} + +// InProgress returns the number of transactions in progress. +func (f *txFollowerImpl) InProgress() int { + f.mu.RLock() + defer f.mu.RUnlock() + + return len(f.txToChan) +} + func (f *txFollowerImpl) Height() uint64 { f.mu.RLock() defer f.mu.RUnlock() @@ -234,10 +247,10 @@ func (f *txFollowerImpl) Stop() { f.mu.Lock() defer f.mu.Unlock() - for k, v := range f.txToChan { + for _, v := range f.txToChan { close(v.C) - delete(f.txToChan, k) } + f.txToChan = make(map[flowsdk.Identifier]txInfo) } type nopTxFollower struct { From 348c6e6d4e1cfdf76dae9178f501d400f9b8c410 Mon Sep 17 00:00:00 2001 From: Alexey Ivanov Date: Fri, 8 Jul 2022 14:41:07 -0700 Subject: [PATCH 110/223] cleanup comments --- integration/utils/follower.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/integration/utils/follower.go b/integration/utils/follower.go index f79e085780e..7a7295d6817 100644 --- a/integration/utils/follower.go +++ b/integration/utils/follower.go @@ -226,6 +226,7 @@ func (f *txFollowerImpl) InProgress() int { return len(f.txToChan) } +// Height returns the latest block height. func (f *txFollowerImpl) Height() uint64 { f.mu.RLock() defer f.mu.RUnlock() @@ -233,6 +234,7 @@ func (f *txFollowerImpl) Height() uint64 { return f.height } +// BlockID returns the latest block ID. 
func (f *txFollowerImpl) BlockID() flowsdk.Identifier { f.mu.RLock() defer f.mu.RUnlock() @@ -240,6 +242,7 @@ func (f *txFollowerImpl) BlockID() flowsdk.Identifier { return f.blockID } +// Stop stops all followers, notifies existing watches, and returns. func (f *txFollowerImpl) Stop() { f.cancel() <-f.stopped @@ -259,7 +262,8 @@ type nopTxFollower struct { closedCh chan struct{} } -// NewNopTxFollower creates a new follower that tracks the current block height and ID but does not notify on transaction completion. +// NewNopTxFollower creates a new follower that tracks the current block height and ID +// but does not notify on transaction completion. func NewNopTxFollower(ctx context.Context, client access.Client, opts ...followerOption) (TxFollower, error) { f, err := NewTxFollower(ctx, client, opts...) if err != nil { @@ -277,7 +281,7 @@ func NewNopTxFollower(ctx context.Context, client access.Client, opts ...followe return nop, nil } -// CompleteChanByID always returns a closed channel. +// Follow always returns a closed channel. func (nop *nopTxFollower) Follow(ID flowsdk.Identifier) <-chan struct{} { return nop.closedCh } From 21b0c6dba6ba56fb09a4123f26a927f6e48ed019 Mon Sep 17 00:00:00 2001 From: Alexey Ivanov Date: Fri, 8 Jul 2022 15:29:57 -0700 Subject: [PATCH 111/223] consistent naming --- integration/utils/follower.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/integration/utils/follower.go b/integration/utils/follower.go index 7a7295d6817..29a4048908c 100644 --- a/integration/utils/follower.go +++ b/integration/utils/follower.go @@ -177,7 +177,7 @@ Loop: } // Follow returns a channel that will be closed when the transaction is completed. 
-func (f *txFollowerImpl) Follow(ID flowsdk.Identifier) <-chan struct{} { +func (f *txFollowerImpl) Follow(txID flowsdk.Identifier) <-chan struct{} { f.mu.Lock() defer f.mu.Unlock() @@ -189,23 +189,23 @@ func (f *txFollowerImpl) Follow(ID flowsdk.Identifier) <-chan struct{} { } // Return existing follower if exists. - if txi, ok := f.txToChan[ID]; ok { + if txi, ok := f.txToChan[txID]; ok { return txi.C } // Create new one. ch := make(chan struct{}) - f.txToChan[ID] = txInfo{submisionTime: time.Now(), C: ch} + f.txToChan[txID] = txInfo{submisionTime: time.Now(), C: ch} return ch } -func (f *txFollowerImpl) loadAndDelete(tx flowsdk.Identifier) (txInfo, bool) { +func (f *txFollowerImpl) loadAndDelete(txID flowsdk.Identifier) (txInfo, bool) { f.mu.Lock() defer f.mu.Unlock() - txi, ok := f.txToChan[tx] + txi, ok := f.txToChan[txID] if ok { - delete(f.txToChan, tx) + delete(f.txToChan, txID) } return txi, ok } From 7720b5545eab219ecbb05088958d76f10bd150df Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Fri, 8 Jul 2022 18:45:32 -0400 Subject: [PATCH 112/223] use cancellable context --- .../authorized_sender_validator_test.go | 62 +++++++++++++------ 1 file changed, 42 insertions(+), 20 deletions(-) diff --git a/network/validator/pubsub/authorized_sender_validator_test.go b/network/validator/pubsub/authorized_sender_validator_test.go index ec9fc2a53a0..dd3d3b099fe 100644 --- a/network/validator/pubsub/authorized_sender_validator_test.go +++ b/network/validator/pubsub/authorized_sender_validator_test.go @@ -52,12 +52,15 @@ func (s *TestAuthorizedSenderValidatorSuite) TestValidatorCallback_AuthorizedSen pid, err := unittest.PeerIDFromFlowID(c.Identity) s.Require().NoError(err) - msgType, err := validate(context.Background(), pid, c.Message) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + msgType, err := validate(ctx, pid, c.Message) s.Require().NoError(err) s.Require().Equal(c.MessageStr, msgType) validatePubsub := 
AuthorizedSenderMessageValidator(zerolog.Nop(), c.Channel, c.GetIdentity) - pubsubResult := validatePubsub(context.Background(), pid, c.Message) + pubsubResult := validatePubsub(ctx, pid, c.Message) s.Require().Equal(pubsub.ValidationAccept, pubsubResult) }) } @@ -69,16 +72,19 @@ func (s *TestAuthorizedSenderValidatorSuite) TestValidatorCallback_UnAuthorizedS for _, c := range s.unauthorizedSenderTestCases { str := fmt.Sprintf("role (%s) should not be authorized to send message type (%s) on channel (%s)", c.Identity.Role, c.MessageStr, c.Channel) s.Run(str, func() { - validate := AuthorizedSenderValidator(zerolog.Nop(), c.Channel, c.GetIdentity) - pid, err := unittest.PeerIDFromFlowID(c.Identity) s.Require().NoError(err) - msgType, err := validate(context.Background(), pid, c.Message) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + validate := AuthorizedSenderValidator(zerolog.Nop(), c.Channel, c.GetIdentity) + msgType, err := validate(ctx, pid, c.Message) s.Require().ErrorIs(err, message.ErrUnauthorizedRole) s.Require().Equal(c.MessageStr, msgType) validatePubsub := AuthorizedSenderMessageValidator(zerolog.Nop(), c.Channel, c.GetIdentity) - pubsubResult := validatePubsub(context.Background(), pid, c.Message) + pubsubResult := validatePubsub(ctx, pid, c.Message) s.Require().Equal(pubsub.ValidationReject, pubsubResult) }) } @@ -90,17 +96,20 @@ func (s *TestAuthorizedSenderValidatorSuite) TestValidatorCallback_UnAuthorizedM for _, c := range s.unauthorizedMessageOnChannelTestCases { str := fmt.Sprintf("message type (%s) should not be authorized to be sent on channel (%s)", c.MessageStr, c.Channel) s.Run(str, func() { - validate := AuthorizedSenderValidator(zerolog.Nop(), c.Channel, c.GetIdentity) - pid, err := unittest.PeerIDFromFlowID(c.Identity) s.Require().NoError(err) - msgType, err := validate(context.Background(), pid, c.Message) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + validate := 
AuthorizedSenderValidator(zerolog.Nop(), c.Channel, c.GetIdentity) + + msgType, err := validate(ctx, pid, c.Message) s.Require().ErrorIs(err, message.ErrUnauthorizedMessageOnChannel) s.Require().Equal(c.MessageStr, msgType) validatePubsub := AuthorizedSenderMessageValidator(zerolog.Nop(), c.Channel, c.GetIdentity) - pubsubResult := validatePubsub(context.Background(), pid, c.Message) + pubsubResult := validatePubsub(ctx, pid, c.Message) s.Require().Equal(pubsub.ValidationReject, pubsubResult) }) } @@ -109,6 +118,9 @@ func (s *TestAuthorizedSenderValidatorSuite) TestValidatorCallback_UnAuthorizedM // TestValidatorCallback_ClusterPrefixedChannels checks that the call back returned from AuthorizedSenderValidator correctly // handles cluster prefixed channels during validation. func (s *TestAuthorizedSenderValidatorSuite) TestValidatorCallback_ClusterPrefixedChannels() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + identity, _ := unittest.IdentityWithNetworkingKeyFixture(unittest.WithRole(flow.RoleCollection)) clusterID := flow.Localnet @@ -118,28 +130,31 @@ func (s *TestAuthorizedSenderValidatorSuite) TestValidatorCallback_ClusterPrefix // validate collection consensus cluster validateCollConsensus := AuthorizedSenderValidator(zerolog.Nop(), channels.ConsensusCluster(clusterID), getIdentityFunc) - msgType, err := validateCollConsensus(context.Background(), pid, &messages.ClusterBlockResponse{}) + msgType, err := validateCollConsensus(ctx, pid, &messages.ClusterBlockResponse{}) s.Require().NoError(err) s.Require().Equal(message.ClusterBlockResponse, msgType) validateCollConsensusPubsub := AuthorizedSenderMessageValidator(zerolog.Nop(), channels.ConsensusCluster(clusterID), getIdentityFunc) - pubsubResult := validateCollConsensusPubsub(context.Background(), pid, &messages.ClusterBlockResponse{}) + pubsubResult := validateCollConsensusPubsub(ctx, pid, &messages.ClusterBlockResponse{}) s.Require().Equal(pubsub.ValidationAccept, 
pubsubResult) // validate collection sync cluster validateSyncCluster := AuthorizedSenderValidator(zerolog.Nop(), channels.SyncCluster(clusterID), getIdentityFunc) - msgType, err = validateSyncCluster(context.Background(), pid, &messages.SyncRequest{}) + msgType, err = validateSyncCluster(ctx, pid, &messages.SyncRequest{}) s.Require().NoError(err) s.Require().Equal(message.SyncRequest, msgType) validateSyncClusterPubsub := AuthorizedSenderMessageValidator(zerolog.Nop(), channels.SyncCluster(clusterID), getIdentityFunc) - pubsubResult = validateSyncClusterPubsub(context.Background(), pid, &messages.SyncRequest{}) + pubsubResult = validateSyncClusterPubsub(ctx, pid, &messages.SyncRequest{}) s.Require().Equal(pubsub.ValidationAccept, pubsubResult) } // TestValidatorCallback_ValidationFailure checks that the call back returned from AuthorizedSenderValidator returns the expected validation error. func (s *TestAuthorizedSenderValidatorSuite) TestValidatorCallback_ValidationFailure() { s.Run("sender is ejected", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + identity, _ := unittest.IdentityWithNetworkingKeyFixture() identity.Ejected = true getIdentityFunc := s.getIdentity(identity) @@ -147,16 +162,19 @@ func (s *TestAuthorizedSenderValidatorSuite) TestValidatorCallback_ValidationFai s.Require().NoError(err) validate := AuthorizedSenderValidator(zerolog.Nop(), channels.SyncCommittee, getIdentityFunc) - msgType, err := validate(context.Background(), pid, &messages.SyncRequest{}) + msgType, err := validate(ctx, pid, &messages.SyncRequest{}) s.Require().ErrorIs(err, ErrSenderEjected) s.Require().Equal("", msgType) validatePubsub := AuthorizedSenderMessageValidator(zerolog.Nop(), channels.SyncCommittee, getIdentityFunc) - pubsubResult := validatePubsub(context.Background(), pid, &messages.SyncRequest{}) + pubsubResult := validatePubsub(ctx, pid, &messages.SyncRequest{}) s.Require().Equal(pubsub.ValidationReject, pubsubResult) }) 
s.Run("unknown message type", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + identity, _ := unittest.IdentityWithNetworkingKeyFixture(unittest.WithRole(flow.RoleConsensus)) type msg struct { *messages.BlockProposal @@ -171,14 +189,15 @@ func (s *TestAuthorizedSenderValidatorSuite) TestValidatorCallback_ValidationFai getIdentityFunc := s.getIdentity(identity) pid, err := unittest.PeerIDFromFlowID(identity) s.Require().NoError(err) + validate := AuthorizedSenderValidator(zerolog.Nop(), channels.ConsensusCommittee, getIdentityFunc) validatePubsub := AuthorizedSenderMessageValidator(zerolog.Nop(), channels.ConsensusCommittee, getIdentityFunc) // unknown message types are rejected - msgType, err := validate(context.Background(), pid, m) + msgType, err := validate(ctx, pid, m) s.Require().ErrorIs(err, ErrUnknownMessageType) s.Require().Equal("", msgType) - pubsubResult := validatePubsub(context.Background(), pid, m) + pubsubResult := validatePubsub(ctx, pid, m) s.Require().Equal(pubsub.ValidationReject, pubsubResult) // nil messages are rejected @@ -190,6 +209,9 @@ func (s *TestAuthorizedSenderValidatorSuite) TestValidatorCallback_ValidationFai }) s.Run("sender is not staked getIdentityFunc does not return identity ", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + identity, _ := unittest.IdentityWithNetworkingKeyFixture() // getIdentityFunc simulates unstaked node not found in participant list @@ -199,12 +221,12 @@ func (s *TestAuthorizedSenderValidatorSuite) TestValidatorCallback_ValidationFai s.Require().NoError(err) validate := AuthorizedSenderValidator(zerolog.Nop(), channels.SyncCommittee, getIdentityFunc) - msgType, err := validate(context.Background(), pid, &messages.SyncRequest{}) + msgType, err := validate(ctx, pid, &messages.SyncRequest{}) s.Require().ErrorIs(err, ErrUnauthorizedSender) s.Require().Equal("", msgType) validatePubsub := AuthorizedSenderMessageValidator(zerolog.Nop(), 
channels.SyncCommittee, getIdentityFunc) - pubsubResult := validatePubsub(context.Background(), pid, &messages.SyncRequest{}) + pubsubResult := validatePubsub(ctx, pid, &messages.SyncRequest{}) s.Require().Equal(pubsub.ValidationReject, pubsubResult) }) } From 3dac90936ad08f9dd47396507383ab559641c904 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Fri, 8 Jul 2022 18:53:17 -0400 Subject: [PATCH 113/223] remove some details from comment --- network/p2p/middleware.go | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/network/p2p/middleware.go b/network/p2p/middleware.go index 7c3aac104cf..727ddd791d5 100644 --- a/network/p2p/middleware.go +++ b/network/p2p/middleware.go @@ -352,14 +352,10 @@ func (m *Middleware) stop() { // Dispatch should be used whenever guaranteed delivery to a specific target is required. Otherwise, Publish is // a more efficient candidate. // -// The following benign errors can be returned from libp2p: -// - the peer ID for the target node ID cannot be found. -// - the msg size exceeds result returned from unicastMaxMsgSize(msg) -// - the libP2P node fails to publish the message. -// - the libP2P node fails to create the stream. -// - setting write deadline on the stream fails. -// - the gogo protobuf writer fails to write the message. -// - flushing the stream fails. +// The following benign errors can be returned: +// - he peer ID for the target node ID cannot be found. +// - the msg size was too large. +// - failed to send message to peer. // // All errors returned from this function can be considered benign. 
func (m *Middleware) SendDirect(msg *message.Message, targetID flow.Identifier) (err error) { From 3b7795d7b4aeac6c7450665a69fbdf93158d643e Mon Sep 17 00:00:00 2001 From: Alexey Ivanov Date: Fri, 8 Jul 2022 16:25:41 -0700 Subject: [PATCH 114/223] chore(loader): wait for account creation results --- integration/utils/contLoadGenerator.go | 6 ++-- integration/utils/tx_result.go | 44 ++++++++++++++++++++++++++ 2 files changed, 47 insertions(+), 3 deletions(-) create mode 100644 integration/utils/tx_result.go diff --git a/integration/utils/contLoadGenerator.go b/integration/utils/contLoadGenerator.go index 350dee99029..1b47abced1d 100644 --- a/integration/utils/contLoadGenerator.go +++ b/integration/utils/contLoadGenerator.go @@ -276,9 +276,9 @@ func (lg *ContLoadGenerator) createAccounts(num int) error { <-ch log := lg.log.With().Str("tx_id", createAccountTx.ID().String()).Logger() - result, err := lg.flowClient.GetTransactionResult(context.Background(), createAccountTx.ID()) - if err != nil { - return fmt.Errorf("failed to get transactions result: %w", err) + result := GetTransactionResult(context.Background(), lg.flowClient, createAccountTx.ID()) + if result.Error != nil { + return fmt.Errorf("failed to get transactions result: %w", result.Error) } log.Trace().Str("status", result.Status.String()).Msg("account creation tx executed") diff --git a/integration/utils/tx_result.go b/integration/utils/tx_result.go new file mode 100644 index 00000000000..0bc2a15bec6 --- /dev/null +++ b/integration/utils/tx_result.go @@ -0,0 +1,44 @@ +package utils + +import ( + "context" + "fmt" + "time" + + "github.com/sethvargo/go-retry" + + flowsdk "github.com/onflow/flow-go-sdk" + "github.com/onflow/flow-go-sdk/access" +) + +// GetTransactionResult waits for the transaction to get into the terminal state and returns the result. 
+func GetTransactionResult(ctx context.Context, client access.Client, txID flowsdk.Identifier) *flowsdk.TransactionResult { + var b retry.Backoff + b = retry.NewFibonacci(100 * time.Millisecond) + b = retry.WithMaxDuration(60*time.Second, b) + b = retry.WithCappedDuration(10*time.Second, b) + + var result *flowsdk.TransactionResult + err := retry.Do(ctx, b, func(ctx context.Context) (err error) { + result, err = client.GetTransactionResult(ctx, txID) + if err != nil { + return err + } + if result.Error != nil { + return result.Error + } + + switch result.Status { + case flowsdk.TransactionStatusExecuted, flowsdk.TransactionStatusSealed: + return nil + case flowsdk.TransactionStatusPending, flowsdk.TransactionStatusFinalized: + return retry.RetryableError(fmt.Errorf("waiting for transaction execution: %s", txID)) + default: + return fmt.Errorf("unexpected transaction status: %s", result.Status) + } + }) + if err != nil { + return &flowsdk.TransactionResult{Error: err} + } + return result +} From 59b0f59c52dd1c273ecd9b526c428fe389601a93 Mon Sep 17 00:00:00 2001 From: Alexey Ivanov Date: Fri, 8 Jul 2022 17:28:42 -0700 Subject: [PATCH 115/223] go mod tidy --- integration/go.mod | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/integration/go.mod b/integration/go.mod index 2c63742bc43..9a089a016fd 100644 --- a/integration/go.mod +++ b/integration/go.mod @@ -26,6 +26,7 @@ require ( github.com/plus3it/gorecurcopy v0.0.1 github.com/prometheus/client_golang v1.12.1 github.com/rs/zerolog v1.26.1 + github.com/sethvargo/go-retry v0.2.3 github.com/stretchr/testify v1.7.5 go.uber.org/atomic v1.9.0 google.golang.org/grpc v1.45.0 @@ -230,7 +231,6 @@ require ( github.com/raulk/clock v1.1.0 // indirect github.com/raulk/go-watchdog v1.2.0 // indirect github.com/rivo/uniseg v0.2.1-0.20211004051800-57c86be7915a // indirect - github.com/sethvargo/go-retry v0.2.3 // indirect github.com/shirou/gopsutil/v3 v3.22.2 // indirect github.com/sirupsen/logrus v1.8.1 // indirect 
github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 // indirect From 777028355db1b59d5e8dd0a08f7a19e7f3160e19 Mon Sep 17 00:00:00 2001 From: lolpuddle Date: Sat, 9 Jul 2022 01:49:22 -0400 Subject: [PATCH 116/223] lock updates to pool --- engine/access/rpc/backend/connection_factory.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/engine/access/rpc/backend/connection_factory.go b/engine/access/rpc/backend/connection_factory.go index 4e020d50959..4ac8ca55059 100644 --- a/engine/access/rpc/backend/connection_factory.go +++ b/engine/access/rpc/backend/connection_factory.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "net" + "sync" "time" lru "github.com/hashicorp/golang-lru" @@ -47,6 +48,7 @@ type ConnectionFactoryImpl struct { ExecutionNodeGRPCTimeout time.Duration ConnectionsCache *lru.Cache CacheSize uint + lock sync.Mutex AccessMetrics module.AccessMetrics } @@ -81,14 +83,16 @@ func (cf *ConnectionFactoryImpl) retrieveConnection(grpcAddress string, timeout } } if conn == nil || conn.GetState() != connectivity.Ready { - // This line ensures that when a connection is renewed, the previously cached connection is evicted and closed - cf.ConnectionsCache.Remove(grpcAddress) var err error conn, err = cf.createConnection(grpcAddress, timeout) if err != nil { return nil, err } + cf.lock.Lock() + // This line ensures that when a connection is renewed, the previously cached connection is evicted and closed + cf.ConnectionsCache.Remove(grpcAddress) cf.ConnectionsCache.Add(grpcAddress, conn) + cf.lock.Unlock() if cf.AccessMetrics != nil { cf.AccessMetrics.TotalConnectionsInPool(uint(cf.ConnectionsCache.Len()), cf.CacheSize) } From 6b9ec65421045114dd4d605187ac20abdb139c44 Mon Sep 17 00:00:00 2001 From: Leo Zhang Date: Sun, 10 Jul 2022 06:25:27 -0230 Subject: [PATCH 117/223] Update state/protocol/util.go Co-authored-by: Alexander Hentschel --- state/protocol/util.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/state/protocol/util.go b/state/protocol/util.go index 03c87655e52..1cfae703004 100644 --- a/state/protocol/util.go +++ b/state/protocol/util.go @@ -107,7 +107,7 @@ func DecodeSignerIDs(state State, header *flow.Header) ([]flow.Identifier, error return []flow.Identifier{}, nil } snapshot := state.AtBlockID(header.ID()) - members, err := snapshot.Identities(filter.HasRole(flow.RoleConsensus)) + members, err := snapshot.Identities(filter.IsVotingConsensusCommitteeMember) if err != nil { return nil, fmt.Errorf("fail to retrieve identities for block %v: %w", header.ID(), err) } From b7abe11a0ac0def1b5c9418e3c36b799c16a388b Mon Sep 17 00:00:00 2001 From: Leo Zhang Date: Sun, 10 Jul 2022 06:26:50 -0230 Subject: [PATCH 118/223] Update access/handler.go Co-authored-by: Alexander Hentschel --- access/handler.go | 1 + 1 file changed, 1 insertion(+) diff --git a/access/handler.go b/access/handler.go index 79ba31996c6..7faf0b96435 100644 --- a/access/handler.go +++ b/access/handler.go @@ -17,6 +17,7 @@ import ( type Handler struct { api API chain flow.Chain + // TODO: update to Replicas API once active PaceMaker is merged state protocol.State } From b292c581c60fe841738f1d7977d5df8f56ac54f7 Mon Sep 17 00:00:00 2001 From: Leo Zhang Date: Sun, 10 Jul 2022 06:27:25 -0230 Subject: [PATCH 119/223] Update state/protocol/util.go Co-authored-by: Alexander Hentschel --- state/protocol/util.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/state/protocol/util.go b/state/protocol/util.go index 1cfae703004..b4a877806e5 100644 --- a/state/protocol/util.go +++ b/state/protocol/util.go @@ -100,7 +100,7 @@ func FindGuarantors(state State, guarantee *flow.CollectionGuarantee) ([]flow.Id // DecodeSignerIDs decodes the signer indices from the given block header, and finds the signer identifiers from protocol state // Expected Error returns during normal operations: // * storage.ErrNotFound if block not found for the given header -// * signature.InvalidSignerIndicesError if 
`signerIndices` does not encode a valid set of consensus committiee +// * signature.InvalidSignerIndicesError if `signerIndices` does not encode a valid subset of the consensus committee func DecodeSignerIDs(state State, header *flow.Header) ([]flow.Identifier, error) { // root block does not have signer indices if header.ParentVoterIndices == nil && header.View == 0 { From 9cf3d8da010729cd16fc1d176056eff6130bd610 Mon Sep 17 00:00:00 2001 From: Leo Zhang Date: Sun, 10 Jul 2022 06:27:45 -0230 Subject: [PATCH 120/223] Update state/protocol/util.go Co-authored-by: Alexander Hentschel --- state/protocol/util.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/state/protocol/util.go b/state/protocol/util.go index b4a877806e5..82e47552956 100644 --- a/state/protocol/util.go +++ b/state/protocol/util.go @@ -101,7 +101,8 @@ func FindGuarantors(state State, guarantee *flow.CollectionGuarantee) ([]flow.Id // Expected Error returns during normal operations: // * storage.ErrNotFound if block not found for the given header // * signature.InvalidSignerIndicesError if `signerIndices` does not encode a valid subset of the consensus committee -func DecodeSignerIDs(state State, header *flow.Header) ([]flow.Identifier, error) { +// TODO: change `protocol.State` to `Replicas` API once active PaceMaker is merged +func DecodeSignerIDs(committee hotstuff.Committee, header *flow.Header) ([]flow.Identifier, error) { // root block does not have signer indices if header.ParentVoterIndices == nil && header.View == 0 { return []flow.Identifier{}, nil From 86f22a3d81a5896c2a07d3190cbb438a701da499 Mon Sep 17 00:00:00 2001 From: Tarak Ben Youssef <50252200+tarakby@users.noreply.github.com> Date: Mon, 11 Jul 2022 11:40:50 -0500 Subject: [PATCH 121/223] Update README.md Update note about setting up the crypto module locally for every new version --- README.md | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 
a5f63a91ea6..27b30a1af4b 100644 --- a/README.md +++ b/README.md @@ -85,11 +85,10 @@ The following table lists all work streams and links to their home directory and At this point, you should be ready to build, test, and run Flow! 🎉 -Note: if there is error about "relic" or "crypto", trying force removing the relic build and reinstall the tools again: +Note: Whenever the crypto module version imported by "go.mod" is updated to a version that was never locally imported before, the crypto dependency needs to be set-up. If not, you should notice errors about "relic" or "crypto". Run the following command to set-up the new module version: ```bash -rm -rf crypto/relic -make install-tools +make crypto_setup_gopath ``` ## Development Workflow From 1eb7d6746afe1c280989d138c334b68d0621fd06 Mon Sep 17 00:00:00 2001 From: Jordan Schalm Date: Mon, 11 Jul 2022 14:42:56 -0700 Subject: [PATCH 122/223] add unit test for unhandled sealing segment case --- state/protocol/badger/snapshot_test.go | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/state/protocol/badger/snapshot_test.go b/state/protocol/badger/snapshot_test.go index bfefd285220..f15622079bc 100644 --- a/state/protocol/badger/snapshot_test.go +++ b/state/protocol/badger/snapshot_test.go @@ -263,6 +263,30 @@ func TestSealingSegment(t *testing.T) { }) }) + // test sealing segment for non-root segment where the latest seal is the + // root seal, but the segment contains more than the root block. 
+ // ROOT <- B1 + // Expected sealing segment: [ROOT, B1] + t.Run("non-root with root seal as latest seal", func(t *testing.T) { + util.RunWithFollowerProtocolState(t, rootSnapshot, func(db *badger.DB, state *bprotocol.FollowerState) { + // build an extra block on top of root + block1 := unittest.BlockWithParentFixture(head) + buildBlock(t, state, block1) + + segment, err := state.AtBlockID(block1.ID()).SealingSegment() + require.NoError(t, err) + + // build a valid child B2 to ensure we have a QC + buildBlock(t, state, unittest.BlockWithParentFixture(block1.Header)) + + // sealing segment should contain B1 and B2 + // B2 is reference of snapshot, B1 is latest sealed + unittest.AssertEqualBlocksLenAndOrder(t, []*flow.Block{rootSnapshot.Encodable().SealingSegment.Lowest(), block1}, segment.Blocks) + assert.Len(t, segment.ExecutionResults, 1) + assertSealingSegmentBlocksQueryableAfterBootstrap(t, state.AtBlockID(block1.ID())) + }) + }) + // test sealing segment for non-root segment with simple sealing structure // (no blocks in between reference block and latest sealed) // ROOT <- B1 <- B2(S1) From b2b23fa0ef43283b51524082ba9313e93967fd87 Mon Sep 17 00:00:00 2001 From: Jordan Schalm Date: Mon, 11 Jul 2022 14:50:05 -0700 Subject: [PATCH 123/223] handle multi-block root sealing segment --- model/flow/sealing_segment.go | 160 +++++++++++++++++++---------- model/flow/sealing_segment_test.go | 2 +- 2 files changed, 108 insertions(+), 54 deletions(-) diff --git a/model/flow/sealing_segment.go b/model/flow/sealing_segment.go index feae0878188..e0611e02597 100644 --- a/model/flow/sealing_segment.go +++ b/model/flow/sealing_segment.go @@ -4,22 +4,12 @@ import ( "fmt" ) -const ( - // expected length of a root sealing segment - rootSegmentBlocksLen = 1 - - // expected view of the block in a root sealing segment - rootSegmentBlockView = 0 -) - -// SealingSegment is the chain segment such that the last block (greatest -// height) is this snapshot's reference block and the first 
(least height) -// is the most recently sealed block as of this snapshot (ie. the block -// referenced by LatestSeal). +// SealingSegment is the chain segment such that the last block (greatest height) +// is this snapshot's reference block and the first (least height) is the most +// recently sealed block as of this snapshot (ie. the block referenced by LatestSeal). // // In other words, the most recently incorporated seal as of the highest block -// references the lowest block. The highest block does not need to contain this -// seal. +// references the lowest block. The highest block does not need to contain this seal. // // Example 1 - E seals A: // A <- B <- C <- D <- E(SA) @@ -30,27 +20,41 @@ const ( // A <- B <- C <- D(SA) <- E // // Example 3 - E contains multiple seals -// B <- C <- D <- E(SA, SB) +// B <- C <- D <- E(SA, SB) +// +// MINIMALITY REQUIREMENT: // Note that block B is the highest sealed block as of E. Therefore, the // sealing segment's lowest block must be B. Essentially, this is a minimality // requirement for the history: it shouldn't be longer than necessary. So // extending the chain segment above to A <- B <- C <- D <- E(SA, SB) would // _not_ yield a valid SealingSegment. // -// Root sealing segments contain only one self-sealing block. All other sealing -// segments contain multiple blocks. -// The segment is in ascending height order. +// ROOT SEALING SEGMENTS: +// Root sealing segments are sealing segments which contain the root block: +// * the root block must be the first block (least height) in the segment +// * no blocks in the segment may contain any seals (by the minimality requirement) +// * it is possible (but not necessary) for root sealing segments to contain only 1 self-sealing block +// +// Example 1 - one self-sealing root block +// ROOT +// The above sealing segment is the form of sealing segments within root snapshots, +// for example those snapshots used to bootstrap a new network, or spork. 
+// +// Example 2 - one self-sealing root block followed by any number of seal-less blocks +// ROOT <- A <- B +// +// All non-root sealing segments contain more than one block. +// Sealing segments is in ascending height order. // // In addition to storing the blocks within the sealing segment, as defined above, // the SealingSegment structure also stores any resources which are referenced // by blocks in the segment, but not included in the payloads of blocks within // the segment. In particular: -// - results referenced by receipts within segment payloads -// - results referenced by seals within segment payloads -// - seals which represent the latest state commitment as of a segment block -// +// * results referenced by receipts within segment payloads +// * results referenced by seals within segment payloads +// * seals which represent the latest state commitment as of a segment block type SealingSegment struct { - // Blocks contains the chain segment blocks. + // Blocks contains the chain segment blocks in ascending height order. Blocks []*Block // ExecutionResults contains any results which are referenced by receipts @@ -87,10 +91,10 @@ func (segment *SealingSegment) Lowest() *Block { // FinalizedSeal returns the seal that seals the lowest block. // Per specification, this seal must be included in a SealingSegment. -// Under normal operations (where only a validated SealingSegment -// is processed), no error returns are expected. +// The receiver SealingSegment must be validated. +// No errors are expected during normal operation. 
func (segment *SealingSegment) FinalizedSeal() (*Seal, error) { - if isRootSegment(segment) { + if isRootSegment(segment.LatestSeals) { return segment.FirstSeal, nil } @@ -107,15 +111,11 @@ func (segment *SealingSegment) FinalizedSeal() (*Seal, error) { return seal, nil } -func isRootSegment(segment *SealingSegment) bool { - return len(segment.Blocks) == rootSegmentBlocksLen -} - // Validate validates the sealing segment structure and returns an error if // the segment isn't valid. This is done by re-building the segment from scratch, // re-using the validation logic already present in the SealingSegmentBuilder. -// The node logic requires a valid sealing segment to bootstrap. There are no -// errors expected during normal operations. +// The node logic requires a valid sealing segment to bootstrap. +// No errors are expected during normal operation. func (segment *SealingSegment) Validate() error { // populate lookup of seals and results in the segment to satisfy builder @@ -173,14 +173,15 @@ var ( ErrSegmentInvalidBlockHeight = fmt.Errorf("sealing segment failed sanity check: blocks must be in ascending order") ErrSegmentResultLookup = fmt.Errorf("failed to lookup execution result") ErrSegmentSealLookup = fmt.Errorf("failed to lookup seal") - ErrSegmentInvalidRootView = fmt.Errorf("invalid root sealing segment block view") ) // GetResultFunc is a getter function for results by ID. +// No errors are expected during normal operation. type GetResultFunc func(resultID Identifier) (*ExecutionResult, error) // GetSealByBlockIDFunc is a getter function for seals by block ID, returning // the latest seals incorporated as of the given block. +// No errors are expected during normal operation. type GetSealByBlockIDFunc func(blockID Identifier) (*Seal, error) // SealingSegmentBuilder is a utility for incrementally building a sealing segment. @@ -198,6 +199,7 @@ type SealingSegmentBuilder struct { } // AddBlock appends a block to the sealing segment under construction. 
+// No errors are expected during normal operation. func (builder *SealingSegmentBuilder) AddBlock(block *Block) error { // sanity check: block should be 1 height higher than current highest if !builder.isValidHeight(block) { @@ -271,6 +273,10 @@ func (builder *SealingSegmentBuilder) addExecutionResult(result *ExecutionResult // SealingSegment completes building the sealing segment, validating the segment // constructed so far, and returning it as a SealingSegment if it is valid. +// +// All errors indicate the SealingSegmentBuilder internal state does not represent +// a valid sealing segment. +// No errors are expected during normal operation. func (builder *SealingSegmentBuilder) SealingSegment() (*SealingSegment, error) { if err := builder.validateSegment(); err != nil { return nil, fmt.Errorf("failed to validate sealing segment: %w", err) @@ -284,7 +290,7 @@ func (builder *SealingSegmentBuilder) SealingSegment() (*SealingSegment, error) }, nil } -// isValidHeight returns true iff block is exactly 1 height higher than the current highest block in the segment +// isValidHeight returns true iff block is exactly 1 height higher than the current highest block in the segment. func (builder *SealingSegmentBuilder) isValidHeight(block *Block) bool { if builder.highest() == nil { return true @@ -293,37 +299,66 @@ func (builder *SealingSegmentBuilder) isValidHeight(block *Block) bool { return block.Header.Height == builder.highest().Header.Height+1 } -// isValidRootSegment will check that the block in the root segment has a view of 0. 
-func (builder *SealingSegmentBuilder) isValidRootSegment() bool { - return len(builder.blocks) == rootSegmentBlocksLen && - builder.highest().Header.View == rootSegmentBlockView && - len(builder.results) == rootSegmentBlocksLen && // root segment has only 1 result - builder.firstSeal != nil && // first seal is the root seal itself and must exist - builder.results[0].BlockID == builder.blocks[0].ID() && // root result matches the root block - builder.results[0].ID() == builder.firstSeal.ResultID && // root seal matches the root result - builder.results[0].BlockID == builder.firstSeal.BlockID // root seal seals the root block +// validateRootSegment will check that the current builder state represents a valid +// root sealing segment. In particular: +// * the root block must be the first block (least height) in the segment +// * no blocks in the segment may contain any seals (by the minimality requirement) +// +// All errors indicate an invalid root segment, and either a bug in SealingSegmentBuilder +// or a corrupted underlying protocol state. +// No errors are expected during normal operation. 
+func (builder *SealingSegmentBuilder) validateRootSegment() error { + if len(builder.blocks) == 0 { + return fmt.Errorf("root segment must have at least 1 block") + } + if builder.lowest().Header.View != 0 { + return fmt.Errorf("root block has unexpected view (%d != 0)", builder.lowest().Header.View) + } + if len(builder.results) != 1 { + return fmt.Errorf("expected %d results, got %d", 1, len(builder.results)) + } + if builder.firstSeal == nil { + return fmt.Errorf("firstSeal must not be nil for root segment") + } + if builder.results[0].BlockID != builder.lowest().ID() { + return fmt.Errorf("result (block_id=%x) is not for root block (id=%x)", builder.results[0].BlockID, builder.lowest().ID()) + } + if builder.results[0].ID() != builder.firstSeal.ResultID { + return fmt.Errorf("firstSeal (result_id=%x) is not for root result (id=%x)", builder.firstSeal.ResultID, builder.results[0].ID()) + } + if builder.results[0].BlockID != builder.firstSeal.BlockID { + return fmt.Errorf("root seal (block_id=%x) references different block than root result (block_id=%x)", builder.firstSeal.BlockID, builder.results[0].BlockID) + } + for _, block := range builder.blocks { + if len(block.Payload.Seals) > 0 { + return fmt.Errorf("root segment cannot contain blocks with seals (minimality requirement) - block (height=%d,id=%x) has %d seals", + block.Header.Height, block.ID(), len(block.Payload.Seals)) + } + } + return nil } // validateSegment will validate if builder satisfies conditions for a valid sealing segment. +// No errors are expected during normal operation. 
func (builder *SealingSegmentBuilder) validateSegment() error { // sealing cannot be empty if len(builder.blocks) == 0 { return fmt.Errorf("expect at least 2 blocks in a sealing segment or 1 block in the case of root segments, but got an empty sealing segment: %w", ErrSegmentBlocksWrongLen) } - // if root sealing segment skip seal sanity check - if len(builder.blocks) == 1 { - if !builder.isValidRootSegment() { - return fmt.Errorf("root sealing segment block has the wrong view got (%d) expected (%d): %w", builder.highest().Header.View, rootSegmentBlockView, ErrSegmentInvalidRootView) + // if root sealing segment, use different validation + if isRootSegment(builder.latestSeals) { + err := builder.validateRootSegment() + if err != nil { + return fmt.Errorf("invalid root segment: %w", err) } - return nil } // validate the latest seal is for the lowest block _, err := findLatestSealForLowestBlock(builder.blocks, builder.latestSeals) if err != nil { - return fmt.Errorf("sealing segment missing seal lowest (%x) highest (%x) %v: %w", builder.lowest().ID(), builder.highest().ID(), err, ErrSegmentMissingSeal) + return fmt.Errorf("sealing segment missing (block_id=%x) highest (block_id=%x) %v: %w", builder.lowest().ID(), builder.highest().ID(), err, ErrSegmentMissingSeal) } return nil @@ -355,9 +390,9 @@ func NewSealingSegmentBuilder(resultLookup GetResultFunc, sealLookup GetSealByBl } } -// findLatestSealForLowestBlock finds the latest seal as of the highest block. -// As a sanity check, the method confirms that the latest seal is for lowest block, i.e. -// the sealing segment's history is minimal. +// findLatestSealForLowestBlock finds the seal for the lowest block. +// As a sanity check, the method confirms that this seal is the latest seal as of the highest block. +// In other words, this function checks that the sealing segment's history is minimal. 
// Inputs: // * `blocks` is the continuous sequence of blocks that form the sealing segment // * `latestSeals` holds for each block the identifier of the latest seal included in the fork as of this block @@ -395,7 +430,7 @@ func findLatestSealForLowestBlock(blocks []*Block, latestSeals map[Identifier]Id } } - // the latest seal must be found in a block that has Seal when traversing blocks + // the latest seal must be found in a block that has a seal when traversing blocks // backwards from higher height to lower height. // otherwise, the sealing segment is invalid if len(block.Payload.Seals) > 0 { @@ -406,3 +441,22 @@ func findLatestSealForLowestBlock(blocks []*Block, latestSeals map[Identifier]Id return nil, fmt.Errorf("invalid segment: seal %v not found", latestSealID) } + +// isRootSegment returns true if the input latestSeals map represents a root segment. +// The implementation makes use of the fact that root sealing segments uniquely +// have the same latest seal, for all blocks in the segment. 
+func isRootSegment(latestSeals map[Identifier]Identifier) bool { + var rootSealID Identifier + // set root seal ID to the latest seal value for any block in the segment + for _, sealID := range latestSeals { + rootSealID = sealID + break + } + // then, verify all other blocks have the same latest seal + for _, sealID := range latestSeals { + if sealID != rootSealID { + return false + } + } + return true +} diff --git a/model/flow/sealing_segment_test.go b/model/flow/sealing_segment_test.go index d64826ad1e4..00321775074 100644 --- a/model/flow/sealing_segment_test.go +++ b/model/flow/sealing_segment_test.go @@ -285,7 +285,7 @@ func (suite *SealingSegmentSuite) TestBuild_RootSegmentWrongView() { require.NoError(suite.T(), err) _, err = suite.builder.SealingSegment() - require.ErrorIs(suite.T(), err, flow.ErrSegmentInvalidRootView) + require.Error(suite.T(), err) } // Test the case when the highest block in the segment does not contain seals but From f7bdce3a8204e108dc05e28c0b75a3104cf81602 Mon Sep 17 00:00:00 2001 From: Patrick Lee Date: Mon, 11 Jul 2022 15:03:53 -0700 Subject: [PATCH 124/223] s/Rem/Remove/g to improve code readability --- engine/access/ingestion/engine.go | 6 +++--- engine/consensus/matching/core.go | 2 +- engine/execution/ingestion/deltas.go | 6 +++--- engine/execution/ingestion/engine.go | 6 +++--- engine/verification/fetcher/engine.go | 4 ++-- engine/verification/fetcher/engine_test.go | 16 +++++++-------- .../fetcher/execution_fork_test.go | 2 +- module/builder/collection/builder.go | 6 +++--- module/builder/collection/builder_test.go | 2 +- module/finalizer/collection/finalizer.go | 2 +- module/finalizer/collection/finalizer_test.go | 2 +- module/finalizer/consensus/cleanup.go | 4 ++-- module/mempool/assignments.go | 4 ++-- module/mempool/backData.go | 4 ++-- module/mempool/blocks.go | 4 ++-- module/mempool/chunk_data_packs.go | 4 ++-- module/mempool/chunk_requests.go | 6 +++--- module/mempool/chunk_statuses.go | 6 +++--- 
module/mempool/collections.go | 4 ++-- .../mempool/consensus/exec_fork_suppressor.go | 6 +++--- .../consensus/exec_fork_suppressor_test.go | 20 +++++++++---------- .../consensus/incorporated_result_seals.go | 6 +++--- module/mempool/guarantees.go | 4 ++-- module/mempool/herocache/backdata/cache.go | 8 ++++---- .../mempool/herocache/backdata/cache_test.go | 10 +++++----- .../herocache/backdata/heropool/pool.go | 4 ++-- module/mempool/herocache/dns_cache.go | 12 +++++------ module/mempool/herocache/dns_cache_test.go | 4 ++-- module/mempool/herocache/transactions.go | 6 +++--- module/mempool/herocache/transactions_test.go | 2 +- module/mempool/identifier_map.go | 8 ++++---- module/mempool/identifiers.go | 4 ++-- module/mempool/incorporated_result_seals.go | 4 ++-- module/mempool/mock/assignments.go | 4 ++-- module/mempool/mock/back_data.go | 4 ++-- module/mempool/mock/blocks.go | 4 ++-- module/mempool/mock/chunk_data_packs.go | 4 ++-- module/mempool/mock/chunk_requests.go | 4 ++-- module/mempool/mock/chunk_statuses.go | 4 ++-- module/mempool/mock/collections.go | 4 ++-- module/mempool/mock/deltas.go | 4 ++-- module/mempool/mock/guarantees.go | 4 ++-- module/mempool/mock/identifier_map.go | 8 ++++---- module/mempool/mock/identifiers.go | 4 ++-- .../mempool/mock/incorporated_result_seals.go | 4 ++-- module/mempool/mock/pending_receipts.go | 4 ++-- module/mempool/mock/results.go | 4 ++-- module/mempool/mock/transaction_timings.go | 4 ++-- module/mempool/mock/transactions.go | 4 ++-- module/mempool/pending_receipts.go | 2 +- module/mempool/results.go | 4 ++-- module/mempool/state_deltas.go | 4 ++-- module/mempool/stdmap/assignments.go | 6 +++--- .../stdmap/backDataHeapBenchmark_test.go | 6 +++--- module/mempool/stdmap/backdata/mapBackData.go | 4 ++-- module/mempool/stdmap/backend.go | 10 +++++----- module/mempool/stdmap/backend_test.go | 4 ++-- module/mempool/stdmap/chunk_data_packs.go | 6 +++--- module/mempool/stdmap/chunk_requests.go | 10 +++++----- 
module/mempool/stdmap/chunk_statuses.go | 8 ++++---- module/mempool/stdmap/collections.go | 6 +++--- module/mempool/stdmap/eject.go | 2 +- module/mempool/stdmap/guarantees_test.go | 2 +- module/mempool/stdmap/identifier_map.go | 14 ++++++------- module/mempool/stdmap/identifier_map_test.go | 10 +++++----- module/mempool/stdmap/identifiers.go | 6 +++--- .../stdmap/incorporated_result_seals.go | 8 ++++---- .../stdmap/incorporated_result_seals_test.go | 16 +++++++-------- module/mempool/stdmap/pending_receipts.go | 6 +++--- .../mempool/stdmap/pending_receipts_test.go | 8 ++++---- module/mempool/stdmap/receipts.go | 6 +++--- module/mempool/stdmap/receipts_test.go | 2 +- module/mempool/stdmap/results.go | 6 +++--- module/mempool/stdmap/times.go | 6 +++--- module/mempool/stdmap/times_test.go | 2 +- module/mempool/stdmap/transaction_timings.go | 6 +++--- .../stdmap/transaction_timings_test.go | 2 +- module/mempool/stdmap/transactions_test.go | 2 +- module/mempool/transaction_timings.go | 4 ++-- module/mempool/transactions.go | 4 ++-- module/metrics/transaction.go | 6 +++--- 81 files changed, 222 insertions(+), 222 deletions(-) diff --git a/engine/access/ingestion/engine.go b/engine/access/ingestion/engine.go index 1c78d87a0fb..bb51bc2d584 100644 --- a/engine/access/ingestion/engine.go +++ b/engine/access/ingestion/engine.go @@ -415,7 +415,7 @@ func (e *Engine) trackFinalizedMetricForBlock(hb *model.Block) { if ti, found := e.blocksToMarkExecuted.ByID(hb.BlockID); found { e.trackExecutedMetricForBlock(block, ti) - e.blocksToMarkExecuted.Rem(hb.BlockID) + e.blocksToMarkExecuted.Remove(hb.BlockID) } } @@ -491,14 +491,14 @@ func (e *Engine) handleCollection(originID flow.Identifier, entity flow.Entity) for _, t := range light.Transactions { e.transactionMetrics.TransactionFinalized(t, ti) } - e.collectionsToMarkFinalized.Rem(light.ID()) + e.collectionsToMarkFinalized.Remove(light.ID()) } if ti, found := e.collectionsToMarkExecuted.ByID(light.ID()); found { for _, t := range 
light.Transactions { e.transactionMetrics.TransactionExecuted(t, ti) } - e.collectionsToMarkExecuted.Rem(light.ID()) + e.collectionsToMarkExecuted.Remove(light.ID()) } // FIX: we can't index guarantees here, as we might have more than one block diff --git a/engine/consensus/matching/core.go b/engine/consensus/matching/core.go index 1d7e654fce4..865b33b2297 100644 --- a/engine/consensus/matching/core.go +++ b/engine/consensus/matching/core.go @@ -120,7 +120,7 @@ func (c *Core) ProcessReceipt(receipt *flow.ExecutionReceipt) error { } childReceipts := c.pendingReceipts.ByPreviousResultID(resultID) - c.pendingReceipts.Rem(receipt.ID()) + c.pendingReceipts.Remove(receipt.ID()) for _, childReceipt := range childReceipts { // recursively processing the child receipts diff --git a/engine/execution/ingestion/deltas.go b/engine/execution/ingestion/deltas.go index 0f1b4518e1e..2fd1636db65 100644 --- a/engine/execution/ingestion/deltas.go +++ b/engine/execution/ingestion/deltas.go @@ -25,9 +25,9 @@ func (s *Deltas) Add(delta *messages.ExecutionStateDelta) bool { return s.Backend.Add(delta) } -// Rem will remove a deltas by block ID. -func (s *Deltas) Rem(blockID flow.Identifier) bool { - removed := s.Backend.Rem(blockID) +// Remove will remove a deltas by block ID. +func (s *Deltas) Remove(blockID flow.Identifier) bool { + removed := s.Backend.Remove(blockID) return removed } diff --git a/engine/execution/ingestion/engine.go b/engine/execution/ingestion/engine.go index b15010f7afd..d318ca17f30 100644 --- a/engine/execution/ingestion/engine.go +++ b/engine/execution/ingestion/engine.go @@ -719,7 +719,7 @@ func (e *Engine) onBlockExecuted(executed *entity.ExecutableBlock, finalState fl } // remove the executed block - executionQueues.Rem(executed.ID()) + executionQueues.Remove(executed.ID()) return nil }) @@ -761,7 +761,7 @@ func (e *Engine) executeBlockIfComplete(eb *entity.ExecutableBlock) bool { // Hex("delta_start_state", delta.ExecutableBlock.StartState). 
// Msg("can not apply the state delta, the start state does not match") // - // e.syncDeltas.Rem(eb.Block.ID()) + // e.syncDeltas.Remove(eb.Block.ID()) // } // if don't have the delta, then check if everything is ready for executing @@ -870,7 +870,7 @@ func (e *Engine) handleCollection(originID flow.Identifier, collection *flow.Col // this also prevents from executing the same block twice, because the second // time when the collection arrives, it will not be found in the blockByCollectionID // index. - backdata.Rem(collID) + backdata.Remove(collID) return nil }, diff --git a/engine/verification/fetcher/engine.go b/engine/verification/fetcher/engine.go index 6d3017da14e..a53ea354817 100644 --- a/engine/verification/fetcher/engine.go +++ b/engine/verification/fetcher/engine.go @@ -316,7 +316,7 @@ func (e *Engine) handleValidatedChunkDataPack(ctx context.Context, status *verification.ChunkStatus, chunkDataPack *flow.ChunkDataPack) (bool, error) { - removed := e.pendingChunks.Rem(status.ChunkIndex, status.ExecutionResult.ID()) + removed := e.pendingChunks.Remove(status.ChunkIndex, status.ExecutionResult.ID()) if !removed { // we deduplicate the chunk data responses at this point, reaching here means a // duplicate chunk data response is under process concurrently, so we give up @@ -481,7 +481,7 @@ func (e *Engine) NotifyChunkDataPackSealed(chunkIndex uint64, resultID flow.Iden lg = lg.With(). Uint64("block_height", status.BlockHeight). Hex("result_id", logging.ID(status.ExecutionResult.ID())).Logger() - removed := e.pendingChunks.Rem(chunkIndex, resultID) + removed := e.pendingChunks.Remove(chunkIndex, resultID) e.chunkConsumerNotifier.Notify(chunkLocatorID) lg.Info(). 
diff --git a/engine/verification/fetcher/engine_test.go b/engine/verification/fetcher/engine_test.go index 77ae0a6edca..4ccc5407e17 100644 --- a/engine/verification/fetcher/engine_test.go +++ b/engine/verification/fetcher/engine_test.go @@ -137,7 +137,7 @@ func testProcessAssignChunkHappyPath(t *testing.T, chunkNum int, assignedNum int mockResultsByIDs(s.results, []*flow.ExecutionResult{result}) mockBlocksStorage(s.blocks, s.headers, block) mockPendingChunksAdd(t, s.pendingChunks, statuses, true) - mockPendingChunksRem(t, s.pendingChunks, statuses, true) + mockPendingChunksRemove(t, s.pendingChunks, statuses, true) mockPendingChunksGet(s.pendingChunks, statuses) mockStateAtBlockIDForIdentities(s.state, block.ID(), agrees.Union(disagrees)) @@ -201,7 +201,7 @@ func TestChunkResponse_RemovingStatusFails(t *testing.T) { chunkLocatorID := statuses[0].ChunkLocatorID() // trying to remove the pending status fails. - mockPendingChunksRem(t, s.pendingChunks, statuses, false) + mockPendingChunksRemove(t, s.pendingChunks, statuses, false) chunkDataPacks, _ := verifiableChunksFixture(t, statuses, block, result, collMap) @@ -235,7 +235,7 @@ func TestProcessAssignChunkSealedAfterRequest(t *testing.T) { // mocks resources on fetcher engine side. mockResultsByIDs(s.results, []*flow.ExecutionResult{result}) mockPendingChunksAdd(t, s.pendingChunks, statuses, true) - mockPendingChunksRem(t, s.pendingChunks, statuses, true) + mockPendingChunksRemove(t, s.pendingChunks, statuses, true) mockPendingChunksGet(s.pendingChunks, statuses) mockStateAtBlockIDForIdentities(s.state, block.ID(), agrees.Union(disagrees)) @@ -395,7 +395,7 @@ func testInvalidChunkDataResponse(t *testing.T, // none of the subsequent calls on the pipeline path should happen upon validation fails. 
s.results.AssertNotCalled(t, "ByID") - s.pendingChunks.AssertNotCalled(t, "Rem") + s.pendingChunks.AssertNotCalled(t, "Remove") } // TestChunkResponse_MissingStatus evaluates that if the fetcher engine receives a chunk data pack response for which @@ -432,7 +432,7 @@ func TestChunkResponse_MissingStatus(t *testing.T) { // none of the subsequent calls on the pipeline path should happen. s.results.AssertNotCalled(t, "ByID") s.blocks.AssertNotCalled(t, "ByID") - s.pendingChunks.AssertNotCalled(t, "Rem") + s.pendingChunks.AssertNotCalled(t, "Remove") s.state.AssertNotCalled(t, "AtBlockID") } @@ -573,13 +573,13 @@ func mockPendingChunksAdd(t *testing.T, pendingChunks *mempool.ChunkStatuses, li }).Return(added).Times(len(list)) } -// mockPendingChunksRem mocks the remove method of pending chunks for expecting only the specified list of chunk statuses. +// mockPendingChunksRemove mocks the remove method of pending chunks for expecting only the specified list of chunk statuses. // Each chunk status should be removed only once. // It should return the specified added boolean variable as the result of mocking. -func mockPendingChunksRem(t *testing.T, pendingChunks *mempool.ChunkStatuses, list []*verification.ChunkStatus, removed bool) { +func mockPendingChunksRemove(t *testing.T, pendingChunks *mempool.ChunkStatuses, list []*verification.ChunkStatus, removed bool) { mu := &sync.Mutex{} - pendingChunks.On("Rem", mock.Anything, mock.Anything). + pendingChunks.On("Remove", mock.Anything, mock.Anything). Run(func(args mock.Arguments) { // to provide mutual exclusion under concurrent invocations. 
mu.Lock() diff --git a/engine/verification/fetcher/execution_fork_test.go b/engine/verification/fetcher/execution_fork_test.go index 0f3d7a47e9e..01134ec1bdd 100644 --- a/engine/verification/fetcher/execution_fork_test.go +++ b/engine/verification/fetcher/execution_fork_test.go @@ -45,7 +45,7 @@ func TestExecutionForkWithDuplicateAssignedChunks(t *testing.T) { mockResultsByIDs(s.results, []*flow.ExecutionResult{resultA, resultB}) mockBlocksStorage(s.blocks, s.headers, block) mockPendingChunksAdd(t, s.pendingChunks, assignedChunkStatuses, true) - mockPendingChunksRem(t, s.pendingChunks, assignedChunkStatuses, true) + mockPendingChunksRemove(t, s.pendingChunks, assignedChunkStatuses, true) mockPendingChunksGet(s.pendingChunks, assignedChunkStatuses) // fetcher engine must create a chunk data request for each of chunk statusA and statusB diff --git a/module/builder/collection/builder.go b/module/builder/collection/builder.go index 645ed7cc2a4..9aded4b8b18 100644 --- a/module/builder/collection/builder.go +++ b/module/builder/collection/builder.go @@ -244,14 +244,14 @@ func (b *Builder) BuildOn(parentID flow.Identifier, setter func(*flow.Header) er } if blockFinalizedAtReferenceHeight.ID() != tx.ReferenceBlockID { // the transaction references an orphaned block - it will never be valid - b.transactions.Rem(tx.ID()) + b.transactions.Remove(tx.ID()) continue } // ensure the reference block is not too old if refHeader.Height < minPossibleRefHeight { // the transaction is expired, it will never be valid - b.transactions.Rem(tx.ID()) + b.transactions.Remove(tx.ID()) continue } @@ -264,7 +264,7 @@ func (b *Builder) BuildOn(parentID flow.Identifier, setter func(*flow.Header) er // check that the transaction was not already included in finalized history. 
if lookup.isFinalizedAncestor(txID) { // remove from mempool, conflicts with finalized block will never be valid - b.transactions.Rem(txID) + b.transactions.Remove(txID) continue } diff --git a/module/builder/collection/builder_test.go b/module/builder/collection/builder_test.go index a1044926c48..a5e54bb56ad 100644 --- a/module/builder/collection/builder_test.go +++ b/module/builder/collection/builder_test.go @@ -167,7 +167,7 @@ func (suite *BuilderSuite) ProtoStateRoot() *flow.Header { func (suite *BuilderSuite) ClearPool() { // TODO use Clear() for _, tx := range suite.pool.All() { - suite.pool.Rem(tx.ID()) + suite.pool.Remove(tx.ID()) } } diff --git a/module/finalizer/collection/finalizer.go b/module/finalizer/collection/finalizer.go index 1f87950e560..71b359f0a94 100644 --- a/module/finalizer/collection/finalizer.go +++ b/module/finalizer/collection/finalizer.go @@ -116,7 +116,7 @@ func (f *Finalizer) MakeFinal(blockID flow.Identifier) error { for _, colTx := range payload.Collection.Transactions { txID := colTx.ID() // ignore result -- we don't care whether the transaction was in the pool - _ = f.transactions.Rem(txID) + _ = f.transactions.Remove(txID) } // finalize the block in cluster state diff --git a/module/finalizer/collection/finalizer_test.go b/module/finalizer/collection/finalizer_test.go index 8dd1039560e..cd21963856c 100644 --- a/module/finalizer/collection/finalizer_test.go +++ b/module/finalizer/collection/finalizer_test.go @@ -47,7 +47,7 @@ func TestFinalizer(t *testing.T) { require.Nil(t, err) // clear the mempool for _, tx := range pool.All() { - pool.Rem(tx.ID()) + pool.Remove(tx.ID()) } } diff --git a/module/finalizer/consensus/cleanup.go b/module/finalizer/consensus/cleanup.go index 3ba48a37ecb..75707d6577b 100644 --- a/module/finalizer/consensus/cleanup.go +++ b/module/finalizer/consensus/cleanup.go @@ -35,13 +35,13 @@ func CleanupMempools( } for _, guarantee := range payload.Guarantees { - _ = guarantees.Rem(guarantee.ID()) + _ = 
guarantees.Remove(guarantee.ID()) } collector.MempoolEntries(metrics.ResourceGuarantee, guarantees.Size()) for _, seal := range payload.Seals { - _ = seals.Rem(seal.ID()) + _ = seals.Remove(seal.ID()) } collector.MempoolEntries(metrics.ResourceSeal, seals.Size()) diff --git a/module/mempool/assignments.go b/module/mempool/assignments.go index 4d5521720a9..0c1b934804c 100644 --- a/module/mempool/assignments.go +++ b/module/mempool/assignments.go @@ -17,9 +17,9 @@ type Assignments interface { // false if it was already in the mempool. Add(assignmentFingerprint flow.Identifier, assignment *chunkmodels.Assignment) bool - // Rem will remove the given Assignment from the memory pool; it will + // Remove will remove the given Assignment from the memory pool; it will // return true if the Assignment was known and removed. - Rem(assignmentID flow.Identifier) bool + Remove(assignmentID flow.Identifier) bool // ByID retrieve the chunk assigment with the given ID from the memory pool. // It will return false if it was not found in the mempool. diff --git a/module/mempool/backData.go b/module/mempool/backData.go index dfb77db5539..51e3cc2a023 100644 --- a/module/mempool/backData.go +++ b/module/mempool/backData.go @@ -16,8 +16,8 @@ type BackData interface { // Add adds the given entity to the backdata. Add(entityID flow.Identifier, entity flow.Entity) bool - // Rem removes the entity with the given identifier. - Rem(entityID flow.Identifier) (flow.Entity, bool) + // Remove removes the entity with the given identifier. + Remove(entityID flow.Identifier) (flow.Entity, bool) // Adjust adjusts the entity using the given function if the given identifier can be found. // Returns a bool which indicates whether the entity was updated as well as the updated entity. 
diff --git a/module/mempool/blocks.go b/module/mempool/blocks.go index 39360fbb2d5..a91c65b9f29 100644 --- a/module/mempool/blocks.go +++ b/module/mempool/blocks.go @@ -17,9 +17,9 @@ type Blocks interface { // false if it was already in the mempool. Add(block *flow.Block) bool - // Rem will remove the given block from the memory pool; it will + // Remove will remove the given block from the memory pool; it will // will return true if the block was known and removed. - Rem(blockID flow.Identifier) bool + Remove(blockID flow.Identifier) bool // ByID retrieve the block with the given ID from the memory pool. // It will return false if it was not found in the mempool. diff --git a/module/mempool/chunk_data_packs.go b/module/mempool/chunk_data_packs.go index 312dcf56f1c..79fc0d68362 100644 --- a/module/mempool/chunk_data_packs.go +++ b/module/mempool/chunk_data_packs.go @@ -17,9 +17,9 @@ type ChunkDataPacks interface { // false if it was already in the mempool. Add(cdp *flow.ChunkDataPack) bool - // Rem will remove the given ChunkDataPack from the memory pool; it will + // Remove will remove the given ChunkDataPack from the memory pool; it will // return true if the ChunkDataPack was known and removed. - Rem(chunkID flow.Identifier) bool + Remove(chunkID flow.Identifier) bool // ByID retrieve the chunk datapacke with the given chunk ID from the memory // pool. It will return false if it was not found in the mempool. diff --git a/module/mempool/chunk_requests.go b/module/mempool/chunk_requests.go index e28db6b67d7..f4a0ef82b48 100644 --- a/module/mempool/chunk_requests.go +++ b/module/mempool/chunk_requests.go @@ -63,10 +63,10 @@ type ChunkRequests interface { // chunk ID in the memory. Otherwise, it aborts the insertion and returns false. Add(request *verification.ChunkDataPackRequest) bool - // Rem provides deletion functionality from the memory pool. - // If there is a chunk request with this ID, Rem removes it and returns true. 
+ // Remove provides deletion functionality from the memory pool. + // If there is a chunk request with this ID, Remove removes it and returns true. // Otherwise, it returns false. - Rem(chunkID flow.Identifier) bool + Remove(chunkID flow.Identifier) bool // PopAll atomically returns all locators associated with this chunk ID while clearing out the // chunk request status for this chunk id. diff --git a/module/mempool/chunk_statuses.go b/module/mempool/chunk_statuses.go index 616db4355d5..6068554abe1 100644 --- a/module/mempool/chunk_statuses.go +++ b/module/mempool/chunk_statuses.go @@ -17,11 +17,11 @@ type ChunkStatuses interface { // chunk ID in the memory. Otherwise, it aborts the insertion and returns false. Add(status *verification.ChunkStatus) bool - // Rem provides deletion functionality from the memory pool based on the pair of + // Remove provides deletion functionality from the memory pool based on the pair of // chunk index and result id. - // If there is a chunk status associated with this pair, Rem removes it and returns true. + // If there is a chunk status associated with this pair, Remove removes it and returns true. // Otherwise, it returns false. - Rem(chunkIndex uint64, resultID flow.Identifier) bool + Remove(chunkIndex uint64, resultID flow.Identifier) bool // All returns all chunk statuses stored in this memory pool. All() []*verification.ChunkStatus diff --git a/module/mempool/collections.go b/module/mempool/collections.go index f7a5a133a92..c9bd7e08eea 100644 --- a/module/mempool/collections.go +++ b/module/mempool/collections.go @@ -17,9 +17,9 @@ type Collections interface { // false if it was already in the mempool. Add(coll *flow.Collection) bool - // Rem will remove the given collection from the memory pool; it will + // Remove will remove the given collection from the memory pool; it will // return true if the collection was known and removed. 
- Rem(collID flow.Identifier) bool + Remove(collID flow.Identifier) bool // ByID retrieve the collection with the given ID from the memory pool. // It will return false if it was not found in the mempool. diff --git a/module/mempool/consensus/exec_fork_suppressor.go b/module/mempool/consensus/exec_fork_suppressor.go index 0d767265d70..b31b8d65801 100644 --- a/module/mempool/consensus/exec_fork_suppressor.go +++ b/module/mempool/consensus/exec_fork_suppressor.go @@ -182,14 +182,14 @@ func (s *ExecForkSuppressor) ByID(identifier flow.Identifier) (*flow.Incorporate return s.seals.ByID(identifier) } -// Rem removes the IncorporatedResultSeal with id from the mempool -func (s *ExecForkSuppressor) Rem(id flow.Identifier) bool { +// Remove removes the IncorporatedResultSeal with id from the mempool +func (s *ExecForkSuppressor) Remove(id flow.Identifier) bool { s.mutex.Lock() defer s.mutex.Unlock() seal, found := s.seals.ByID(id) if found { - s.seals.Rem(id) + s.seals.Remove(id) set, found := s.sealsForBlock[seal.Seal.BlockID] if !found { // In the current implementation, this cannot happen, as every entity in the mempool is also contained in sealsForBlock. 
diff --git a/module/mempool/consensus/exec_fork_suppressor_test.go b/module/mempool/consensus/exec_fork_suppressor_test.go index 306ed22d8da..3f9e8e7d52f 100644 --- a/module/mempool/consensus/exec_fork_suppressor_test.go +++ b/module/mempool/consensus/exec_fork_suppressor_test.go @@ -133,11 +133,11 @@ func Test_Add(t *testing.T) { }) } -// Test_Rem checks that ExecForkSuppressor.Rem() +// Test_Remove checks that ExecForkSuppressor.Remove() // * delegates the call to the underlying mempool -func Test_Rem(t *testing.T) { +func Test_Remove(t *testing.T) { WithExecStateForkSuppressor(t, func(wrapper *ExecForkSuppressor, wrappedMempool *poolmock.IncorporatedResultSeals, execForkActor *actormock.ExecForkActorMock) { - // element is in wrapped mempool: Rem should be called + // element is in wrapped mempool: Remove should be called seal := unittest.IncorporatedResultSeal.Fixture() wrappedMempool.On("Add", seal).Return(true, nil).Once() wrappedMempool.On("ByID", seal.ID()).Return(seal, true) @@ -146,16 +146,16 @@ func Test_Rem(t *testing.T) { assert.True(t, added) wrappedMempool.On("ByID", seal.ID()).Return(seal, true) - wrappedMempool.On("Rem", seal.ID()).Return(true).Once() - removed := wrapper.Rem(seal.ID()) + wrappedMempool.On("Remove", seal.ID()).Return(true).Once() + removed := wrapper.Remove(seal.ID()) require.Equal(t, true, removed) wrappedMempool.AssertExpectations(t) - // element _not_ in wrapped mempool: Rem might be called + // element _not_ in wrapped mempool: Remove might be called seal = unittest.IncorporatedResultSeal.Fixture() wrappedMempool.On("ByID", seal.ID()).Return(seal, false) - wrappedMempool.On("Rem", seal.ID()).Return(false).Maybe() - removed = wrapper.Rem(seal.ID()) + wrappedMempool.On("Remove", seal.ID()).Return(false).Maybe() + removed = wrapper.Remove(seal.ID()) require.Equal(t, false, removed) wrappedMempool.AssertExpectations(t) }) @@ -280,10 +280,10 @@ func Test_ForkDetectionPersisted(t *testing.T) { }) } -// Test_AddRem_SmokeTest tests a 
real system of stdmap.IncorporatedResultSeals mempool +// Test_AddRemove_SmokeTest tests a real system of stdmap.IncorporatedResultSeals mempool // which is wrapped in an ExecForkSuppressor. // We add and remove lots of different seals. -func Test_AddRem_SmokeTest(t *testing.T) { +func Test_AddRemove_SmokeTest(t *testing.T) { onExecFork := func([]*flow.IncorporatedResultSeal) { assert.Fail(t, "no call to onExecFork expected ") } diff --git a/module/mempool/consensus/incorporated_result_seals.go b/module/mempool/consensus/incorporated_result_seals.go index a0cd976628a..88069d5a68b 100644 --- a/module/mempool/consensus/incorporated_result_seals.go +++ b/module/mempool/consensus/incorporated_result_seals.go @@ -87,9 +87,9 @@ func (ir *IncorporatedResultSeals) Limit() uint { return ir.seals.Limit() } -// Rem removes an IncorporatedResultSeal from the mempool -func (ir *IncorporatedResultSeals) Rem(id flow.Identifier) bool { - return ir.seals.Rem(id) +// Remove removes an IncorporatedResultSeal from the mempool +func (ir *IncorporatedResultSeals) Remove(id flow.Identifier) bool { + return ir.seals.Remove(id) } // Size returns the number of items in the mempool diff --git a/module/mempool/guarantees.go b/module/mempool/guarantees.go index 9ef780d515a..19050864eb1 100644 --- a/module/mempool/guarantees.go +++ b/module/mempool/guarantees.go @@ -17,9 +17,9 @@ type Guarantees interface { // return false if it was already in the mempool. Add(guarantee *flow.CollectionGuarantee) bool - // Rem will remove the given collection guarantees from the memory pool; it + // Remove will remove the given collection guarantees from the memory pool; it // will return true if the collection guarantees was known and removed. - Rem(collID flow.Identifier) bool + Remove(collID flow.Identifier) bool // ByID retrieve the collection guarantee with the given ID from the memory // pool. It will return false if it was not found in the mempool. 
diff --git a/module/mempool/herocache/backdata/cache.go b/module/mempool/herocache/backdata/cache.go index 9ed3d52b53f..cc15995f78b 100644 --- a/module/mempool/herocache/backdata/cache.go +++ b/module/mempool/herocache/backdata/cache.go @@ -151,8 +151,8 @@ func (c *Cache) Add(entityID flow.Identifier, entity flow.Entity) bool { return c.put(entityID, entity) } -// Rem removes the entity with the given identifier. -func (c *Cache) Rem(entityID flow.Identifier) (flow.Entity, bool) { +// Remove removes the entity with the given identifier. +func (c *Cache) Remove(entityID flow.Identifier) (flow.Entity, bool) { defer c.logTelemetry() entity, bucketIndex, sliceIndex, exists := c.get(entityID) @@ -173,7 +173,7 @@ func (c *Cache) Rem(entityID flow.Identifier) (flow.Entity, bool) { func (c *Cache) Adjust(entityID flow.Identifier, f func(flow.Entity) flow.Entity) (flow.Entity, bool) { defer c.logTelemetry() - entity, removed := c.Rem(entityID) + entity, removed := c.Remove(entityID) if !removed { return nil, false } @@ -468,5 +468,5 @@ func (c *Cache) unuseSlot(b bucketIndex, s slotIndex) { // invalidateEntity removes the entity linked to the specified slot from the underlying entities // list. So that entity slot is made available to take if needed. func (c *Cache) invalidateEntity(b bucketIndex, s slotIndex) { - c.entities.Rem(c.buckets[b].slots[s].entityIndex) + c.entities.Remove(c.buckets[b].slots[s].entityIndex) } diff --git a/module/mempool/herocache/backdata/cache_test.go b/module/mempool/herocache/backdata/cache_test.go index 691941a954a..8f7d87310ea 100644 --- a/module/mempool/herocache/backdata/cache_test.go +++ b/module/mempool/herocache/backdata/cache_test.go @@ -311,8 +311,8 @@ func TestArrayBackData_All(t *testing.T) { } } -// TestArrayBackData_Rem checks correctness of removing elements from Cache. -func TestArrayBackData_Rem(t *testing.T) { +// TestArrayBackData_Remove checks correctness of removing elements from Cache. 
+func TestArrayBackData_Remove(t *testing.T) { tt := []struct { limit uint32 items uint32 @@ -416,7 +416,7 @@ func testRemoveAtRandom(t *testing.T, bd *Cache, entities []*unittest.MockEntity for removedCount := 0; removedCount < count; { unittest.RequireReturnsBefore(t, func() { index := rand.Int() % len(entities) - expected, removed := bd.Rem(entities[index].ID()) + expected, removed := bd.Remove(entities[index].ID()) if !removed { return } @@ -431,7 +431,7 @@ func testRemoveAtRandom(t *testing.T, bd *Cache, entities []*unittest.MockEntity // testRemoveRange is a test helper that removes specified range of entities from Cache. func testRemoveRange(t *testing.T, bd *Cache, entities []*unittest.MockEntity, from int, to int) { for i := from; i < to; i++ { - expected, removed := bd.Rem(entities[i].ID()) + expected, removed := bd.Remove(entities[i].ID()) require.True(t, removed) require.Equal(t, entities[i], expected) // size sanity check after removal @@ -443,7 +443,7 @@ func testRemoveRange(t *testing.T, bd *Cache, entities []*unittest.MockEntity, f func testCheckRangeRemoved(t *testing.T, bd *Cache, entities []*unittest.MockEntity, from int, to int) { for i := from; i < to; i++ { // both removal and retrieval must fail - expected, removed := bd.Rem(entities[i].ID()) + expected, removed := bd.Remove(entities[i].ID()) require.False(t, removed) require.Nil(t, expected) diff --git a/module/mempool/herocache/backdata/heropool/pool.go b/module/mempool/herocache/backdata/heropool/pool.go index 0aef63aac84..602372224be 100644 --- a/module/mempool/herocache/backdata/heropool/pool.go +++ b/module/mempool/herocache/backdata/heropool/pool.go @@ -233,8 +233,8 @@ func (p *Pool) claimFreeHead() EIndex { return oldFreeHeadIndex } -// Rem removes entity corresponding to given getSliceIndex from the list. -func (p *Pool) Rem(sliceIndex EIndex) { +// Remove removes entity corresponding to given getSliceIndex from the list. 
+func (p *Pool) Remove(sliceIndex EIndex) { p.invalidateEntityAtIndex(sliceIndex) } diff --git a/module/mempool/herocache/dns_cache.go b/module/mempool/herocache/dns_cache.go index 8d4fbb65475..db4c9a9b67b 100644 --- a/module/mempool/herocache/dns_cache.go +++ b/module/mempool/herocache/dns_cache.go @@ -106,12 +106,12 @@ func (d *DNSCache) GetTxtRecord(domain string) (*mempool.TxtRecord, bool) { // RemoveIp removes an ip domain from cache. func (d *DNSCache) RemoveIp(domain string) bool { - return d.ipCache.Rem(domainToIdentifier(domain)) + return d.ipCache.Remove(domainToIdentifier(domain)) } // RemoveTxt removes a txt record from cache. func (d *DNSCache) RemoveTxt(domain string) bool { - return d.txtCache.Rem(domainToIdentifier(domain)) + return d.txtCache.Remove(domainToIdentifier(domain)) } // LockIPDomain locks an ip address dns record if exists in the cache. @@ -143,7 +143,7 @@ func (d *DNSCache) LockIPDomain(domain string) (bool, error) { record.Locked = true - if _, removed := backdata.Rem(id); !removed { + if _, removed := backdata.Remove(id); !removed { return fmt.Errorf("ip record could not be removed from backdata") } @@ -164,7 +164,7 @@ func (d *DNSCache) UpdateIPDomain(domain string, addresses []net.IPAddr, timesta id := domainToIdentifier(domain) // removes old entry if exists. - backdata.Rem(id) + backdata.Remove(id) ipRecord := ipEntity{ IpRecord: mempool.IpRecord{ @@ -190,7 +190,7 @@ func (d *DNSCache) UpdateTxtRecord(txt string, records []string, timestamp int64 id := domainToIdentifier(txt) // removes old entry if exists. 
- backdata.Rem(id) + backdata.Remove(id) txtRecord := txtEntity{ TxtRecord: mempool.TxtRecord{ @@ -239,7 +239,7 @@ func (d *DNSCache) LockTxtRecord(txt string) (bool, error) { record.Locked = true - if _, removed := backdata.Rem(id); !removed { + if _, removed := backdata.Remove(id); !removed { return fmt.Errorf("txt record could not be removed from backdata") } diff --git a/module/mempool/herocache/dns_cache_test.go b/module/mempool/herocache/dns_cache_test.go index 56aa0452b70..0d08fb459ad 100644 --- a/module/mempool/herocache/dns_cache_test.go +++ b/module/mempool/herocache/dns_cache_test.go @@ -224,8 +224,8 @@ func testAddToCache(t *testing.T, } } -// TestDNSCache_Rem checks the correctness of cache against removal. -func TestDNSCache_Rem(t *testing.T) { +// TestDNSCache_Remove checks the correctness of cache against removal. +func TestDNSCache_Remove(t *testing.T) { total := 30 // total entries to store (i.e., 700 ip domains and 700 txt records) sizeLimit := uint32(500) // cache size limit (i.e., 500 ip domains and 500 txt records) diff --git a/module/mempool/herocache/transactions.go b/module/mempool/herocache/transactions.go index 54829c25505..89468ff794b 100644 --- a/module/mempool/herocache/transactions.go +++ b/module/mempool/herocache/transactions.go @@ -82,9 +82,9 @@ func (t Transactions) Size() uint { return t.c.Size() } -// Rem removes transaction from mempool. -func (t *Transactions) Rem(id flow.Identifier) bool { - return t.c.Rem(id) +// Remove removes transaction from mempool. 
+func (t *Transactions) Remove(id flow.Identifier) bool { + return t.c.Remove(id) } // Hash will return a fingerprint hash representing the contents of the diff --git a/module/mempool/herocache/transactions_test.go b/module/mempool/herocache/transactions_test.go index b2c49d93cd9..a56cd1fe4f2 100644 --- a/module/mempool/herocache/transactions_test.go +++ b/module/mempool/herocache/transactions_test.go @@ -42,7 +42,7 @@ func TestTransactionPool(t *testing.T) { }) t.Run("should be able to remove second", func(t *testing.T) { - ok := transactions.Rem(tx2.ID()) + ok := transactions.Remove(tx2.ID()) assert.True(t, ok) }) diff --git a/module/mempool/identifier_map.go b/module/mempool/identifier_map.go index 1ba345b2a27..b5cbab24926 100644 --- a/module/mempool/identifier_map.go +++ b/module/mempool/identifier_map.go @@ -9,12 +9,12 @@ type IdentifierMap interface { // Append will append the id to the list of identifiers associated with key. Append(key, id flow.Identifier) error - // Rem removes the given key with all associated identifiers. - Rem(key flow.Identifier) bool + // Remove removes the given key with all associated identifiers. + Remove(key flow.Identifier) bool - // RemIdFromKey removes the id from the list of identifiers associated with key. + // RemoveIdFromKey removes the id from the list of identifiers associated with key. // If the list becomes empty, it also removes the key from the map. - RemIdFromKey(key, id flow.Identifier) error + RemoveIdFromKey(key, id flow.Identifier) error // Get returns list of all identifiers associated with key and true, if the key exists in the mempool. // Otherwise it returns nil and false. diff --git a/module/mempool/identifiers.go b/module/mempool/identifiers.go index d15afc84af1..dcff3c24a9d 100644 --- a/module/mempool/identifiers.go +++ b/module/mempool/identifiers.go @@ -13,8 +13,8 @@ type Identifiers interface { // false if it was already in the mempool. 
Add(id flow.Identifier) bool - // Rem removes the given identifier - Rem(id flow.Identifier) bool + // Remove removes the given identifier + Remove(id flow.Identifier) bool // Size returns total number of identifiers in mempool Size() uint diff --git a/module/mempool/incorporated_result_seals.go b/module/mempool/incorporated_result_seals.go index 6e0c9454121..3ef75c94e69 100644 --- a/module/mempool/incorporated_result_seals.go +++ b/module/mempool/incorporated_result_seals.go @@ -21,8 +21,8 @@ type IncorporatedResultSeals interface { // Limit returns the size limit of the mempool Limit() uint - // Rem removes an IncorporatedResultSeal from the mempool - Rem(incorporatedResultID flow.Identifier) bool + // Remove removes an IncorporatedResultSeal from the mempool + Remove(incorporatedResultID flow.Identifier) bool // Size returns the number of items in the mempool Size() uint diff --git a/module/mempool/mock/assignments.go b/module/mempool/mock/assignments.go index 12078089f41..91a8f6ca4d0 100644 --- a/module/mempool/mock/assignments.go +++ b/module/mempool/mock/assignments.go @@ -81,8 +81,8 @@ func (_m *Assignments) Has(assignmentID flow.Identifier) bool { return r0 } -// Rem provides a mock function with given fields: assignmentID -func (_m *Assignments) Rem(assignmentID flow.Identifier) bool { +// Remove provides a mock function with given fields: assignmentID +func (_m *Assignments) Remove(assignmentID flow.Identifier) bool { ret := _m.Called(assignmentID) var r0 bool diff --git a/module/mempool/mock/back_data.go b/module/mempool/mock/back_data.go index 33d9eee07a7..5fc4b30f344 100644 --- a/module/mempool/mock/back_data.go +++ b/module/mempool/mock/back_data.go @@ -156,8 +156,8 @@ func (_m *BackData) Identifiers() flow.IdentifierList { return r0 } -// Rem provides a mock function with given fields: entityID -func (_m *BackData) Rem(entityID flow.Identifier) (flow.Entity, bool) { +// Remove provides a mock function with given fields: entityID +func (_m *BackData) 
Remove(entityID flow.Identifier) (flow.Entity, bool) { ret := _m.Called(entityID) var r0 flow.Entity diff --git a/module/mempool/mock/blocks.go b/module/mempool/mock/blocks.go index 89b7a547b4c..e110eca94a5 100644 --- a/module/mempool/mock/blocks.go +++ b/module/mempool/mock/blocks.go @@ -96,8 +96,8 @@ func (_m *Blocks) Hash() flow.Identifier { return r0 } -// Rem provides a mock function with given fields: blockID -func (_m *Blocks) Rem(blockID flow.Identifier) bool { +// Remove provides a mock function with given fields: blockID +func (_m *Blocks) Remove(blockID flow.Identifier) bool { ret := _m.Called(blockID) var r0 bool diff --git a/module/mempool/mock/chunk_data_packs.go b/module/mempool/mock/chunk_data_packs.go index 343cc1e232a..3ac617bf567 100644 --- a/module/mempool/mock/chunk_data_packs.go +++ b/module/mempool/mock/chunk_data_packs.go @@ -96,8 +96,8 @@ func (_m *ChunkDataPacks) Hash() flow.Identifier { return r0 } -// Rem provides a mock function with given fields: chunkID -func (_m *ChunkDataPacks) Rem(chunkID flow.Identifier) bool { +// Remove provides a mock function with given fields: chunkID +func (_m *ChunkDataPacks) Remove(chunkID flow.Identifier) bool { ret := _m.Called(chunkID) var r0 bool diff --git a/module/mempool/mock/chunk_requests.go b/module/mempool/mock/chunk_requests.go index edb09720abe..2d12bba5683 100644 --- a/module/mempool/mock/chunk_requests.go +++ b/module/mempool/mock/chunk_requests.go @@ -87,8 +87,8 @@ func (_m *ChunkRequests) PopAll(chunkID flow.Identifier) (chunks.LocatorMap, boo return r0, r1 } -// Rem provides a mock function with given fields: chunkID -func (_m *ChunkRequests) Rem(chunkID flow.Identifier) bool { +// Remove provides a mock function with given fields: chunkID +func (_m *ChunkRequests) Remove(chunkID flow.Identifier) bool { ret := _m.Called(chunkID) var r0 bool diff --git a/module/mempool/mock/chunk_statuses.go b/module/mempool/mock/chunk_statuses.go index c8e58cb75d8..3214f577a1b 100644 --- 
a/module/mempool/mock/chunk_statuses.go +++ b/module/mempool/mock/chunk_statuses.go @@ -68,8 +68,8 @@ func (_m *ChunkStatuses) Get(chunkIndex uint64, resultID flow.Identifier) (*veri return r0, r1 } -// Rem provides a mock function with given fields: chunkIndex, resultID -func (_m *ChunkStatuses) Rem(chunkIndex uint64, resultID flow.Identifier) bool { +// Remove provides a mock function with given fields: chunkIndex, resultID +func (_m *ChunkStatuses) Remove(chunkIndex uint64, resultID flow.Identifier) bool { ret := _m.Called(chunkIndex, resultID) var r0 bool diff --git a/module/mempool/mock/collections.go b/module/mempool/mock/collections.go index 88c443d2a15..fb0adf01339 100644 --- a/module/mempool/mock/collections.go +++ b/module/mempool/mock/collections.go @@ -96,8 +96,8 @@ func (_m *Collections) Hash() flow.Identifier { return r0 } -// Rem provides a mock function with given fields: collID -func (_m *Collections) Rem(collID flow.Identifier) bool { +// Remove provides a mock function with given fields: collID +func (_m *Collections) Remove(collID flow.Identifier) bool { ret := _m.Called(collID) var r0 bool diff --git a/module/mempool/mock/deltas.go b/module/mempool/mock/deltas.go index 7659b241beb..ec5eec5ab2f 100644 --- a/module/mempool/mock/deltas.go +++ b/module/mempool/mock/deltas.go @@ -112,8 +112,8 @@ func (_m *Deltas) Limit() uint { return r0 } -// Rem provides a mock function with given fields: blockID -func (_m *Deltas) Rem(blockID flow.Identifier) bool { +// Remove provides a mock function with given fields: blockID +func (_m *Deltas) Remove(blockID flow.Identifier) bool { ret := _m.Called(blockID) var r0 bool diff --git a/module/mempool/mock/guarantees.go b/module/mempool/mock/guarantees.go index 65b21f030c7..836430bb1f1 100644 --- a/module/mempool/mock/guarantees.go +++ b/module/mempool/mock/guarantees.go @@ -96,8 +96,8 @@ func (_m *Guarantees) Hash() flow.Identifier { return r0 } -// Rem provides a mock function with given fields: collID -func (_m 
*Guarantees) Rem(collID flow.Identifier) bool { +// Remove provides a mock function with given fields: collID +func (_m *Guarantees) Remove(collID flow.Identifier) bool { ret := _m.Called(collID) var r0 bool diff --git a/module/mempool/mock/identifier_map.go b/module/mempool/mock/identifier_map.go index 6fd534ab42f..a084e26253c 100644 --- a/module/mempool/mock/identifier_map.go +++ b/module/mempool/mock/identifier_map.go @@ -87,8 +87,8 @@ func (_m *IdentifierMap) Keys() ([]flow.Identifier, bool) { return r0, r1 } -// Rem provides a mock function with given fields: key -func (_m *IdentifierMap) Rem(key flow.Identifier) bool { +// Remove provides a mock function with given fields: key +func (_m *IdentifierMap) Remove(key flow.Identifier) bool { ret := _m.Called(key) var r0 bool @@ -101,8 +101,8 @@ func (_m *IdentifierMap) Rem(key flow.Identifier) bool { return r0 } -// RemIdFromKey provides a mock function with given fields: key, id -func (_m *IdentifierMap) RemIdFromKey(key flow.Identifier, id flow.Identifier) error { +// RemoveIdFromKey provides a mock function with given fields: key, id +func (_m *IdentifierMap) RemoveIdFromKey(key flow.Identifier, id flow.Identifier) error { ret := _m.Called(key, id) var r0 error diff --git a/module/mempool/mock/identifiers.go b/module/mempool/mock/identifiers.go index 1e0b85722d5..10d6fa5fe83 100644 --- a/module/mempool/mock/identifiers.go +++ b/module/mempool/mock/identifiers.go @@ -57,8 +57,8 @@ func (_m *Identifiers) Has(id flow.Identifier) bool { return r0 } -// Rem provides a mock function with given fields: id -func (_m *Identifiers) Rem(id flow.Identifier) bool { +// Remove provides a mock function with given fields: id +func (_m *Identifiers) Remove(id flow.Identifier) bool { ret := _m.Called(id) var r0 bool diff --git a/module/mempool/mock/incorporated_result_seals.go b/module/mempool/mock/incorporated_result_seals.go index b930f0a1da0..1bd2a265ebb 100644 --- a/module/mempool/mock/incorporated_result_seals.go +++ 
b/module/mempool/mock/incorporated_result_seals.go @@ -106,8 +106,8 @@ func (_m *IncorporatedResultSeals) PruneUpToHeight(height uint64) error { return r0 } -// Rem provides a mock function with given fields: incorporatedResultID -func (_m *IncorporatedResultSeals) Rem(incorporatedResultID flow.Identifier) bool { +// Remove provides a mock function with given fields: incorporatedResultID +func (_m *IncorporatedResultSeals) Remove(incorporatedResultID flow.Identifier) bool { ret := _m.Called(incorporatedResultID) var r0 bool diff --git a/module/mempool/mock/pending_receipts.go b/module/mempool/mock/pending_receipts.go index 69739203f9a..38457f982ca 100644 --- a/module/mempool/mock/pending_receipts.go +++ b/module/mempool/mock/pending_receipts.go @@ -57,8 +57,8 @@ func (_m *PendingReceipts) PruneUpToHeight(height uint64) error { return r0 } -// Rem provides a mock function with given fields: receiptID -func (_m *PendingReceipts) Rem(receiptID flow.Identifier) bool { +// Remove provides a mock function with given fields: receiptID +func (_m *PendingReceipts) Remove(receiptID flow.Identifier) bool { ret := _m.Called(receiptID) var r0 bool diff --git a/module/mempool/mock/results.go b/module/mempool/mock/results.go index a5f4dd7c776..5b37048bd38 100644 --- a/module/mempool/mock/results.go +++ b/module/mempool/mock/results.go @@ -80,8 +80,8 @@ func (_m *Results) Has(resultID flow.Identifier) bool { return r0 } -// Rem provides a mock function with given fields: resultID -func (_m *Results) Rem(resultID flow.Identifier) bool { +// Remove provides a mock function with given fields: resultID +func (_m *Results) Remove(resultID flow.Identifier) bool { ret := _m.Called(resultID) var r0 bool diff --git a/module/mempool/mock/transaction_timings.go b/module/mempool/mock/transaction_timings.go index 8757cec0e38..32f4815025d 100644 --- a/module/mempool/mock/transaction_timings.go +++ b/module/mempool/mock/transaction_timings.go @@ -89,8 +89,8 @@ func (_m *TransactionTimings) 
ByID(txID flow.Identifier) (*flow.TransactionTimin return r0, r1 } -// Rem provides a mock function with given fields: txID -func (_m *TransactionTimings) Rem(txID flow.Identifier) bool { +// Remove provides a mock function with given fields: txID +func (_m *TransactionTimings) Remove(txID flow.Identifier) bool { ret := _m.Called(txID) var r0 bool diff --git a/module/mempool/mock/transactions.go b/module/mempool/mock/transactions.go index ff3c6704596..88166c3c53f 100644 --- a/module/mempool/mock/transactions.go +++ b/module/mempool/mock/transactions.go @@ -101,8 +101,8 @@ func (_m *Transactions) Hash() flow.Identifier { return r0 } -// Rem provides a mock function with given fields: txID -func (_m *Transactions) Rem(txID flow.Identifier) bool { +// Remove provides a mock function with given fields: txID +func (_m *Transactions) Remove(txID flow.Identifier) bool { ret := _m.Called(txID) var r0 bool diff --git a/module/mempool/pending_receipts.go b/module/mempool/pending_receipts.go index 84096d848a0..b26136d2a0c 100644 --- a/module/mempool/pending_receipts.go +++ b/module/mempool/pending_receipts.go @@ -12,7 +12,7 @@ type PendingReceipts interface { Add(receipt *flow.ExecutionReceipt) bool // Remove a pending receipt by ID - Rem(receiptID flow.Identifier) bool + Remove(receiptID flow.Identifier) bool // ByPreviousResultID returns all the pending receipts whose previous result id // matches the given result id diff --git a/module/mempool/results.go b/module/mempool/results.go index 65aaf70ec25..1c4a8a6e875 100644 --- a/module/mempool/results.go +++ b/module/mempool/results.go @@ -14,8 +14,8 @@ type Results interface { // false if it was already in the mempool. Add(result *flow.ExecutionResult) bool - // Rem will attempt to remove the result from the memory pool. - Rem(resultID flow.Identifier) bool + // Remove will attempt to remove the result from the memory pool. 
+ Remove(resultID flow.Identifier) bool // ByID retrieve the execution result with the given ID from the memory pool. // It will return false if it was not found in the mempool. diff --git a/module/mempool/state_deltas.go b/module/mempool/state_deltas.go index 3ec0aef2061..e41997554ee 100644 --- a/module/mempool/state_deltas.go +++ b/module/mempool/state_deltas.go @@ -18,9 +18,9 @@ type Deltas interface { // false if it was already in the mempool. Add(delta *messages.ExecutionStateDelta) bool - // Rem will remove the given block delta from the memory pool; it will + // Remove will remove the given block delta from the memory pool; it will // will return true if the block delta was known and removed. - Rem(blockID flow.Identifier) bool + Remove(blockID flow.Identifier) bool // ByID retrieve the block delta with the given ID from the memory // pool. It will return false if it was not found in the mempool. diff --git a/module/mempool/stdmap/assignments.go b/module/mempool/stdmap/assignments.go index 30d0ea178ff..d817477c64e 100644 --- a/module/mempool/stdmap/assignments.go +++ b/module/mempool/stdmap/assignments.go @@ -40,10 +40,10 @@ func (a *Assignments) Add(fingerprint flow.Identifier, assignment *chunkmodels.A return a.Backend.Add(chunkmodels.NewAssignmentDataPack(fingerprint, assignment)) } -// Rem will remove the given Assignment from the memory pool; it will +// Remove will remove the given Assignment from the memory pool; it will // return true if the Assignment was known and removed. -func (a *Assignments) Rem(assignmentID flow.Identifier) bool { - return a.Backend.Rem(assignmentID) +func (a *Assignments) Remove(assignmentID flow.Identifier) bool { + return a.Backend.Remove(assignmentID) } // Size will return the current size of the memory pool. 
diff --git a/module/mempool/stdmap/backDataHeapBenchmark_test.go b/module/mempool/stdmap/backDataHeapBenchmark_test.go index f3f62e1f83a..8cc78130b64 100644 --- a/module/mempool/stdmap/backDataHeapBenchmark_test.go +++ b/module/mempool/stdmap/backDataHeapBenchmark_test.go @@ -137,8 +137,8 @@ func (b *baselineLRU) Add(entityID flow.Identifier, entity flow.Entity) bool { return true } -// Rem will remove the item with the given hash. -func (b *baselineLRU) Rem(entityID flow.Identifier) (flow.Entity, bool) { +// Remove will remove the item with the given hash. +func (b *baselineLRU) Remove(entityID flow.Identifier) (flow.Entity, bool) { e, ok := b.c.Get(entityID) if !ok { return nil, false @@ -154,7 +154,7 @@ func (b *baselineLRU) Rem(entityID flow.Identifier) (flow.Entity, bool) { // Adjust will adjust the value item using the given function if the given key can be found. // Returns a bool which indicates whether the value was updated as well as the updated value func (b *baselineLRU) Adjust(entityID flow.Identifier, f func(flow.Entity) flow.Entity) (flow.Entity, bool) { - entity, removed := b.Rem(entityID) + entity, removed := b.Remove(entityID) if !removed { return nil, false } diff --git a/module/mempool/stdmap/backdata/mapBackData.go b/module/mempool/stdmap/backdata/mapBackData.go index 81cb42aeb04..bf22a54e438 100644 --- a/module/mempool/stdmap/backdata/mapBackData.go +++ b/module/mempool/stdmap/backdata/mapBackData.go @@ -34,8 +34,8 @@ func (b *MapBackData) Add(entityID flow.Identifier, entity flow.Entity) bool { return true } -// Rem removes the entity with the given identifier. -func (b *MapBackData) Rem(entityID flow.Identifier) (flow.Entity, bool) { +// Remove removes the entity with the given identifier. 
+func (b *MapBackData) Remove(entityID flow.Identifier) (flow.Entity, bool) { entity, exists := b.entities[entityID] if !exists { return nil, false diff --git a/module/mempool/stdmap/backend.go b/module/mempool/stdmap/backend.go index f21d86434e7..a14e679a45e 100644 --- a/module/mempool/stdmap/backend.go +++ b/module/mempool/stdmap/backend.go @@ -69,16 +69,16 @@ func (b *Backend) Add(entity flow.Entity) bool { return added } -// Rem will remove the item with the given hash. -func (b *Backend) Rem(entityID flow.Identifier) bool { - //bs1 := binstat.EnterTime(binstat.BinStdmap + ".w_lock.(Backend)Rem") +// Remove will remove the item with the given hash. +func (b *Backend) Remove(entityID flow.Identifier) bool { + //bs1 := binstat.EnterTime(binstat.BinStdmap + ".w_lock.(Backend)Remove") b.Lock() //binstat.Leave(bs1) - //bs2 := binstat.EnterTime(binstat.BinStdmap + ".inlock.(Backend)Rem") + //bs2 := binstat.EnterTime(binstat.BinStdmap + ".inlock.(Backend)Remove") //defer binstat.Leave(bs2) defer b.Unlock() - _, removed := b.backData.Rem(entityID) + _, removed := b.backData.Remove(entityID) return removed } diff --git a/module/mempool/stdmap/backend_test.go b/module/mempool/stdmap/backend_test.go index 81309db496c..87a516f5c3d 100644 --- a/module/mempool/stdmap/backend_test.go +++ b/module/mempool/stdmap/backend_test.go @@ -18,7 +18,7 @@ import ( "github.com/onflow/flow-go/utils/unittest" ) -func TestAddRem(t *testing.T) { +func TestAddRemove(t *testing.T) { item1 := unittest.MockEntityFixture() item2 := unittest.MockEntityFixture() @@ -41,7 +41,7 @@ func TestAddRem(t *testing.T) { }) t.Run("should be able to remove first", func(t *testing.T) { - removed := pool.Rem(item1.ID()) + removed := pool.Remove(item1.ID()) assert.True(t, removed) size := pool.Size() assert.EqualValues(t, uint(1), size) diff --git a/module/mempool/stdmap/chunk_data_packs.go b/module/mempool/stdmap/chunk_data_packs.go index f2129911b7b..9e5a2254e03 100644 --- 
a/module/mempool/stdmap/chunk_data_packs.go +++ b/module/mempool/stdmap/chunk_data_packs.go @@ -30,9 +30,9 @@ func (c *ChunkDataPacks) Add(cdp *flow.ChunkDataPack) bool { return added } -// Rem will remove chunk data pack by ID -func (c *ChunkDataPacks) Rem(chunkID flow.Identifier) bool { - removed := c.Backend.Rem(chunkID) +// Remove will remove chunk data pack by ID +func (c *ChunkDataPacks) Remove(chunkID flow.Identifier) bool { + removed := c.Backend.Remove(chunkID) return removed } diff --git a/module/mempool/stdmap/chunk_requests.go b/module/mempool/stdmap/chunk_requests.go index 5e301f2e5ad..28a37d36290 100644 --- a/module/mempool/stdmap/chunk_requests.go +++ b/module/mempool/stdmap/chunk_requests.go @@ -101,11 +101,11 @@ func (cs *ChunkRequests) Add(request *verification.ChunkDataPackRequest) bool { return err == nil } -// Rem provides deletion functionality from the memory pool. -// If there is a chunk request with this ID, Rem removes it and returns true. +// Remove provides deletion functionality from the memory pool. +// If there is a chunk request with this ID, Remove removes it and returns true. // Otherwise it returns false. 
-func (cs *ChunkRequests) Rem(chunkID flow.Identifier) bool { - return cs.Backend.Rem(chunkID) +func (cs *ChunkRequests) Remove(chunkID flow.Identifier) bool { + return cs.Backend.Remove(chunkID) } // PopAll atomically returns all locators associated with this chunk ID while clearing out the @@ -122,7 +122,7 @@ func (cs *ChunkRequests) PopAll(chunkID flow.Identifier) (chunks.LocatorMap, boo } locators = toChunkRequestStatus(entity).Locators - _, removed := backdata.Rem(chunkID) + _, removed := backdata.Remove(chunkID) if !removed { return fmt.Errorf("potential race condition on removing chunk request from mempool") } diff --git a/module/mempool/stdmap/chunk_statuses.go b/module/mempool/stdmap/chunk_statuses.go index e9aa37b51f0..25d2cf38930 100644 --- a/module/mempool/stdmap/chunk_statuses.go +++ b/module/mempool/stdmap/chunk_statuses.go @@ -55,12 +55,12 @@ func (cs *ChunkStatuses) Add(status *verification.ChunkStatus) bool { }) } -// Rem provides deletion functionality from the memory pool based on the pair of +// Remove provides deletion functionality from the memory pool based on the pair of // chunk index and result id. -// If there is a chunk status associated with this pair, Rem removes it and returns true. +// If there is a chunk status associated with this pair, Remove removes it and returns true. // Otherwise, it returns false. -func (cs *ChunkStatuses) Rem(chunkIndex uint64, resultID flow.Identifier) bool { - return cs.Backend.Rem(chunks.ChunkLocatorID(resultID, chunkIndex)) +func (cs *ChunkStatuses) Remove(chunkIndex uint64, resultID flow.Identifier) bool { + return cs.Backend.Remove(chunks.ChunkLocatorID(resultID, chunkIndex)) } // All returns all chunk statuses stored in this memory pool. 
diff --git a/module/mempool/stdmap/collections.go b/module/mempool/stdmap/collections.go index ae964e9a4cb..51b91739191 100644 --- a/module/mempool/stdmap/collections.go +++ b/module/mempool/stdmap/collections.go @@ -25,9 +25,9 @@ func (c *Collections) Add(coll *flow.Collection) bool { return added } -// Rem removes a collection by ID from memory -func (c *Collections) Rem(collID flow.Identifier) bool { - ok := c.Backend.Rem(collID) +// Remove removes a collection by ID from memory +func (c *Collections) Remove(collID flow.Identifier) bool { + ok := c.Backend.Remove(collID) return ok } diff --git a/module/mempool/stdmap/eject.go b/module/mempool/stdmap/eject.go index 8c16a9c60fd..4bbf321bcc7 100644 --- a/module/mempool/stdmap/eject.go +++ b/module/mempool/stdmap/eject.go @@ -85,7 +85,7 @@ func EjectTrueRandomFast(b *Backend) bool { i := 0 // index into the entities map for entityID, entity := range b.backData.All() { if i == next2Remove { - b.backData.Rem(entityID) // remove entity + b.backData.Remove(entityID) // remove entity for _, callback := range b.ejectionCallbacks { callback(entity) // notify callback } diff --git a/module/mempool/stdmap/guarantees_test.go b/module/mempool/stdmap/guarantees_test.go index c7d83d04b99..7bc356dd21b 100644 --- a/module/mempool/stdmap/guarantees_test.go +++ b/module/mempool/stdmap/guarantees_test.go @@ -45,7 +45,7 @@ func TestGuaranteePool(t *testing.T) { }) t.Run("should be able to remove second", func(t *testing.T) { - ok := pool.Rem(item2.ID()) + ok := pool.Remove(item2.ID()) assert.True(t, ok) }) diff --git a/module/mempool/stdmap/identifier_map.go b/module/mempool/stdmap/identifier_map.go index 58acd51ac65..4bb4c6f0716 100644 --- a/module/mempool/stdmap/identifier_map.go +++ b/module/mempool/stdmap/identifier_map.go @@ -44,7 +44,7 @@ func (i *IdentifierMap) Append(key, id flow.Identifier) error { } // removes map entry associated with key for update - if _, removed := backdata.Rem(key); !removed { + if _, removed := 
backdata.Remove(key); !removed { return fmt.Errorf("potential race condition on removing from identifier map") } } @@ -100,14 +100,14 @@ func (i *IdentifierMap) Has(key flow.Identifier) bool { return i.Backend.Has(key) } -// Rem removes the given key with all associated identifiers. -func (i *IdentifierMap) Rem(key flow.Identifier) bool { - return i.Backend.Rem(key) +// Remove removes the given key with all associated identifiers. +func (i *IdentifierMap) Remove(key flow.Identifier) bool { + return i.Backend.Remove(key) } -// RemIdFromKey removes the id from the list of identifiers associated with key. +// RemoveIdFromKey removes the id from the list of identifiers associated with key. // If the list becomes empty, it also removes the key from the map. -func (i *IdentifierMap) RemIdFromKey(key, id flow.Identifier) error { +func (i *IdentifierMap) RemoveIdFromKey(key, id flow.Identifier) error { err := i.Backend.Run(func(backdata mempool.BackData) error { // var ids map[flow.Identifier]struct{} entity, ok := backdata.ByID(key) @@ -127,7 +127,7 @@ func (i *IdentifierMap) RemIdFromKey(key, id flow.Identifier) error { } // removes map entry associated with key for update - if _, removed := backdata.Rem(key); !removed { + if _, removed := backdata.Remove(key); !removed { return fmt.Errorf("potential race condition on removing from identifier map") } diff --git a/module/mempool/stdmap/identifier_map_test.go b/module/mempool/stdmap/identifier_map_test.go index 22052701156..e493438e4b8 100644 --- a/module/mempool/stdmap/identifier_map_test.go +++ b/module/mempool/stdmap/identifier_map_test.go @@ -75,7 +75,7 @@ func TestIdentiferMap(t *testing.T) { // tests against removing a key t.Run("removing key", func(t *testing.T) { - ok := idMap.Rem(key1) + ok := idMap.Remove(key1) require.True(t, ok) // getting removed key should return false @@ -123,7 +123,7 @@ func TestIdentiferMap(t *testing.T) { // removes id1 and id2 from key3 // removing id1 - err = idMap.RemIdFromKey(key3, 
id1) + err = idMap.RemoveIdFromKey(key3, id1) require.NoError(t, err) // key3 should still reside on idMap and id2 should be attached to it @@ -133,7 +133,7 @@ func TestIdentiferMap(t *testing.T) { require.Contains(t, ids, id2) // removing id2 - err = idMap.RemIdFromKey(key3, id2) + err = idMap.RemoveIdFromKey(key3, id2) require.NoError(t, err) // by removing id2 from key3, it is left out of id @@ -150,7 +150,7 @@ func TestIdentiferMap(t *testing.T) { } // TestRaceCondition is meant for running with `-race` flag. -// It performs Append, Has, Get, and RemIdFromKey methods of IdentifierMap concurrently +// It performs Append, Has, Get, and RemoveIdFromKey methods of IdentifierMap concurrently // each in a different goroutine. // Running this test with `-race` flag detects and reports the existence of race condition if // it is the case. @@ -181,7 +181,7 @@ func TestRaceCondition(t *testing.T) { go func() { defer wg.Done() - require.NoError(t, idMap.RemIdFromKey(key, id)) + require.NoError(t, idMap.RemoveIdFromKey(key, id)) }() unittest.RequireReturnsBefore(t, wg.Wait, 1*time.Second, "test could not finish on time") diff --git a/module/mempool/stdmap/identifiers.go b/module/mempool/stdmap/identifiers.go index 56d09397d29..effd3f2ea35 100644 --- a/module/mempool/stdmap/identifiers.go +++ b/module/mempool/stdmap/identifiers.go @@ -33,10 +33,10 @@ func (i *Identifiers) Has(id flow.Identifier) bool { return i.Backend.Has(id) } -// Rem removes the given identifier from the memory pool; it will +// Remove removes the given identifier from the memory pool; it will // return true if the identifier was known and removed. 
-func (i *Identifiers) Rem(id flow.Identifier) bool { - return i.Backend.Rem(id) +func (i *Identifiers) Remove(id flow.Identifier) bool { + return i.Backend.Remove(id) } // All returns all identifiers stored in the mempool diff --git a/module/mempool/stdmap/incorporated_result_seals.go b/module/mempool/stdmap/incorporated_result_seals.go index 955018a9b14..81897f5f34f 100644 --- a/module/mempool/stdmap/incorporated_result_seals.go +++ b/module/mempool/stdmap/incorporated_result_seals.go @@ -54,7 +54,7 @@ func (ir *IncorporatedResultSeals) removeFromIndex(id flow.Identifier, height ui func (ir *IncorporatedResultSeals) removeByHeight(height uint64) { for sealID := range ir.byHeight[height] { - ir.backData.Rem(sealID) + ir.backData.Remove(sealID) } delete(ir.byHeight, height) } @@ -113,12 +113,12 @@ func (ir *IncorporatedResultSeals) ByID(id flow.Identifier) (*flow.IncorporatedR return entity.(*flow.IncorporatedResultSeal), true } -// Rem removes an IncorporatedResultSeal from the mempool -func (ir *IncorporatedResultSeals) Rem(id flow.Identifier) bool { +// Remove removes an IncorporatedResultSeal from the mempool +func (ir *IncorporatedResultSeals) Remove(id flow.Identifier) bool { removed := false err := ir.Backend.Run(func(_ mempool.BackData) error { var entity flow.Entity - entity, removed = ir.backData.Rem(id) + entity, removed = ir.backData.Remove(id) if !removed { return nil } diff --git a/module/mempool/stdmap/incorporated_result_seals_test.go b/module/mempool/stdmap/incorporated_result_seals_test.go index 86b3f0ca29b..fb1a4b450b9 100644 --- a/module/mempool/stdmap/incorporated_result_seals_test.go +++ b/module/mempool/stdmap/incorporated_result_seals_test.go @@ -110,8 +110,8 @@ func (m *icrSealsMachine) GetUnknown(t *rapid.T) { } -// Rem is a conditional action that removes a known element from the icrSeals -func (m *icrSealsMachine) Rem(t *rapid.T) { +// Remove is a conditional action that removes a known element from the icrSeals +func (m 
*icrSealsMachine) Remove(t *rapid.T) { n := len(m.state) // skip if the store is empty if n == 0 { @@ -120,7 +120,7 @@ func (m *icrSealsMachine) Rem(t *rapid.T) { i := rapid.IntRange(0, n-1).Draw(t, "i").(int) s := m.state[i] - ok := m.icrs.Rem(s.ID()) + ok := m.icrs.Remove(s.ID()) require.True(t, ok) // remove m[i], we don't care about ordering here @@ -129,9 +129,9 @@ func (m *icrSealsMachine) Rem(t *rapid.T) { } -// RemUnknown is an action that removes an unknown element from the icrSeals -// This mostly tests Rem has no insertion side-effects -func (m *icrSealsMachine) RemUnknown(t *rapid.T) { +// RemoveUnknown is an action that removes an unknown element from the icrSeals +// This mostly tests Remove has no insertion side-effects +func (m *icrSealsMachine) RemoveUnknown(t *rapid.T) { n := len(m.state) // skip if the store is empty if n == 0 { @@ -151,7 +151,7 @@ func (m *icrSealsMachine) RemUnknown(t *rapid.T) { } if unknown { - removed := m.icrs.Rem(seal.ID()) + removed := m.icrs.Remove(seal.ID()) require.False(t, removed) } // no modification of state @@ -189,7 +189,7 @@ func TestIncorporatedResultSeals(t *testing.T) { require.True(t, ok) require.Equal(t, seal, actual) - deleted := pool.Rem(seal.ID()) + deleted := pool.Remove(seal.ID()) require.True(t, deleted) _, ok = pool.ByID(seal.ID()) diff --git a/module/mempool/stdmap/pending_receipts.go b/module/mempool/stdmap/pending_receipts.go index 0aa654584f4..8a443504382 100644 --- a/module/mempool/stdmap/pending_receipts.go +++ b/module/mempool/stdmap/pending_receipts.go @@ -63,7 +63,7 @@ func removeReceipt( byPreviousResultID map[flow.Identifier]receiptsSet) { receiptID := receipt.ID() - entities.Rem(receiptID) + entities.Remove(receiptID) index := indexByPreviousResultID(receipt) siblings := byPreviousResultID[index] @@ -122,8 +122,8 @@ func (r *PendingReceipts) Add(receipt *flow.ExecutionReceipt) bool { return added } -// Rem will remove a receipt by ID. 
-func (r *PendingReceipts) Rem(receiptID flow.Identifier) bool { +// Remove will remove a receipt by ID. +func (r *PendingReceipts) Remove(receiptID flow.Identifier) bool { removed := false err := r.Backend.Run(func(backData mempool.BackData) error { entity, ok := backData.ByID(receiptID) diff --git a/module/mempool/stdmap/pending_receipts_test.go b/module/mempool/stdmap/pending_receipts_test.go index 32644a4cbdb..2c60fc0cdb9 100644 --- a/module/mempool/stdmap/pending_receipts_test.go +++ b/module/mempool/stdmap/pending_receipts_test.go @@ -41,7 +41,7 @@ func TestPendingReceipts(t *testing.T) { actual := pool.ByPreviousResultID(r.ExecutionResult.PreviousResultID) require.Equal(t, []*flow.ExecutionReceipt{r}, actual) - deleted := pool.Rem(r.ID()) + deleted := pool.Remove(r.ID()) require.True(t, deleted) actual = pool.ByPreviousResultID(r.ExecutionResult.PreviousResultID) @@ -83,7 +83,7 @@ func TestPendingReceipts(t *testing.T) { for i := 0; i < 100; i++ { r := rs[i] - ok := pool.Rem(r.ID()) + ok := pool.Remove(r.ID()) require.True(t, ok) } @@ -145,7 +145,7 @@ func TestPendingReceipts(t *testing.T) { // since there are 60 left, should remove 60 in total total = 0 for i := 0; i < 100; i++ { - ok := pool.Rem(rs[i].ID()) + ok := pool.Remove(rs[i].ID()) if ok { total++ } @@ -175,7 +175,7 @@ func TestPendingReceipts(t *testing.T) { unittest.Concurrently(100, func(i int) { r := rs[i] - ok := pool.Rem(r.ID()) + ok := pool.Remove(r.ID()) require.True(t, ok) }) diff --git a/module/mempool/stdmap/receipts.go b/module/mempool/stdmap/receipts.go index 4bdac3d6868..9e416972814 100644 --- a/module/mempool/stdmap/receipts.go +++ b/module/mempool/stdmap/receipts.go @@ -27,9 +27,9 @@ func (r *Receipts) Add(receipt *flow.ExecutionReceipt) bool { return added } -// Rem will remove a receipt by ID. -func (r *Receipts) Rem(receiptID flow.Identifier) bool { - removed := r.Backend.Rem(receiptID) +// Remove will remove a receipt by ID. 
+func (r *Receipts) Remove(receiptID flow.Identifier) bool { + removed := r.Backend.Remove(receiptID) return removed } diff --git a/module/mempool/stdmap/receipts_test.go b/module/mempool/stdmap/receipts_test.go index d0da9c7bd18..a664126caaf 100644 --- a/module/mempool/stdmap/receipts_test.go +++ b/module/mempool/stdmap/receipts_test.go @@ -41,7 +41,7 @@ func TestReceiptPool(t *testing.T) { }) t.Run("should be able to remove second", func(t *testing.T) { - ok := pool.Rem(item2.ID()) + ok := pool.Remove(item2.ID()) assert.True(t, ok) }) diff --git a/module/mempool/stdmap/results.go b/module/mempool/stdmap/results.go index e33c9ffdae0..ac679c37a89 100644 --- a/module/mempool/stdmap/results.go +++ b/module/mempool/stdmap/results.go @@ -27,9 +27,9 @@ func (r *Results) Add(result *flow.ExecutionResult) bool { return added } -// Rem will remove a result by ID. -func (r *Results) Rem(resultID flow.Identifier) bool { - removed := r.Backend.Rem(resultID) +// Remove will remove a result by ID. +func (r *Results) Remove(resultID flow.Identifier) bool { + removed := r.Backend.Remove(resultID) return removed } diff --git a/module/mempool/stdmap/times.go b/module/mempool/stdmap/times.go index aae75279e4c..e5e7c33218f 100644 --- a/module/mempool/stdmap/times.go +++ b/module/mempool/stdmap/times.go @@ -55,7 +55,7 @@ func (t *Times) ByID(id flow.Identifier) (time.Time, bool) { return tt.ti, true } -// Rem removes the time with the given ID. -func (t *Times) Rem(id flow.Identifier) bool { - return t.Backend.Rem(id) +// Remove removes the time with the given ID. 
+func (t *Times) Remove(id flow.Identifier) bool { + return t.Backend.Remove(id) } diff --git a/module/mempool/stdmap/times_test.go b/module/mempool/stdmap/times_test.go index 338211610d8..40ca4b18bae 100644 --- a/module/mempool/stdmap/times_test.go +++ b/module/mempool/stdmap/times_test.go @@ -32,7 +32,7 @@ func TestTimesPool(t *testing.T) { }) t.Run("should be able to remove", func(t *testing.T) { - ok := pool.Rem(id) + ok := pool.Remove(id) assert.True(t, ok) }) } diff --git a/module/mempool/stdmap/transaction_timings.go b/module/mempool/stdmap/transaction_timings.go index 543331b4edc..407347304ea 100644 --- a/module/mempool/stdmap/transaction_timings.go +++ b/module/mempool/stdmap/transaction_timings.go @@ -70,7 +70,7 @@ func (t *TransactionTimings) All() []*flow.TransactionTiming { return txs } -// Rem removes the transaction timing with the given ID. -func (t *TransactionTimings) Rem(txID flow.Identifier) bool { - return t.Backend.Rem(txID) +// Remove removes the transaction timing with the given ID. 
+func (t *TransactionTimings) Remove(txID flow.Identifier) bool { + return t.Backend.Remove(txID) } diff --git a/module/mempool/stdmap/transaction_timings_test.go b/module/mempool/stdmap/transaction_timings_test.go index 109c20a4e74..dc2d818b7ef 100644 --- a/module/mempool/stdmap/transaction_timings_test.go +++ b/module/mempool/stdmap/transaction_timings_test.go @@ -54,7 +54,7 @@ func TestTransactionTimingsPool(t *testing.T) { }) t.Run("should be able to remove second", func(t *testing.T) { - ok := pool.Rem(item2.ID()) + ok := pool.Remove(item2.ID()) assert.True(t, ok) }) diff --git a/module/mempool/stdmap/transactions_test.go b/module/mempool/stdmap/transactions_test.go index 24a40f77396..f16da60d505 100644 --- a/module/mempool/stdmap/transactions_test.go +++ b/module/mempool/stdmap/transactions_test.go @@ -42,7 +42,7 @@ func TestTransactionPool(t *testing.T) { }) t.Run("should be able to remove second", func(t *testing.T) { - ok := pool.Rem(item2.ID()) + ok := pool.Remove(item2.ID()) assert.True(t, ok) }) diff --git a/module/mempool/transaction_timings.go b/module/mempool/transaction_timings.go index 8dd9c38fe4a..f64f07d59d1 100644 --- a/module/mempool/transaction_timings.go +++ b/module/mempool/transaction_timings.go @@ -22,6 +22,6 @@ type TransactionTimings interface { // All returns all transaction timings from the mempool. All() []*flow.TransactionTiming - // Rem removes the transaction timing with the given ID. - Rem(txID flow.Identifier) bool + // Remove removes the transaction timing with the given ID. + Remove(txID flow.Identifier) bool } diff --git a/module/mempool/transactions.go b/module/mempool/transactions.go index 722a421d40e..ad5ef280b76 100644 --- a/module/mempool/transactions.go +++ b/module/mempool/transactions.go @@ -17,9 +17,9 @@ type Transactions interface { // return false if it was already in the mempool. 
Add(tx *flow.TransactionBody) bool - // Rem will remove the given transaction from the memory pool; it will + // Remove will remove the given transaction from the memory pool; it will // will return true if the transaction was known and removed. - Rem(txID flow.Identifier) bool + Remove(txID flow.Identifier) bool // ByID retrieve the transaction with the given ID from the memory // pool. It will return false if it was not found in the mempool. diff --git a/module/metrics/transaction.go b/module/metrics/transaction.go index e9a1dd54e67..333283567af 100644 --- a/module/metrics/transaction.go +++ b/module/metrics/transaction.go @@ -187,7 +187,7 @@ func (tc *TransactionCollector) TransactionFinalized(txID flow.Identifier, when // remove transaction timing from mempool if finalized and executed if !t.Finalized.IsZero() && !t.Executed.IsZero() { - tc.transactionTimings.Rem(txID) + tc.transactionTimings.Remove(txID) } } @@ -209,7 +209,7 @@ func (tc *TransactionCollector) TransactionExecuted(txID flow.Identifier, when t // remove transaction timing from mempool if finalized and executed if !t.Finalized.IsZero() && !t.Executed.IsZero() { - tc.transactionTimings.Rem(txID) + tc.transactionTimings.Remove(txID) } } @@ -273,7 +273,7 @@ func (tc *TransactionCollector) TransactionExpired(txID flow.Identifier) { return } tc.transactionSubmission.WithLabelValues("expired").Inc() - tc.transactionTimings.Rem(txID) + tc.transactionTimings.Remove(txID) } func (tc *TransactionCollector) UpdateExecutionReceiptMaxHeight(height uint64) { From fce032d9fc1aff290d71aac971b9763198d88211 Mon Sep 17 00:00:00 2001 From: Alexey Ivanov Date: Mon, 11 Jul 2022 15:33:27 -0700 Subject: [PATCH 125/223] duplicate error in the return statement --- integration/utils/contLoadGenerator.go | 6 +++--- integration/utils/tx_result.go | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/integration/utils/contLoadGenerator.go b/integration/utils/contLoadGenerator.go index 
1b47abced1d..b5699b09b19 100644 --- a/integration/utils/contLoadGenerator.go +++ b/integration/utils/contLoadGenerator.go @@ -276,9 +276,9 @@ func (lg *ContLoadGenerator) createAccounts(num int) error { <-ch log := lg.log.With().Str("tx_id", createAccountTx.ID().String()).Logger() - result := GetTransactionResult(context.Background(), lg.flowClient, createAccountTx.ID()) - if result.Error != nil { - return fmt.Errorf("failed to get transactions result: %w", result.Error) + result, err := GetTransactionResult(context.Background(), lg.flowClient, createAccountTx.ID()) + if err != nil { + return fmt.Errorf("failed to get transactions result: %w", err) } log.Trace().Str("status", result.Status.String()).Msg("account creation tx executed") diff --git a/integration/utils/tx_result.go b/integration/utils/tx_result.go index 0bc2a15bec6..da192e7e2f7 100644 --- a/integration/utils/tx_result.go +++ b/integration/utils/tx_result.go @@ -12,7 +12,7 @@ import ( ) // GetTransactionResult waits for the transaction to get into the terminal state and returns the result. 
-func GetTransactionResult(ctx context.Context, client access.Client, txID flowsdk.Identifier) *flowsdk.TransactionResult { +func GetTransactionResult(ctx context.Context, client access.Client, txID flowsdk.Identifier) (*flowsdk.TransactionResult, error) { var b retry.Backoff b = retry.NewFibonacci(100 * time.Millisecond) b = retry.WithMaxDuration(60*time.Second, b) @@ -38,7 +38,7 @@ func GetTransactionResult(ctx context.Context, client access.Client, txID flowsd } }) if err != nil { - return &flowsdk.TransactionResult{Error: err} + return &flowsdk.TransactionResult{Error: err}, err } - return result + return result, result.Error } From 583887ef8da1511ea826aa1a32cedccd73c2a1f3 Mon Sep 17 00:00:00 2001 From: Alexey Ivanov Date: Mon, 11 Jul 2022 15:48:23 -0700 Subject: [PATCH 126/223] improve naming --- integration/utils/contLoadGenerator.go | 2 +- integration/utils/tx_result.go | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/integration/utils/contLoadGenerator.go b/integration/utils/contLoadGenerator.go index b5699b09b19..a4d00ddfb2c 100644 --- a/integration/utils/contLoadGenerator.go +++ b/integration/utils/contLoadGenerator.go @@ -276,7 +276,7 @@ func (lg *ContLoadGenerator) createAccounts(num int) error { <-ch log := lg.log.With().Str("tx_id", createAccountTx.ID().String()).Logger() - result, err := GetTransactionResult(context.Background(), lg.flowClient, createAccountTx.ID()) + result, err := WaitForTransactionResult(context.Background(), lg.flowClient, createAccountTx.ID()) if err != nil { return fmt.Errorf("failed to get transactions result: %w", err) } diff --git a/integration/utils/tx_result.go b/integration/utils/tx_result.go index da192e7e2f7..1b791a0f310 100644 --- a/integration/utils/tx_result.go +++ b/integration/utils/tx_result.go @@ -11,8 +11,8 @@ import ( "github.com/onflow/flow-go-sdk/access" ) -// GetTransactionResult waits for the transaction to get into the terminal state and returns the result. 
-func GetTransactionResult(ctx context.Context, client access.Client, txID flowsdk.Identifier) (*flowsdk.TransactionResult, error) { +// WaitForTransactionResult waits for the transaction to get into the terminal state and returns the result. +func WaitForTransactionResult(ctx context.Context, client access.Client, txID flowsdk.Identifier) (*flowsdk.TransactionResult, error) { var b retry.Backoff b = retry.NewFibonacci(100 * time.Millisecond) b = retry.WithMaxDuration(60*time.Second, b) From 4e330f582843970cedd5a1b91c796f6253367171 Mon Sep 17 00:00:00 2001 From: Yahya Hassanzadeh Date: Tue, 12 Jul 2022 10:44:25 +0200 Subject: [PATCH 127/223] [BFT Testing] Fixes wintermute orchestrator race condition (#2780) * adds some comments * adds mutual exclusion to attack orchestrator * removes debug lines --- insecure/wintermute/attackOrchestrator.go | 65 ++++++++++++++----- .../wintermute/attackOrchestrator_test.go | 6 +- 2 files changed, 53 insertions(+), 18 deletions(-) diff --git a/insecure/wintermute/attackOrchestrator.go b/insecure/wintermute/attackOrchestrator.go index 25e0b2af713..92c60ba88d5 100644 --- a/insecure/wintermute/attackOrchestrator.go +++ b/insecure/wintermute/attackOrchestrator.go @@ -28,7 +28,9 @@ import ( // corrupted VN. // 5. Any other incoming messages to the orchestrator are passed through, i.e., are sent as they are in the original Flow network without any tampering. type Orchestrator struct { - sync.Mutex + attackStateLock sync.RWMutex // providing mutual exclusion for external reads to attack state. + receiptHandleLock sync.Mutex // ensuring at most one receipt is handled at a time, to avoid corrupting two concurrent receipts. + logger zerolog.Logger state *attackState @@ -88,9 +90,16 @@ func (o *Orchestrator) HandleEventFromCorruptedNode(event *insecure.Event) error default: // Any other event is just passed through the network as it is. 
err := o.network.Send(event) + if err != nil { return fmt.Errorf("could not send rpc on channel: %w", err) } + o.logger.Debug(). + Hex("corrupted_node_id", logging.ID(event.CorruptedNodeId)). + Str("channel_id", string(event.Channel)). + Str("protocol", event.Protocol.String()). + Str("type", fmt.Sprintf("%T", event)). + Msg("miscellaneous event has passed through") } return nil @@ -124,6 +133,10 @@ func (o *Orchestrator) corruptExecutionResult(receipt *flow.ExecutionReceipt) *f // If no attack has already been conducted, it corrupts the result of receipt and sends it to all corrupted execution nodes. // Otherwise, it just passes through the receipt to the sender. func (o *Orchestrator) handleExecutionReceiptEvent(receiptEvent *insecure.Event) error { + // ensuring at most one receipt is handled at a time, to avoid corrupting two concurrent receipts + o.receiptHandleLock.Lock() + defer o.receiptHandleLock.Unlock() + ok := o.corruptedNodeIds.Contains(receiptEvent.CorruptedNodeId) if !ok { return fmt.Errorf("sender of the event is not a corrupted node") @@ -154,8 +167,8 @@ func (o *Orchestrator) handleExecutionReceiptEvent(receiptEvent *insecure.Event) Uint32("targets_num", receiptEvent.TargetNum). Str("target_ids", fmt.Sprintf("%v", receiptEvent.TargetIds)).Logger() - if o.state != nil { - // non-nil state means an execution result has already been corrupted. + if _, _, conducted := o.AttackState(); conducted { + // an attack has already been conducted if receipt.ExecutionResult.ID() == o.state.originalResult.ID() { // receipt contains the original result that has been corrupted. 
// corrupted result must have already been sent to this node, so @@ -168,7 +181,6 @@ func (o *Orchestrator) handleExecutionReceiptEvent(receiptEvent *insecure.Event) if err != nil { return fmt.Errorf("could not send rpc on channel: %w", err) } - lg.Info().Msg("receipt event passed through") return nil } @@ -197,13 +209,14 @@ func (o *Orchestrator) handleExecutionReceiptEvent(receiptEvent *insecure.Event) if err != nil { return fmt.Errorf("could not send rpc on channel: %w", err) } + lg.Debug(). + Hex("corrupted_result_id", logging.ID(corruptedResult.ID())). + Hex("corrupted_execution_id", logging.ID(corruptedExecutionId)). + Msg("corrupted result successfully sent to corrupted execution node") } // saves state of attack for further replies - o.state = &attackState{ - originalResult: &receipt.ExecutionResult, - corruptedResult: corruptedResult, - } + o.updateAttackState(&receipt.ExecutionResult, corruptedResult) lg.Info(). Hex("corrupted_result_id", logging.ID(corruptedResult.ID())). Msg("result successfully corrupted") @@ -229,7 +242,8 @@ func (o *Orchestrator) handleChunkDataPackRequestEvent(chunkDataPackRequestEvent return fmt.Errorf("wrong sender role for chunk data pack request: %s", corruptedIdentity.Role.String()) } - if o.state != nil { + if _, _, conducted := o.AttackState(); conducted { + // an attack has already been conducted sent, err := o.replyWithAttestation(chunkDataPackRequestEvent) if err != nil { return fmt.Errorf("could not reply with attestation: %w", err) @@ -263,9 +277,9 @@ func (o *Orchestrator) handleChunkDataPackRequestEvent(chunkDataPackRequestEvent // handleChunkDataPackResponseEvent wintermutes the chunk data pack reply if it belongs to a corrupted result, and is meant to // be sent to an honest verification node. Otherwise, it is passed through. 
func (o *Orchestrator) handleChunkDataPackResponseEvent(chunkDataPackReplyEvent *insecure.Event) error { - if o.state != nil { - cdpRep := chunkDataPackReplyEvent.FlowProtocolEvent.(*messages.ChunkDataResponse) - + cdpRep := chunkDataPackReplyEvent.FlowProtocolEvent.(*messages.ChunkDataResponse) + if _, _, conducted := o.AttackState(); conducted { + // an attack has already been conducted lg := o.logger.With(). Hex("chunk_id", logging.ID(cdpRep.ChunkDataPack.ChunkID)). Hex("sender_id", logging.ID(chunkDataPackReplyEvent.CorruptedNodeId)). @@ -296,6 +310,10 @@ func (o *Orchestrator) handleChunkDataPackResponseEvent(chunkDataPackReplyEvent if err != nil { return fmt.Errorf("could not passed through chunk data reply: %w", err) } + o.logger.Debug(). + Hex("corrupted_id", logging.ID(chunkDataPackReplyEvent.CorruptedNodeId)). + Hex("chunk_id", logging.ID(cdpRep.ChunkDataPack.ID())). + Msg("chunk data pack response passed through") return nil } @@ -312,7 +330,8 @@ func (o *Orchestrator) handleResultApprovalEvent(resultApprovalEvent *insecure.E Hex("sender_id", logging.ID(resultApprovalEvent.CorruptedNodeId)). Str("target_ids", fmt.Sprintf("%v", resultApprovalEvent.TargetIds)).Logger() - if o.state != nil { + if _, _, conducted := o.AttackState(); conducted { + // an attack has already been conducted if o.state.originalResult.ID() == approval.Body.ExecutionResultID { lg.Info().Msg("wintermuting result approval for original un-corrupted execution result") return nil @@ -379,8 +398,8 @@ func (o *Orchestrator) replyWithAttestation(chunkDataPackRequestEvent *insecure. // AttackState returns the corrupted and original execution results involved in this attack. // Boolean return value determines whether attack conducted. func (o *Orchestrator) AttackState() (flow.ExecutionResult, flow.ExecutionResult, bool) { - o.Lock() - defer o.Unlock() + o.attackStateLock.RLock() + defer o.attackStateLock.RUnlock() if o.state == nil { // no attack yet conducted. 
@@ -389,3 +408,19 @@ func (o *Orchestrator) AttackState() (flow.ExecutionResult, flow.ExecutionResult return *o.state.corruptedResult, *o.state.originalResult, true } + +func (o *Orchestrator) updateAttackState(originalResult *flow.ExecutionResult, corruptedResult *flow.ExecutionResult) { + o.attackStateLock.Lock() + defer o.attackStateLock.Unlock() + + if o.state != nil { + // based on our testing assumptions, Wintermute attack must be conducted only once, extra attempts + // can be due to a bug. + panic("attempt on conducting an already conducted attack is not allowed") + } + + o.state = &attackState{ + originalResult: originalResult, + corruptedResult: corruptedResult, + } +} diff --git a/insecure/wintermute/attackOrchestrator_test.go b/insecure/wintermute/attackOrchestrator_test.go index f40cf20d920..21dbf808edb 100644 --- a/insecure/wintermute/attackOrchestrator_test.go +++ b/insecure/wintermute/attackOrchestrator_test.go @@ -180,15 +180,15 @@ func testConcurrentExecutionReceipts(t *testing.T, wintermuteOrchestrator.WithAttackNetwork(mockAttackNetwork) // imitates sending events from corrupted execution nodes to the attacker orchestrator. 
- corruptedEnEventSendWG := sync.WaitGroup{} - corruptedEnEventSendWG.Add(len(eventMap)) + corruptedEnEventSendWG := &sync.WaitGroup{} + l := len(eventMap) + corruptedEnEventSendWG.Add(l) for _, event := range eventMap { event := event // suppress loop variable go func() { err := wintermuteOrchestrator.HandleEventFromCorruptedNode(event) require.NoError(t, err) - corruptedEnEventSendWG.Done() }() } From 9086a7e0ae9f81c9a54d847214633fc7b21bb31b Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Tue, 12 Jul 2022 10:05:34 -0400 Subject: [PATCH 128/223] create single context for all test cases in each test --- .../authorized_sender_validator_test.go | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/network/validator/pubsub/authorized_sender_validator_test.go b/network/validator/pubsub/authorized_sender_validator_test.go index dd3d3b099fe..4b2f1d5d51b 100644 --- a/network/validator/pubsub/authorized_sender_validator_test.go +++ b/network/validator/pubsub/authorized_sender_validator_test.go @@ -44,6 +44,9 @@ func (s *TestAuthorizedSenderValidatorSuite) SetupTest() { // TestValidatorCallback_AuthorizedSender checks that the call back returned from AuthorizedSenderValidator does not return false positive // validation errors for all possible valid combinations (authorized sender role, message type). 
func (s *TestAuthorizedSenderValidatorSuite) TestValidatorCallback_AuthorizedSender() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + for _, c := range s.authorizedSenderTestCases { str := fmt.Sprintf("role (%s) should be authorized to send message type (%s) on channel (%s)", c.Identity.Role, c.MessageStr, c.Channel) s.Run(str, func() { @@ -52,9 +55,6 @@ func (s *TestAuthorizedSenderValidatorSuite) TestValidatorCallback_AuthorizedSen pid, err := unittest.PeerIDFromFlowID(c.Identity) s.Require().NoError(err) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - msgType, err := validate(ctx, pid, c.Message) s.Require().NoError(err) s.Require().Equal(c.MessageStr, msgType) @@ -69,15 +69,15 @@ func (s *TestAuthorizedSenderValidatorSuite) TestValidatorCallback_AuthorizedSen // TestValidatorCallback_UnAuthorizedSender checks that the call back returned from AuthorizedSenderValidator return's ErrUnauthorizedSender // validation error for all possible invalid combinations (unauthorized sender role, message type). 
func (s *TestAuthorizedSenderValidatorSuite) TestValidatorCallback_UnAuthorizedSender() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + for _, c := range s.unauthorizedSenderTestCases { str := fmt.Sprintf("role (%s) should not be authorized to send message type (%s) on channel (%s)", c.Identity.Role, c.MessageStr, c.Channel) s.Run(str, func() { pid, err := unittest.PeerIDFromFlowID(c.Identity) s.Require().NoError(err) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - validate := AuthorizedSenderValidator(zerolog.Nop(), c.Channel, c.GetIdentity) msgType, err := validate(ctx, pid, c.Message) s.Require().ErrorIs(err, message.ErrUnauthorizedRole) @@ -93,15 +93,15 @@ func (s *TestAuthorizedSenderValidatorSuite) TestValidatorCallback_UnAuthorizedS // TestValidatorCallback_UnAuthorizedMessageOnChannel for each invalid combination of message type and channel // the call back returned from AuthorizedSenderValidator returns the appropriate error message.ErrUnauthorizedMessageOnChannel. 
func (s *TestAuthorizedSenderValidatorSuite) TestValidatorCallback_UnAuthorizedMessageOnChannel() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + for _, c := range s.unauthorizedMessageOnChannelTestCases { str := fmt.Sprintf("message type (%s) should not be authorized to be sent on channel (%s)", c.MessageStr, c.Channel) s.Run(str, func() { pid, err := unittest.PeerIDFromFlowID(c.Identity) s.Require().NoError(err) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - validate := AuthorizedSenderValidator(zerolog.Nop(), c.Channel, c.GetIdentity) msgType, err := validate(ctx, pid, c.Message) @@ -201,10 +201,10 @@ func (s *TestAuthorizedSenderValidatorSuite) TestValidatorCallback_ValidationFai s.Require().Equal(pubsub.ValidationReject, pubsubResult) // nil messages are rejected - msgType, err = validate(context.Background(), pid, nil) + msgType, err = validate(ctx, pid, nil) s.Require().ErrorIs(err, ErrUnknownMessageType) s.Require().Equal("", msgType) - pubsubResult = validatePubsub(context.Background(), pid, nil) + pubsubResult = validatePubsub(ctx, pid, nil) s.Require().Equal(pubsub.ValidationReject, pubsubResult) }) From acb33a9f26ab7f4813544dc1e8059db7b5ae7253 Mon Sep 17 00:00:00 2001 From: Leon Yu Date: Tue, 12 Jul 2022 08:01:12 -0700 Subject: [PATCH 129/223] Unskipping join and leave ln test --- integration/tests/epochs/epoch_join_and_leave_ln_test.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/integration/tests/epochs/epoch_join_and_leave_ln_test.go b/integration/tests/epochs/epoch_join_and_leave_ln_test.go index 560d651553d..5ca72eee9f4 100644 --- a/integration/tests/epochs/epoch_join_and_leave_ln_test.go +++ b/integration/tests/epochs/epoch_join_and_leave_ln_test.go @@ -6,11 +6,9 @@ import ( "github.com/stretchr/testify/suite" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/utils/unittest" ) func TestEpochJoinAndLeaveLN(t *testing.T) { - unittest.SkipUnless(t, 
unittest.TEST_FLAKY, "epochs join/leave tests should be run on an machine with adequate resources") suite.Run(t, new(EpochJoinAndLeaveLNSuite)) } From 826de41d21f89feb79d6999411e94d8ebbafe796 Mon Sep 17 00:00:00 2001 From: lolpuddle Date: Tue, 12 Jul 2022 11:06:29 -0400 Subject: [PATCH 130/223] address specific mutex --- engine/access/rpc/backend/backend_accounts.go | 5 ++- engine/access/rpc/backend/backend_events.go | 6 ++- engine/access/rpc/backend/backend_scripts.go | 5 ++- .../rpc/backend/backend_transactions.go | 17 +++++++-- .../access/rpc/backend/connection_factory.go | 37 ++++++++++++++++--- .../rpc/backend/connection_factory_test.go | 22 +++++------ engine/access/rpc/engine.go | 4 +- 7 files changed, 70 insertions(+), 26 deletions(-) diff --git a/engine/access/rpc/backend/backend_accounts.go b/engine/access/rpc/backend/backend_accounts.go index 9a008a41def..82ab697446c 100644 --- a/engine/access/rpc/backend/backend_accounts.go +++ b/engine/access/rpc/backend/backend_accounts.go @@ -7,6 +7,7 @@ import ( "github.com/hashicorp/go-multierror" execproto "github.com/onflow/flow/protobuf/go/flow/execution" "github.com/rs/zerolog" + "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -159,7 +160,9 @@ func (b *backendAccounts) tryGetAccount(ctx context.Context, execNode *flow.Iden } resp, err := execRPCClient.GetAccountAtBlockID(ctx, &req) if err != nil { - b.connFactory.InvalidateExecutionAPIClient(execNode.Address) + if err == grpc.ErrServerStopped { + b.connFactory.InvalidateExecutionAPIClient(execNode.Address) + } return nil, err } return resp, nil diff --git a/engine/access/rpc/backend/backend_events.go b/engine/access/rpc/backend/backend_events.go index 7b17548edc8..bad58347945 100644 --- a/engine/access/rpc/backend/backend_events.go +++ b/engine/access/rpc/backend/backend_events.go @@ -5,10 +5,10 @@ import ( "encoding/hex" "errors" "fmt" - "github.com/hashicorp/go-multierror" execproto 
"github.com/onflow/flow/protobuf/go/flow/execution" "github.com/rs/zerolog" + "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -213,7 +213,9 @@ func (b *backendEvents) tryGetEvents(ctx context.Context, } resp, err := execRPCClient.GetEventsForBlockIDs(ctx, &req) if err != nil { - b.connFactory.InvalidateExecutionAPIClient(execNode.Address) + if err == grpc.ErrServerStopped { + b.connFactory.InvalidateExecutionAPIClient(execNode.Address) + } return nil, err } return resp, nil diff --git a/engine/access/rpc/backend/backend_scripts.go b/engine/access/rpc/backend/backend_scripts.go index 5789fee56c7..f19b683a301 100644 --- a/engine/access/rpc/backend/backend_scripts.go +++ b/engine/access/rpc/backend/backend_scripts.go @@ -10,6 +10,7 @@ import ( "github.com/hashicorp/go-multierror" execproto "github.com/onflow/flow/protobuf/go/flow/execution" "github.com/rs/zerolog" + "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -171,7 +172,9 @@ func (b *backendScripts) tryExecuteScript(ctx context.Context, execNode *flow.Id } execResp, err := execRPCClient.ExecuteScriptAtBlockID(ctx, &req) if err != nil { - b.connFactory.InvalidateExecutionAPIClient(execNode.Address) + if err == grpc.ErrServerStopped { + b.connFactory.InvalidateExecutionAPIClient(execNode.Address) + } return nil, status.Errorf(status.Code(err), "failed to execute the script on the execution node %s: %v", execNode.String(), err) } return execResp.GetValue(), nil diff --git a/engine/access/rpc/backend/backend_transactions.go b/engine/access/rpc/backend/backend_transactions.go index e8829deaaaf..212c7c1f958 100644 --- a/engine/access/rpc/backend/backend_transactions.go +++ b/engine/access/rpc/backend/backend_transactions.go @@ -11,6 +11,7 @@ import ( "github.com/onflow/flow/protobuf/go/flow/entities" execproto "github.com/onflow/flow/protobuf/go/flow/execution" "github.com/rs/zerolog" + "google.golang.org/grpc" 
"google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -151,7 +152,9 @@ func (b *backendTransactions) sendTransactionToCollector(ctx context.Context, err = b.grpcTxSend(ctx, collectionRPC, tx) if err != nil { - b.connFactory.InvalidateAccessAPIClient(collectionNodeAddr) + if err == grpc.ErrServerStopped { + b.connFactory.InvalidateAccessAPIClient(collectionNodeAddr) + } return fmt.Errorf("failed to send transaction to collection node at %s: %v", collectionNodeAddr, err) } return nil @@ -708,7 +711,9 @@ func (b *backendTransactions) tryGetTransactionResult( } resp, err := execRPCClient.GetTransactionResult(ctx, &req) if err != nil { - b.connFactory.InvalidateExecutionAPIClient(execNode.Address) + if err == grpc.ErrServerStopped { + b.connFactory.InvalidateExecutionAPIClient(execNode.Address) + } return nil, err } return resp, err @@ -762,7 +767,9 @@ func (b *backendTransactions) tryGetTransactionResultsByBlockID( } resp, err := execRPCClient.GetTransactionResultsByBlockID(ctx, &req) if err != nil { - b.connFactory.InvalidateExecutionAPIClient(execNode.Address) + if err == grpc.ErrServerStopped { + b.connFactory.InvalidateExecutionAPIClient(execNode.Address) + } return nil, err } return resp, err @@ -817,7 +824,9 @@ func (b *backendTransactions) tryGetTransactionResultByIndex( } resp, err := execRPCClient.GetTransactionResultByIndex(ctx, &req) if err != nil { - b.connFactory.InvalidateExecutionAPIClient(execNode.Address) + if err == grpc.ErrServerStopped { + b.connFactory.InvalidateExecutionAPIClient(execNode.Address) + } return nil, err } return resp, err diff --git a/engine/access/rpc/backend/connection_factory.go b/engine/access/rpc/backend/connection_factory.go index 4ac8ca55059..062d0091fe4 100644 --- a/engine/access/rpc/backend/connection_factory.go +++ b/engine/access/rpc/backend/connection_factory.go @@ -12,6 +12,7 @@ import ( "github.com/onflow/flow/protobuf/go/flow/execution" "google.golang.org/grpc" "google.golang.org/grpc/connectivity" + 
"google.golang.org/grpc/keepalive" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/utils/grpcutils" @@ -52,6 +53,11 @@ type ConnectionFactoryImpl struct { AccessMetrics module.AccessMetrics } +type ConnectionCacheStore struct { + ClientConn *grpc.ClientConn + mutex *sync.Mutex +} + // createConnection creates new gRPC connections to remote node func (cf *ConnectionFactoryImpl) createConnection(address string, timeout time.Duration) (*grpc.ClientConn, error) { @@ -59,6 +65,11 @@ func (cf *ConnectionFactoryImpl) createConnection(address string, timeout time.D timeout = defaultClientTimeout } + keepaliveParams := keepalive.ClientParameters{ + Time: 10 * time.Second, + Timeout: timeout, + } + // ClientConn's default KeepAlive on connections is indefinite, assuming the timeout isn't reached // The connections should be safe to be persisted and reused // https://pkg.go.dev/google.golang.org/grpc#WithKeepaliveParams @@ -67,6 +78,7 @@ func (cf *ConnectionFactoryImpl) createConnection(address string, timeout time.D address, grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(grpcutils.DefaultMaxMsgSize)), grpc.WithInsecure(), //nolint:staticcheck + grpc.WithKeepaliveParams(keepaliveParams), WithClientUnaryInterceptor(timeout)) if err != nil { return nil, fmt.Errorf("failed to connect to address %s: %w", address, err) @@ -76,22 +88,37 @@ func (cf *ConnectionFactoryImpl) createConnection(address string, timeout time.D func (cf *ConnectionFactoryImpl) retrieveConnection(grpcAddress string, timeout time.Duration) (*grpc.ClientConn, error) { var conn *grpc.ClientConn + var mutex *sync.Mutex if res, ok := cf.ConnectionsCache.Get(grpcAddress); ok { - conn = res.(*grpc.ClientConn) + conn = res.(ConnectionCacheStore).ClientConn + mutex = res.(ConnectionCacheStore).mutex + mutex.Lock() + defer mutex.Unlock() if cf.AccessMetrics != nil { cf.AccessMetrics.ConnectionFromPoolRetrieved() } } if conn == nil || conn.GetState() != connectivity.Ready { + cf.lock.Lock() + // 
updates to the cache don't trigger evictions; this line closes connections before re-establishing new ones + if conn != nil { + conn.Close() + } var err error conn, err = cf.createConnection(grpcAddress, timeout) if err != nil { return nil, err } - cf.lock.Lock() - // This line ensures that when a connection is renewed, the previously cached connection is evicted and closed - cf.ConnectionsCache.Remove(grpcAddress) - cf.ConnectionsCache.Add(grpcAddress, conn) + + store := ConnectionCacheStore{ + ClientConn: conn, + mutex: new(sync.Mutex), + } + if mutex != nil { + store.mutex = mutex + } + + cf.ConnectionsCache.Add(grpcAddress, store) cf.lock.Unlock() if cf.AccessMetrics != nil { cf.AccessMetrics.TotalConnectionsInPool(uint(cf.ConnectionsCache.Len()), cf.CacheSize) diff --git a/engine/access/rpc/backend/connection_factory_test.go b/engine/access/rpc/backend/connection_factory_test.go index 49bcf14e4ca..0f2473b89c4 100644 --- a/engine/access/rpc/backend/connection_factory_test.go +++ b/engine/access/rpc/backend/connection_factory_test.go @@ -38,7 +38,7 @@ func TestProxyAccessAPI(t *testing.T) { connectionFactory.CollectionGRPCPort = cn.port // set the connection pool cache size cache, _ := lru.NewWithEvict(5, func(_, evictedValue interface{}) { - evictedValue.(*grpc.ClientConn).Close() + evictedValue.(ConnectionCacheStore).ClientConn.Close() }) connectionFactory.ConnectionsCache = cache // set metrics reporting @@ -77,7 +77,7 @@ func TestProxyExecutionAPI(t *testing.T) { connectionFactory.ExecutionGRPCPort = en.port // set the connection pool cache size cache, _ := lru.NewWithEvict(5, func(_, evictedValue interface{}) { - evictedValue.(*grpc.ClientConn).Close() + evictedValue.(ConnectionCacheStore).ClientConn.Close() }) connectionFactory.ConnectionsCache = cache // set metrics reporting @@ -116,7 +116,7 @@ func TestProxyAccessAPIConnectionReuse(t *testing.T) { connectionFactory.CollectionGRPCPort = cn.port // set the connection pool cache size cache, _ := 
lru.NewWithEvict(5, func(_, evictedValue interface{}) { - evictedValue.(*grpc.ClientConn).Close() + evictedValue.(ConnectionCacheStore).ClientConn.Close() }) connectionFactory.ConnectionsCache = cache // set metrics reporting @@ -135,7 +135,7 @@ func TestProxyAccessAPIConnectionReuse(t *testing.T) { var conn *grpc.ClientConn res, ok := connectionFactory.ConnectionsCache.Get(proxyConnectionFactory.targetAddress) assert.True(t, ok) - conn = res.(*grpc.ClientConn) + conn = res.(ConnectionCacheStore).ClientConn // check if api client can be rebuilt with retrieved connection accessAPIClient := access.NewAccessAPIClient(conn) @@ -162,7 +162,7 @@ func TestProxyExecutionAPIConnectionReuse(t *testing.T) { connectionFactory.ExecutionGRPCPort = en.port // set the connection pool cache size cache, _ := lru.NewWithEvict(5, func(_, evictedValue interface{}) { - evictedValue.(*grpc.ClientConn).Close() + evictedValue.(ConnectionCacheStore).ClientConn.Close() }) connectionFactory.ConnectionsCache = cache // set metrics reporting @@ -181,7 +181,7 @@ func TestProxyExecutionAPIConnectionReuse(t *testing.T) { var conn *grpc.ClientConn res, ok := connectionFactory.ConnectionsCache.Get(proxyConnectionFactory.targetAddress) assert.True(t, ok) - conn = res.(*grpc.ClientConn) + conn = res.(ConnectionCacheStore).ClientConn // check if api client can be rebuilt with retrieved connection executionAPIClient := execution.NewExecutionAPIClient(conn) @@ -215,7 +215,7 @@ func TestExecutionNodeClientTimeout(t *testing.T) { connectionFactory.ExecutionNodeGRPCTimeout = timeout // set the connection pool cache size cache, _ := lru.NewWithEvict(5, func(_, evictedValue interface{}) { - evictedValue.(*grpc.ClientConn).Close() + evictedValue.(ConnectionCacheStore).ClientConn.Close() }) connectionFactory.ConnectionsCache = cache // set metrics reporting @@ -257,7 +257,7 @@ func TestCollectionNodeClientTimeout(t *testing.T) { connectionFactory.CollectionNodeGRPCTimeout = timeout // set the connection pool 
cache size cache, _ := lru.NewWithEvict(5, func(_, evictedValue interface{}) { - evictedValue.(*grpc.ClientConn).Close() + evictedValue.(ConnectionCacheStore).ClientConn.Close() }) connectionFactory.ConnectionsCache = cache // set metrics reporting @@ -299,7 +299,7 @@ func TestConnectionPoolFull(t *testing.T) { connectionFactory.CollectionGRPCPort = cn1.port // set the connection pool cache size cache, _ := lru.NewWithEvict(2, func(_, evictedValue interface{}) { - evictedValue.(*grpc.ClientConn).Close() + evictedValue.(ConnectionCacheStore).ClientConn.Close() }) connectionFactory.ConnectionsCache = cache // set metrics reporting @@ -368,7 +368,7 @@ func TestConnectionPoolStale(t *testing.T) { // set the connection pool cache size cache, _ := lru.NewWithEvict(5, func(_, evictedValue interface{}) { - evictedValue.(*grpc.ClientConn).Close() + evictedValue.(ConnectionCacheStore).ClientConn.Close() }) connectionFactory.ConnectionsCache = cache // set metrics reporting @@ -401,7 +401,7 @@ func TestConnectionPoolStale(t *testing.T) { var conn *grpc.ClientConn res, ok := connectionFactory.ConnectionsCache.Get(proxyConnectionFactory.targetAddress) assert.True(t, ok) - conn = res.(*grpc.ClientConn) + conn = res.(ConnectionCacheStore).ClientConn // check if api client can be rebuilt with retrieved connection accessAPIClient := access.NewAccessAPIClient(conn) diff --git a/engine/access/rpc/engine.go b/engine/access/rpc/engine.go index a4a45ff1a0a..fda3109ff8e 100644 --- a/engine/access/rpc/engine.go +++ b/engine/access/rpc/engine.go @@ -86,7 +86,7 @@ func NewBuilder(log zerolog.Logger, executionGRPCPort uint, retryEnabled bool, rpcMetricsEnabled bool, - apiRatelimits map[string]int, // the api rate limit (max calls per second) for each of the Access API e.g. Ping->100, GetTransaction->300 + apiRatelimits map[string]int, // the api rate limit (max calls per second) for each of the Access API e.g. 
Ping->100, GetTransaction->300 apiBurstLimits map[string]int, // the api burst limit (max calls at the same time) for each of the Access API e.g. Ping->50, GetTransaction->10 ) (*RPCEngineBuilder, error) { @@ -137,7 +137,7 @@ func NewBuilder(log zerolog.Logger, cacheSize = backend.DefaultConnectionPoolSize } cache, err := lru.NewWithEvict(int(cacheSize), func(_, evictedValue interface{}) { - evictedValue.(*grpc.ClientConn).Close() + evictedValue.(backend.ConnectionCacheStore).ClientConn.Close() }) if err != nil { return nil, fmt.Errorf("could not initialize connection pool cache: %w", err) From a04f2a787e8c6b25629a8feac3677ec390f75513 Mon Sep 17 00:00:00 2001 From: lolpuddle Date: Tue, 12 Jul 2022 11:27:13 -0400 Subject: [PATCH 131/223] lint fix + comments --- engine/access/rpc/backend/backend_events.go | 1 + engine/access/rpc/backend/connection_factory.go | 6 ++++++ engine/access/rpc/engine.go | 2 +- 3 files changed, 8 insertions(+), 1 deletion(-) diff --git a/engine/access/rpc/backend/backend_events.go b/engine/access/rpc/backend/backend_events.go index bad58347945..fbdcdaf65a0 100644 --- a/engine/access/rpc/backend/backend_events.go +++ b/engine/access/rpc/backend/backend_events.go @@ -5,6 +5,7 @@ import ( "encoding/hex" "errors" "fmt" + "github.com/hashicorp/go-multierror" execproto "github.com/onflow/flow/protobuf/go/flow/execution" "github.com/rs/zerolog" diff --git a/engine/access/rpc/backend/connection_factory.go b/engine/access/rpc/backend/connection_factory.go index 062d0091fe4..2a7584bba46 100644 --- a/engine/access/rpc/backend/connection_factory.go +++ b/engine/access/rpc/backend/connection_factory.go @@ -92,6 +92,10 @@ func (cf *ConnectionFactoryImpl) retrieveConnection(grpcAddress string, timeout if res, ok := cf.ConnectionsCache.Get(grpcAddress); ok { conn = res.(ConnectionCacheStore).ClientConn mutex = res.(ConnectionCacheStore).mutex + + // we lock this mutex to prevent a scenario where the connection is not good, which will result in + // 
re-establishing the connection for this address. if the mutex is not locked, we may attempt to re-establish + // the connection multiple times which would result in cache thrashing. mutex.Lock() defer mutex.Unlock() if cf.AccessMetrics != nil { @@ -99,6 +103,8 @@ func (cf *ConnectionFactoryImpl) retrieveConnection(grpcAddress string, timeout } } if conn == nil || conn.GetState() != connectivity.Ready { + // this lock prevents a memory leak where a race condition may occur if 2 requests to a new connection at the + // same address occur. the second add would overwrite the first without closing the connection cf.lock.Lock() // updates to the cache don't trigger evictions; this line closes connections before re-establishing new ones if conn != nil { diff --git a/engine/access/rpc/engine.go b/engine/access/rpc/engine.go index fda3109ff8e..2a22f6d03a1 100644 --- a/engine/access/rpc/engine.go +++ b/engine/access/rpc/engine.go @@ -86,7 +86,7 @@ func NewBuilder(log zerolog.Logger, executionGRPCPort uint, retryEnabled bool, rpcMetricsEnabled bool, - apiRatelimits map[string]int, // the api rate limit (max calls per second) for each of the Access API e.g. Ping->100, GetTransaction->300 + apiRatelimits map[string]int, // the api rate limit (max calls per second) for each of the Access API e.g. Ping->100, GetTransaction->300 apiBurstLimits map[string]int, // the api burst limit (max calls at the same time) for each of the Access API e.g. Ping->50, GetTransaction->10 ) (*RPCEngineBuilder, error) { From 3371ab0287d08eb075805e7ec57c130bd7d61280 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Tue, 12 Jul 2022 12:00:07 -0400 Subject: [PATCH 132/223] Update middleware.go --- network/middleware.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/network/middleware.go b/network/middleware.go index d755c0dce33..feb8cd5ddc5 100644 --- a/network/middleware.go +++ b/network/middleware.go @@ -36,15 +36,15 @@ type Middleware interface { // effort. 
// All errors returned from this function can be considered benign. - Publish(msg *message.Message, channel Channel) error + Publish(msg *message.Message, channel channels.Channel) error // Subscribe subscribes the middleware to a channel. // No errors are expected during normal operation. - Subscribe(channel Channel) error + Subscribe(channel channels.Channel) error // Unsubscribe unsubscribes the middleware from a channel. // All errors returned from this function can be considered benign. - Unsubscribe(channel Channel) error + Unsubscribe(channel channels.Channel) error // UpdateNodeAddresses fetches and updates the addresses of all the authorized participants // in the Flow protocol. From b2608f7e7e37ada3b1661b97b4dd8c215c7a833a Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Tue, 12 Jul 2022 12:13:10 -0400 Subject: [PATCH 133/223] Update middleware.go --- network/middleware.go | 1 - 1 file changed, 1 deletion(-) diff --git a/network/middleware.go b/network/middleware.go index feb8cd5ddc5..2c6008977ab 100644 --- a/network/middleware.go +++ b/network/middleware.go @@ -34,7 +34,6 @@ type Middleware interface { // Publish publishes a message on the channel. It models a distributed broadcast where the message is meant for all or // a many nodes subscribing to the channel. It does not guarantee the delivery though, and operates on a best // effort. - // All errors returned from this function can be considered benign. 
Publish(msg *message.Message, channel channels.Channel) error From f62f93f08af80b92c4715fab39c93d9322636023 Mon Sep 17 00:00:00 2001 From: Jordan Schalm Date: Tue, 12 Jul 2022 09:15:01 -0700 Subject: [PATCH 134/223] Apply suggestions from code review Co-authored-by: Khalil Claybon --- model/flow/sealing_segment.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/model/flow/sealing_segment.go b/model/flow/sealing_segment.go index e0611e02597..42def455f04 100644 --- a/model/flow/sealing_segment.go +++ b/model/flow/sealing_segment.go @@ -44,7 +44,7 @@ import ( // ROOT <- A <- B // // All non-root sealing segments contain more than one block. -// Sealing segments is in ascending height order. +// Sealing segments are in ascending height order. // // In addition to storing the blocks within the sealing segment, as defined above, // the SealingSegment structure also stores any resources which are referenced @@ -91,7 +91,7 @@ func (segment *SealingSegment) Lowest() *Block { // FinalizedSeal returns the seal that seals the lowest block. // Per specification, this seal must be included in a SealingSegment. -// The receiver SealingSegment must be validated. +// The SealingSegment must be validated. // No errors are expected during normal operation. 
func (segment *SealingSegment) FinalizedSeal() (*Seal, error) { if isRootSegment(segment.LatestSeals) { @@ -358,7 +358,7 @@ func (builder *SealingSegmentBuilder) validateSegment() error { // validate the latest seal is for the lowest block _, err := findLatestSealForLowestBlock(builder.blocks, builder.latestSeals) if err != nil { - return fmt.Errorf("sealing segment missing (block_id=%x) highest (block_id%x) %v: %w", builder.lowest().ID(), builder.highest().ID(), err, ErrSegmentMissingSeal) + return fmt.Errorf("sealing segment missing seal (lowest block id: %x) (highest block id: %x) %v: %w", builder.lowest().ID(), builder.highest().ID(), err, ErrSegmentMissingSeal) } return nil From 900f3478270351e9e9847ec957fc350e3c58c6dc Mon Sep 17 00:00:00 2001 From: Jordan Schalm Date: Tue, 12 Jul 2022 09:19:43 -0700 Subject: [PATCH 135/223] Apply suggestions from code review Co-authored-by: Khalil Claybon --- model/flow/sealing_segment.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/model/flow/sealing_segment.go b/model/flow/sealing_segment.go index 42def455f04..aca1349df92 100644 --- a/model/flow/sealing_segment.go +++ b/model/flow/sealing_segment.go @@ -54,10 +54,10 @@ import ( // * results referenced by seals within segment payloads // * seals which represent the latest state commitment as of a segment block type SealingSegment struct { - // Blocks contains the chain segment blocks in ascending height order. + // Blocks contain the chain segment blocks in ascending height order. Blocks []*Block - // ExecutionResults contains any results which are referenced by receipts + // ExecutionResults contain any results which are referenced by receipts // or seals in the sealing segment, but not included in any segment block // payloads. 
// From 3a75d94e272ba53891a9ef8bad087120ee6f938d Mon Sep 17 00:00:00 2001 From: lolpuddle Date: Tue, 12 Jul 2022 12:51:09 -0400 Subject: [PATCH 136/223] error check --- engine/access/rpc/backend/backend_accounts.go | 3 +-- engine/access/rpc/backend/backend_events.go | 3 +-- engine/access/rpc/backend/backend_scripts.go | 3 +-- engine/access/rpc/backend/backend_transactions.go | 9 ++++----- 4 files changed, 7 insertions(+), 11 deletions(-) diff --git a/engine/access/rpc/backend/backend_accounts.go b/engine/access/rpc/backend/backend_accounts.go index 82ab697446c..3c3ac5b080b 100644 --- a/engine/access/rpc/backend/backend_accounts.go +++ b/engine/access/rpc/backend/backend_accounts.go @@ -7,7 +7,6 @@ import ( "github.com/hashicorp/go-multierror" execproto "github.com/onflow/flow/protobuf/go/flow/execution" "github.com/rs/zerolog" - "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -160,7 +159,7 @@ func (b *backendAccounts) tryGetAccount(ctx context.Context, execNode *flow.Iden } resp, err := execRPCClient.GetAccountAtBlockID(ctx, &req) if err != nil { - if err == grpc.ErrServerStopped { + if status.Code(err) == codes.Unavailable { b.connFactory.InvalidateExecutionAPIClient(execNode.Address) } return nil, err diff --git a/engine/access/rpc/backend/backend_events.go b/engine/access/rpc/backend/backend_events.go index fbdcdaf65a0..cc047606b3a 100644 --- a/engine/access/rpc/backend/backend_events.go +++ b/engine/access/rpc/backend/backend_events.go @@ -9,7 +9,6 @@ import ( "github.com/hashicorp/go-multierror" execproto "github.com/onflow/flow/protobuf/go/flow/execution" "github.com/rs/zerolog" - "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -214,7 +213,7 @@ func (b *backendEvents) tryGetEvents(ctx context.Context, } resp, err := execRPCClient.GetEventsForBlockIDs(ctx, &req) if err != nil { - if err == grpc.ErrServerStopped { + if status.Code(err) == codes.Unavailable { 
b.connFactory.InvalidateExecutionAPIClient(execNode.Address) } return nil, err diff --git a/engine/access/rpc/backend/backend_scripts.go b/engine/access/rpc/backend/backend_scripts.go index f19b683a301..ba0c72ed726 100644 --- a/engine/access/rpc/backend/backend_scripts.go +++ b/engine/access/rpc/backend/backend_scripts.go @@ -10,7 +10,6 @@ import ( "github.com/hashicorp/go-multierror" execproto "github.com/onflow/flow/protobuf/go/flow/execution" "github.com/rs/zerolog" - "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -172,7 +171,7 @@ func (b *backendScripts) tryExecuteScript(ctx context.Context, execNode *flow.Id } execResp, err := execRPCClient.ExecuteScriptAtBlockID(ctx, &req) if err != nil { - if err == grpc.ErrServerStopped { + if status.Code(err) == codes.Unavailable { b.connFactory.InvalidateExecutionAPIClient(execNode.Address) } return nil, status.Errorf(status.Code(err), "failed to execute the script on the execution node %s: %v", execNode.String(), err) diff --git a/engine/access/rpc/backend/backend_transactions.go b/engine/access/rpc/backend/backend_transactions.go index 212c7c1f958..6c7c7ef5e61 100644 --- a/engine/access/rpc/backend/backend_transactions.go +++ b/engine/access/rpc/backend/backend_transactions.go @@ -11,7 +11,6 @@ import ( "github.com/onflow/flow/protobuf/go/flow/entities" execproto "github.com/onflow/flow/protobuf/go/flow/execution" "github.com/rs/zerolog" - "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -152,7 +151,7 @@ func (b *backendTransactions) sendTransactionToCollector(ctx context.Context, err = b.grpcTxSend(ctx, collectionRPC, tx) if err != nil { - if err == grpc.ErrServerStopped { + if status.Code(err) == codes.Unavailable { b.connFactory.InvalidateAccessAPIClient(collectionNodeAddr) } return fmt.Errorf("failed to send transaction to collection node at %s: %v", collectionNodeAddr, err) @@ -711,7 +710,7 @@ func (b *backendTransactions) 
tryGetTransactionResult( } resp, err := execRPCClient.GetTransactionResult(ctx, &req) if err != nil { - if err == grpc.ErrServerStopped { + if status.Code(err) == codes.Unavailable { b.connFactory.InvalidateExecutionAPIClient(execNode.Address) } return nil, err @@ -767,7 +766,7 @@ func (b *backendTransactions) tryGetTransactionResultsByBlockID( } resp, err := execRPCClient.GetTransactionResultsByBlockID(ctx, &req) if err != nil { - if err == grpc.ErrServerStopped { + if status.Code(err) == codes.Unavailable { b.connFactory.InvalidateExecutionAPIClient(execNode.Address) } return nil, err @@ -824,7 +823,7 @@ func (b *backendTransactions) tryGetTransactionResultByIndex( } resp, err := execRPCClient.GetTransactionResultByIndex(ctx, &req) if err != nil { - if err == grpc.ErrServerStopped { + if status.Code(err) == codes.Unavailable { b.connFactory.InvalidateExecutionAPIClient(execNode.Address) } return nil, err From b4a3c3d2158283922640b973dd00e7d900f57577 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Tue, 12 Jul 2022 12:56:12 -0400 Subject: [PATCH 137/223] Update middleware.go --- network/p2p/middleware.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/network/p2p/middleware.go b/network/p2p/middleware.go index bf7c39304fd..abb00c5983d 100644 --- a/network/p2p/middleware.go +++ b/network/p2p/middleware.go @@ -545,7 +545,7 @@ func (m *Middleware) handleIncomingStream(s libp2pnetwork.Stream) { // Subscribe subscribes the middleware to a channel. // No errors are expected during normal operation. -func (m *Middleware) Subscribe(channel network.Channel) error { +func (m *Middleware) Subscribe(channel channels.Channel) error { topic := channels.TopicFromChannel(channel, m.rootBlockID) @@ -589,8 +589,8 @@ func (m *Middleware) Subscribe(channel network.Channel) error { // - the libP2P node fails to unsubscribe to the topic created from the provided channel. // // All errors returned from this function can be considered benign. 
-func (m *Middleware) Unsubscribe(channel network.Channel) error { - topic := network.TopicFromChannel(channel, m.rootBlockID) +func (m *Middleware) Unsubscribe(channel channels.Channel) error { + topic := channels.TopicFromChannel(channel, m.rootBlockID) err := m.libP2PNode.UnSubscribe(topic) if err != nil { return fmt.Errorf("failed to unsubscribe from channel (%s): %w", channel, err) @@ -652,7 +652,7 @@ func (m *Middleware) processMessage(msg *message.Message, decodedMsgPayload inte // - the libP2P node fails to publish the message. // // All errors returned from this function can be considered benign. -func (m *Middleware) Publish(msg *message.Message, channel network.Channel) error { +func (m *Middleware) Publish(msg *message.Message, channel channels.Channel) error { m.log.Debug().Str("channel", channel.String()).Interface("msg", msg).Msg("publishing new message") // convert the message to bytes to be put on the wire. From 160df808e0bcce600d3fab00c8b49f6491904b60 Mon Sep 17 00:00:00 2001 From: danielholmes839 Date: Tue, 12 Jul 2022 13:12:21 -0400 Subject: [PATCH 138/223] remove timeout case --- engine/common/follower/engine.go | 16 +--------------- 1 file changed, 1 insertion(+), 15 deletions(-) diff --git a/engine/common/follower/engine.go b/engine/common/follower/engine.go index d0795cdaa7b..3759d5c70c6 100644 --- a/engine/common/follower/engine.go +++ b/engine/common/follower/engine.go @@ -392,21 +392,7 @@ func (e *Engine) processBlockAndDescendants(ctx context.Context, proposal *messa // submit the model to follower for processing if inRangeBlockResponse { - select { - case <-e.follower.SubmitProposal(header, parent.View): - // after submitting proposal to hotstuff, then hotstuff will start processing block n, and follower - // engine is concurrently processing block n + 1. - // however follower engine will fail to process block n + 1 if block n is not saved in protocol state. 
- // Block n is only saved in protocol state when hotstuff finishes processing block n. - // In order to ensure follower engine don't process block n + 1 too early, we wait until hotstuff finish - // processing block n. - // this wait is only needed when processing range block response, since blocks are processed in order. - break - case <-time.After(time.Millisecond * 200): - // this shouldn't happen very often. 99.8% of proposals are processed within 200ms - e.log.Warn().Msg("HotStuffFollower SubmitProposal timeout") - break - } + <-e.follower.SubmitProposal(header, parent.View) } else { // ignore returned channel to avoid waiting e.follower.SubmitProposal(header, parent.View) From 0fc1540f2b87f1ca4faf5c9ef83e210d47674168 Mon Sep 17 00:00:00 2001 From: lolpuddle Date: Tue, 12 Jul 2022 13:12:39 -0400 Subject: [PATCH 139/223] lock and invalidate --- engine/access/rpc/backend/connection_factory.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/engine/access/rpc/backend/connection_factory.go b/engine/access/rpc/backend/connection_factory.go index 2a7584bba46..62f404c6d85 100644 --- a/engine/access/rpc/backend/connection_factory.go +++ b/engine/access/rpc/backend/connection_factory.go @@ -151,6 +151,11 @@ func (cf *ConnectionFactoryImpl) GetAccessAPIClient(address string) (access.Acce func (cf *ConnectionFactoryImpl) InvalidateAccessAPIClient(address string) bool { grpcAddress, err := getGRPCAddress(address, cf.CollectionGRPCPort) + if res, ok := cf.ConnectionsCache.Get(grpcAddress); ok { + store := res.(ConnectionCacheStore) + store.mutex.Lock() + defer store.mutex.Unlock() + } if err != nil { return true } @@ -175,6 +180,11 @@ func (cf *ConnectionFactoryImpl) GetExecutionAPIClient(address string) (executio func (cf *ConnectionFactoryImpl) InvalidateExecutionAPIClient(address string) bool { grpcAddress, err := getGRPCAddress(address, cf.ExecutionGRPCPort) + if res, ok := cf.ConnectionsCache.Get(grpcAddress); ok { + store := res.(ConnectionCacheStore) + 
store.mutex.Lock() + defer store.mutex.Unlock() + } if err != nil { return true } From 8fece0d350ba98b5e9f3526fecf4d365a0fcfa03 Mon Sep 17 00:00:00 2001 From: danielholmes839 Date: Tue, 12 Jul 2022 13:31:08 -0400 Subject: [PATCH 140/223] unused import --- engine/common/follower/engine.go | 1 - 1 file changed, 1 deletion(-) diff --git a/engine/common/follower/engine.go b/engine/common/follower/engine.go index 3759d5c70c6..f404ee32bf5 100644 --- a/engine/common/follower/engine.go +++ b/engine/common/follower/engine.go @@ -4,7 +4,6 @@ import ( "context" "errors" "fmt" - "time" "github.com/hashicorp/go-multierror" "github.com/rs/zerolog" From 62d6a270a998c5907e728a0dca43c38375f402fd Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Tue, 12 Jul 2022 14:22:57 -0400 Subject: [PATCH 141/223] check hooked logger logs directly rather than rely on hook calls - add err directly to log message string to allow checking message in tests - add note about ErrIdentityUnverified --- network/p2p/topic_validator_test.go | 81 ++++++------------- network/slashing/violations_consumer.go | 11 ++- .../pubsub/authorized_sender_validator.go | 9 ++- 3 files changed, 35 insertions(+), 66 deletions(-) diff --git a/network/p2p/topic_validator_test.go b/network/p2p/topic_validator_test.go index f7cd0a17841..440ae0f8f4a 100644 --- a/network/p2p/topic_validator_test.go +++ b/network/p2p/topic_validator_test.go @@ -2,9 +2,7 @@ package p2p_test import ( "context" - "os" "sync" - "sync/atomic" "testing" "time" @@ -25,14 +23,9 @@ import ( // TestTopicValidator_Unstaked tests that the libP2P node topic validator rejects unauthenticated messages on non-public channels (unstaked) func TestTopicValidator_Unstaked(t *testing.T) { - // setup hooked logger - var hookCalls uint64 - hook := zerolog.HookFunc(func(e *zerolog.Event, level zerolog.Level, message string) { - if level == zerolog.WarnLevel { - atomic.AddUint64(&hookCalls, 1) - } - }) - logger := 
zerolog.New(os.Stdout).Level(zerolog.WarnLevel).Hook(hook) + // create a hooked logger + var hook unittest.LoggerHook + logger, hook := unittest.HookedLogger() sporkId := unittest.IdentifierFixture() @@ -96,22 +89,14 @@ func TestTopicValidator_Unstaked(t *testing.T) { unittest.RequireReturnsBefore(t, wg.Wait, 5*time.Second, "could not receive message on time") - // expecting 1 warn calls for each rejected message from unauthenticated node - require.Equalf(t, uint64(1), hookCalls, "expected 1 warning to be logged") + // ensure the correct error is contained in the logged error + require.Contains(t, hook.Logs(), "filtering message from un-allowed peer") } // TestTopicValidator_PublicChannel tests that the libP2P node topic validator does not reject unauthenticated messages on public channels func TestTopicValidator_PublicChannel(t *testing.T) { - // setup hooked logger - var hookCalls uint64 - hook := zerolog.HookFunc(func(e *zerolog.Event, level zerolog.Level, message string) { - if level == zerolog.WarnLevel { - atomic.AddUint64(&hookCalls, 1) - } - }) - logger := zerolog.New(os.Stdout).Level(zerolog.WarnLevel).Hook(hook) - sporkId := unittest.IdentifierFixture() + logger := unittest.Logger() nodeFixtureCtx, nodeFixtureCtxCancel := context.WithCancel(context.Background()) defer nodeFixtureCtxCancel() @@ -160,23 +145,15 @@ func TestTopicValidator_PublicChannel(t *testing.T) { checkReceive(timedCtx, t, data1, sub2, nil, true) unittest.RequireReturnsBefore(t, wg.Wait, 5*time.Second, "could not receive message on time") - - // expecting no warn calls for rejected messages - require.Equalf(t, uint64(0), hookCalls, "expected 0 warning to be logged") } // TestAuthorizedSenderValidator_Unauthorized tests that the authorized sender validator rejects messages from nodes that are not authorized to send the message func TestAuthorizedSenderValidator_Unauthorized(t *testing.T) { - sporkId := unittest.IdentifierFixture() + // create a hooked logger + var hook unittest.LoggerHook 
+ logger, hook := unittest.HookedLogger() - // setup hooked logger - var hookCalls uint64 - hook := zerolog.HookFunc(func(e *zerolog.Event, level zerolog.Level, message string) { - if level == zerolog.ErrorLevel { - atomic.AddUint64(&hookCalls, 1) - } - }) - logger := zerolog.New(os.Stdout).Level(zerolog.ErrorLevel).Hook(hook) + sporkId := unittest.IdentifierFixture() nodeFixtureCtx, nodeFixtureCtxCancel := context.WithCancel(context.Background()) defer nodeFixtureCtxCancel() @@ -269,12 +246,16 @@ func TestAuthorizedSenderValidator_Unauthorized(t *testing.T) { unittest.RequireReturnsBefore(t, wg.Wait, 5*time.Second, "could not receive message on time") - // expecting 1 error log for each rejected message from unauthorized node - require.Equalf(t, uint64(1), hookCalls, "expected 1 warning to be logged") + // ensure the correct error is contained in the logged error + require.Contains(t, hook.Logs(), validator.ErrUnauthorizedSender.Error()) } // TestAuthorizedSenderValidator_Authorized tests that the authorized sender validator rejects messages being sent on the wrong channel func TestAuthorizedSenderValidator_InvalidMsg(t *testing.T) { + // create a hooked logger + var hook unittest.LoggerHook + logger, hook := unittest.HookedLogger() + sporkId := unittest.IdentifierFixture() nodeFixtureCtx, nodeFixtureCtxCancel := context.WithCancel(context.Background()) @@ -291,15 +272,6 @@ func TestAuthorizedSenderValidator_InvalidMsg(t *testing.T) { translator, err := p2p.NewFixedTableIdentityTranslator(ids) require.NoError(t, err) - // setup hooked logger - var hookCalls uint64 - hook := zerolog.HookFunc(func(e *zerolog.Event, level zerolog.Level, message string) { - if level == zerolog.ErrorLevel { - atomic.AddUint64(&hookCalls, 1) - } - }) - logger := zerolog.New(os.Stdout).Level(zerolog.ErrorLevel).Hook(hook) - authorizedSenderValidator := validator.AuthorizedSenderMessageValidator(logger, channel, func(pid peer.ID) (*flow.Identity, bool) { fid, err := 
translator.GetFlowID(pid) if err != nil { @@ -343,12 +315,16 @@ func TestAuthorizedSenderValidator_InvalidMsg(t *testing.T) { unittest.RequireReturnsBefore(t, wg.Wait, 5*time.Second, "could not receive message on time") - // expecting 1 error log for each rejected message from ejected node - require.Equalf(t, uint64(1), hookCalls, "expected 1 warning to be logged") + // ensure the correct error is contained in the logged error + require.Contains(t, hook.Logs(), message.ErrUnauthorizedMessageOnChannel.Error()) } // TestAuthorizedSenderValidator_Ejected tests that the authorized sender validator rejects messages from nodes that are ejected func TestAuthorizedSenderValidator_Ejected(t *testing.T) { + // create a hooked logger + var hook unittest.LoggerHook + logger, hook := unittest.HookedLogger() + sporkId := unittest.IdentifierFixture() nodeFixtureCtx, nodeFixtureCtxCancel := context.WithCancel(context.Background()) @@ -365,15 +341,6 @@ func TestAuthorizedSenderValidator_Ejected(t *testing.T) { translator, err := p2p.NewFixedTableIdentityTranslator(ids) require.NoError(t, err) - // setup hooked logger - var hookCalls uint64 - hook := zerolog.HookFunc(func(e *zerolog.Event, level zerolog.Level, message string) { - if level == zerolog.ErrorLevel { - atomic.AddUint64(&hookCalls, 1) - } - }) - logger := zerolog.New(os.Stdout).Level(zerolog.ErrorLevel).Hook(hook) - authorizedSenderValidator := validator.AuthorizedSenderMessageValidator(logger, channel, func(pid peer.ID) (*flow.Identity, bool) { fid, err := translator.GetFlowID(pid) if err != nil { @@ -439,8 +406,8 @@ func TestAuthorizedSenderValidator_Ejected(t *testing.T) { unittest.RequireReturnsBefore(t, wg.Wait, 5*time.Second, "could not receive message on time") - // expecting 1 warn calls for each rejected message from ejected node - require.Equalf(t, uint64(1), hookCalls, "expected 1 warning to be logged") + // ensure the correct error is contained in the logged error + require.Contains(t, hook.Logs(), 
validator.ErrSenderEjected.Error()) } // TestAuthorizedSenderValidator_ClusterChannel tests that the authorized sender validator correctly validates messages sent on cluster channels diff --git a/network/slashing/violations_consumer.go b/network/slashing/violations_consumer.go index 8564aec3f11..00cf18cc87e 100644 --- a/network/slashing/violations_consumer.go +++ b/network/slashing/violations_consumer.go @@ -1,6 +1,8 @@ package slashing import ( + "fmt" + "github.com/rs/zerolog" "github.com/onflow/flow-go/utils/logging" @@ -28,35 +30,32 @@ func NewSlashingViolationsConsumer(log zerolog.Logger) *SlashingViolationsConsum // OnUnAuthorizedSenderError logs a warning for unauthorized sender error func (c *SlashingViolationsConsumer) OnUnAuthorizedSenderError(identity *flow.Identity, peerID, msgType string, err error) { c.log.Error(). - Err(err). Str("peer_id", peerID). Str("role", identity.Role.String()). Hex("sender_id", logging.ID(identity.NodeID)). Str("message_type", msgType). Str("offense", unAuthorizedSenderViolation). - Msg("potential slashable offense") + Msg(fmt.Sprintf("potential slashable offense: %s", err)) } // OnUnknownMsgTypeError logs a warning for unknown message type error func (c *SlashingViolationsConsumer) OnUnknownMsgTypeError(identity *flow.Identity, peerID, msgType string, err error) { c.log.Error(). - Err(err). Str("peer_id", peerID). Str("role", identity.Role.String()). Hex("sender_id", logging.ID(identity.NodeID)). Str("message_type", msgType). Str("offense", unknownMsgTypeViolation). - Msg("potential slashable offense") + Msg(fmt.Sprintf("potential slashable offense: %s", err)) } // OnSenderEjectedError logs a warning for sender ejected error func (c *SlashingViolationsConsumer) OnSenderEjectedError(identity *flow.Identity, peerID, msgType string, err error) { c.log.Error(). - Err(err). Str("peer_id", peerID). Str("role", identity.Role.String()). Hex("sender_id", logging.ID(identity.NodeID)). Str("message_type", msgType). 
Str("offense", senderEjectedViolation). - Msg("potential slashable offense") + Msg(fmt.Sprintf("potential slashable offense: %s", err)) } diff --git a/network/validator/pubsub/authorized_sender_validator.go b/network/validator/pubsub/authorized_sender_validator.go index 4d881c612fe..1eaa10acf1a 100644 --- a/network/validator/pubsub/authorized_sender_validator.go +++ b/network/validator/pubsub/authorized_sender_validator.go @@ -40,10 +40,13 @@ func AuthorizedSenderValidator(log zerolog.Logger, channel channels.Channel, get slashingViolationsConsumer := slashing.NewSlashingViolationsConsumer(log) return func(ctx context.Context, from peer.ID, msg interface{}) (string, error) { + // NOTE: messages from unstaked nodes should be reject by the libP2P node topic validator + // before they reach message validators. If a message from a unstaked gets to this point + // something terrible went wrong. identity, ok := getIdentity(from) if !ok { - log.Error().Err(ErrIdentityUnverified).Str("peer_id", from.String()).Msg("rejecting message") - return "", ErrUnauthorizedSender + log.Error().Str("peer_id", from.String()).Msg(fmt.Sprintf("rejecting message: %s", ErrIdentityUnverified)) + return "", ErrIdentityUnverified } msgType, err := isAuthorizedSender(identity, channel, msg) @@ -113,7 +116,7 @@ func isAuthorizedSender(identity *flow.Identity, channel channels.Channel, msg i } if err := conf.IsAuthorized(identity.Role, channel); err != nil { - return conf.Name, fmt.Errorf("%w: %s", err, ErrUnauthorizedSender) + return conf.Name, fmt.Errorf("%w: %s", ErrUnauthorizedSender, err) } return conf.Name, nil From 7ad61328d7b8fd66ee848943592f491cbe9a38c0 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Tue, 12 Jul 2022 14:43:10 -0400 Subject: [PATCH 142/223] use sentinel messages returned from message authorization config directly do not wrap them --- network/p2p/topic_validator_test.go | 2 +- .../pubsub/authorized_sender_validator.go | 19 +++++---- 
.../authorized_sender_validator_test.go | 39 +++++++++---------- 3 files changed, 29 insertions(+), 31 deletions(-) diff --git a/network/p2p/topic_validator_test.go b/network/p2p/topic_validator_test.go index 440ae0f8f4a..963ab6e2f9b 100644 --- a/network/p2p/topic_validator_test.go +++ b/network/p2p/topic_validator_test.go @@ -247,7 +247,7 @@ func TestAuthorizedSenderValidator_Unauthorized(t *testing.T) { unittest.RequireReturnsBefore(t, wg.Wait, 5*time.Second, "could not receive message on time") // ensure the correct error is contained in the logged error - require.Contains(t, hook.Logs(), validator.ErrUnauthorizedSender.Error()) + require.Contains(t, hook.Logs(), message.ErrUnauthorizedRole.Error()) } // TestAuthorizedSenderValidator_Authorized tests that the authorized sender validator rejects messages being sent on the wrong channel diff --git a/network/validator/pubsub/authorized_sender_validator.go b/network/validator/pubsub/authorized_sender_validator.go index 1eaa10acf1a..5b44115b691 100644 --- a/network/validator/pubsub/authorized_sender_validator.go +++ b/network/validator/pubsub/authorized_sender_validator.go @@ -18,9 +18,7 @@ import ( ) var ( - ErrUnauthorizedSender = errors.New("validation failed: sender is not authorized to send this message type") ErrSenderEjected = errors.New("validation failed: sender is an ejected node") - ErrUnknownMessageType = errors.New("validation failed: failed to get message auth config") ErrIdentityUnverified = errors.New("validation failed: could not verify identity of sender") ) @@ -53,12 +51,12 @@ func AuthorizedSenderValidator(log zerolog.Logger, channel channels.Channel, get switch { case err == nil: return msgType, nil - case errors.Is(err, ErrUnauthorizedSender): + case errors.Is(err, message.ErrUnauthorizedMessageOnChannel) || errors.Is(err, message.ErrUnauthorizedRole): slashingViolationsConsumer.OnUnAuthorizedSenderError(identity, from.String(), msgType, err) - return msgType, ErrUnauthorizedSender - case 
errors.Is(err, ErrUnknownMessageType): + return msgType, err + case errors.Is(err, message.ErrUnknownMsgType): slashingViolationsConsumer.OnUnknownMsgTypeError(identity, from.String(), msgType, err) - return msgType, ErrUnknownMessageType + return msgType, err case errors.Is(err, ErrSenderEjected): slashingViolationsConsumer.OnSenderEjectedError(identity, from.String(), msgType, err) return msgType, ErrSenderEjected @@ -97,8 +95,9 @@ func AuthorizedSenderMessageValidator(log zerolog.Logger, channel channels.Chann // B. The sender role is authorized to send message on channel. // Expected error returns during normal operations: // * ErrSenderEjected: if identity of sender is ejected from the network -// * ErrUnknownMessageType: if the message type does not have an auth config -// * ErrUnauthorizedSender: if the sender is not authorized to send message on the channel +// * message.ErrUnknownMsgType if message auth config us not found for the msg +// * message.ErrUnauthorizedMessageOnChannel if msg is not authorized to be sent on channel +// * message.ErrUnauthorizedRole if sender role is not authorized to send msg func isAuthorizedSender(identity *flow.Identity, channel channels.Channel, msg interface{}) (string, error) { if identity.Ejected { return "", ErrSenderEjected @@ -107,7 +106,7 @@ func isAuthorizedSender(identity *flow.Identity, channel channels.Channel, msg i // get message auth config conf, err := message.GetMessageAuthConfig(msg) if err != nil { - return "", fmt.Errorf("%s: %w", err, ErrUnknownMessageType) + return "", err } // handle special case for cluster prefixed channels @@ -116,7 +115,7 @@ func isAuthorizedSender(identity *flow.Identity, channel channels.Channel, msg i } if err := conf.IsAuthorized(identity.Role, channel); err != nil { - return conf.Name, fmt.Errorf("%w: %s", ErrUnauthorizedSender, err) + return conf.Name, err } return conf.Name, nil diff --git a/network/validator/pubsub/authorized_sender_validator_test.go 
b/network/validator/pubsub/authorized_sender_validator_test.go index 4b2f1d5d51b..2685628cfef 100644 --- a/network/validator/pubsub/authorized_sender_validator_test.go +++ b/network/validator/pubsub/authorized_sender_validator_test.go @@ -7,7 +7,6 @@ import ( "github.com/libp2p/go-libp2p-core/peer" pubsub "github.com/libp2p/go-libp2p-pubsub" - "github.com/rs/zerolog" "github.com/stretchr/testify/suite" "github.com/onflow/flow-go/model/flow" @@ -50,7 +49,7 @@ func (s *TestAuthorizedSenderValidatorSuite) TestValidatorCallback_AuthorizedSen for _, c := range s.authorizedSenderTestCases { str := fmt.Sprintf("role (%s) should be authorized to send message type (%s) on channel (%s)", c.Identity.Role, c.MessageStr, c.Channel) s.Run(str, func() { - validate := AuthorizedSenderValidator(zerolog.Nop(), c.Channel, c.GetIdentity) + validate := AuthorizedSenderValidator(unittest.Logger(), c.Channel, c.GetIdentity) pid, err := unittest.PeerIDFromFlowID(c.Identity) s.Require().NoError(err) @@ -59,7 +58,7 @@ func (s *TestAuthorizedSenderValidatorSuite) TestValidatorCallback_AuthorizedSen s.Require().NoError(err) s.Require().Equal(c.MessageStr, msgType) - validatePubsub := AuthorizedSenderMessageValidator(zerolog.Nop(), c.Channel, c.GetIdentity) + validatePubsub := AuthorizedSenderMessageValidator(unittest.Logger(), c.Channel, c.GetIdentity) pubsubResult := validatePubsub(ctx, pid, c.Message) s.Require().Equal(pubsub.ValidationAccept, pubsubResult) }) @@ -78,12 +77,12 @@ func (s *TestAuthorizedSenderValidatorSuite) TestValidatorCallback_UnAuthorizedS pid, err := unittest.PeerIDFromFlowID(c.Identity) s.Require().NoError(err) - validate := AuthorizedSenderValidator(zerolog.Nop(), c.Channel, c.GetIdentity) + validate := AuthorizedSenderValidator(unittest.Logger(), c.Channel, c.GetIdentity) msgType, err := validate(ctx, pid, c.Message) s.Require().ErrorIs(err, message.ErrUnauthorizedRole) s.Require().Equal(c.MessageStr, msgType) - validatePubsub := 
AuthorizedSenderMessageValidator(zerolog.Nop(), c.Channel, c.GetIdentity) + validatePubsub := AuthorizedSenderMessageValidator(unittest.Logger(), c.Channel, c.GetIdentity) pubsubResult := validatePubsub(ctx, pid, c.Message) s.Require().Equal(pubsub.ValidationReject, pubsubResult) }) @@ -102,13 +101,13 @@ func (s *TestAuthorizedSenderValidatorSuite) TestValidatorCallback_UnAuthorizedM pid, err := unittest.PeerIDFromFlowID(c.Identity) s.Require().NoError(err) - validate := AuthorizedSenderValidator(zerolog.Nop(), c.Channel, c.GetIdentity) + validate := AuthorizedSenderValidator(unittest.Logger(), c.Channel, c.GetIdentity) msgType, err := validate(ctx, pid, c.Message) s.Require().ErrorIs(err, message.ErrUnauthorizedMessageOnChannel) s.Require().Equal(c.MessageStr, msgType) - validatePubsub := AuthorizedSenderMessageValidator(zerolog.Nop(), c.Channel, c.GetIdentity) + validatePubsub := AuthorizedSenderMessageValidator(unittest.Logger(), c.Channel, c.GetIdentity) pubsubResult := validatePubsub(ctx, pid, c.Message) s.Require().Equal(pubsub.ValidationReject, pubsubResult) }) @@ -129,22 +128,22 @@ func (s *TestAuthorizedSenderValidatorSuite) TestValidatorCallback_ClusterPrefix s.Require().NoError(err) // validate collection consensus cluster - validateCollConsensus := AuthorizedSenderValidator(zerolog.Nop(), channels.ConsensusCluster(clusterID), getIdentityFunc) + validateCollConsensus := AuthorizedSenderValidator(unittest.Logger(), channels.ConsensusCluster(clusterID), getIdentityFunc) msgType, err := validateCollConsensus(ctx, pid, &messages.ClusterBlockResponse{}) s.Require().NoError(err) s.Require().Equal(message.ClusterBlockResponse, msgType) - validateCollConsensusPubsub := AuthorizedSenderMessageValidator(zerolog.Nop(), channels.ConsensusCluster(clusterID), getIdentityFunc) + validateCollConsensusPubsub := AuthorizedSenderMessageValidator(unittest.Logger(), channels.ConsensusCluster(clusterID), getIdentityFunc) pubsubResult := validateCollConsensusPubsub(ctx, pid, 
&messages.ClusterBlockResponse{}) s.Require().Equal(pubsub.ValidationAccept, pubsubResult) // validate collection sync cluster - validateSyncCluster := AuthorizedSenderValidator(zerolog.Nop(), channels.SyncCluster(clusterID), getIdentityFunc) + validateSyncCluster := AuthorizedSenderValidator(unittest.Logger(), channels.SyncCluster(clusterID), getIdentityFunc) msgType, err = validateSyncCluster(ctx, pid, &messages.SyncRequest{}) s.Require().NoError(err) s.Require().Equal(message.SyncRequest, msgType) - validateSyncClusterPubsub := AuthorizedSenderMessageValidator(zerolog.Nop(), channels.SyncCluster(clusterID), getIdentityFunc) + validateSyncClusterPubsub := AuthorizedSenderMessageValidator(unittest.Logger(), channels.SyncCluster(clusterID), getIdentityFunc) pubsubResult = validateSyncClusterPubsub(ctx, pid, &messages.SyncRequest{}) s.Require().Equal(pubsub.ValidationAccept, pubsubResult) } @@ -161,12 +160,12 @@ func (s *TestAuthorizedSenderValidatorSuite) TestValidatorCallback_ValidationFai pid, err := unittest.PeerIDFromFlowID(identity) s.Require().NoError(err) - validate := AuthorizedSenderValidator(zerolog.Nop(), channels.SyncCommittee, getIdentityFunc) + validate := AuthorizedSenderValidator(unittest.Logger(), channels.SyncCommittee, getIdentityFunc) msgType, err := validate(ctx, pid, &messages.SyncRequest{}) s.Require().ErrorIs(err, ErrSenderEjected) s.Require().Equal("", msgType) - validatePubsub := AuthorizedSenderMessageValidator(zerolog.Nop(), channels.SyncCommittee, getIdentityFunc) + validatePubsub := AuthorizedSenderMessageValidator(unittest.Logger(), channels.SyncCommittee, getIdentityFunc) pubsubResult := validatePubsub(ctx, pid, &messages.SyncRequest{}) s.Require().Equal(pubsub.ValidationReject, pubsubResult) }) @@ -190,19 +189,19 @@ func (s *TestAuthorizedSenderValidatorSuite) TestValidatorCallback_ValidationFai pid, err := unittest.PeerIDFromFlowID(identity) s.Require().NoError(err) - validate := AuthorizedSenderValidator(zerolog.Nop(), 
channels.ConsensusCommittee, getIdentityFunc) - validatePubsub := AuthorizedSenderMessageValidator(zerolog.Nop(), channels.ConsensusCommittee, getIdentityFunc) + validate := AuthorizedSenderValidator(unittest.Logger(), channels.ConsensusCommittee, getIdentityFunc) + validatePubsub := AuthorizedSenderMessageValidator(unittest.Logger(), channels.ConsensusCommittee, getIdentityFunc) // unknown message types are rejected msgType, err := validate(ctx, pid, m) - s.Require().ErrorIs(err, ErrUnknownMessageType) + s.Require().ErrorIs(err, message.ErrUnknownMsgType) s.Require().Equal("", msgType) pubsubResult := validatePubsub(ctx, pid, m) s.Require().Equal(pubsub.ValidationReject, pubsubResult) // nil messages are rejected msgType, err = validate(ctx, pid, nil) - s.Require().ErrorIs(err, ErrUnknownMessageType) + s.Require().ErrorIs(err, message.ErrUnknownMsgType) s.Require().Equal("", msgType) pubsubResult = validatePubsub(ctx, pid, nil) s.Require().Equal(pubsub.ValidationReject, pubsubResult) @@ -220,12 +219,12 @@ func (s *TestAuthorizedSenderValidatorSuite) TestValidatorCallback_ValidationFai pid, err := unittest.PeerIDFromFlowID(identity) s.Require().NoError(err) - validate := AuthorizedSenderValidator(zerolog.Nop(), channels.SyncCommittee, getIdentityFunc) + validate := AuthorizedSenderValidator(unittest.Logger(), channels.SyncCommittee, getIdentityFunc) msgType, err := validate(ctx, pid, &messages.SyncRequest{}) - s.Require().ErrorIs(err, ErrUnauthorizedSender) + s.Require().ErrorIs(err, ErrIdentityUnverified) s.Require().Equal("", msgType) - validatePubsub := AuthorizedSenderMessageValidator(zerolog.Nop(), channels.SyncCommittee, getIdentityFunc) + validatePubsub := AuthorizedSenderMessageValidator(unittest.Logger(), channels.SyncCommittee, getIdentityFunc) pubsubResult := validatePubsub(ctx, pid, &messages.SyncRequest{}) s.Require().Equal(pubsub.ValidationReject, pubsubResult) }) From 1cc9b8ca0c755b4f59876b46bbc70e95ba9747ca Mon Sep 17 00:00:00 2001 From: Khalil 
Claybon Date: Tue, 12 Jul 2022 14:47:03 -0400 Subject: [PATCH 143/223] use unittest.Logger() --- network/p2p/topic_validator_test.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/network/p2p/topic_validator_test.go b/network/p2p/topic_validator_test.go index 963ab6e2f9b..264f738dcc0 100644 --- a/network/p2p/topic_validator_test.go +++ b/network/p2p/topic_validator_test.go @@ -9,7 +9,6 @@ import ( "github.com/libp2p/go-libp2p-core/host" "github.com/libp2p/go-libp2p-core/peer" pubsub "github.com/libp2p/go-libp2p-pubsub" - "github.com/rs/zerolog" "github.com/stretchr/testify/require" "github.com/onflow/flow-go/model/flow" @@ -428,7 +427,7 @@ func TestAuthorizedSenderValidator_ClusterChannel(t *testing.T) { translator, err := p2p.NewFixedTableIdentityTranslator(ids) require.NoError(t, err) - authorizedSenderValidator := validator.AuthorizedSenderMessageValidator(zerolog.Nop(), channel, func(pid peer.ID) (*flow.Identity, bool) { + authorizedSenderValidator := validator.AuthorizedSenderMessageValidator(unittest.Logger(), channel, func(pid peer.ID) (*flow.Identity, bool) { fid, err := translator.GetFlowID(pid) if err != nil { return &flow.Identity{}, false From 1ac357ccb618e192d4b6959d8050f5bf1f0296c7 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Tue, 12 Jul 2022 14:49:24 -0400 Subject: [PATCH 144/223] use testify require instead of s.Require --- .../authorized_sender_validator_test.go | 69 ++++++++++--------- 1 file changed, 35 insertions(+), 34 deletions(-) diff --git a/network/validator/pubsub/authorized_sender_validator_test.go b/network/validator/pubsub/authorized_sender_validator_test.go index 2685628cfef..899e5d580aa 100644 --- a/network/validator/pubsub/authorized_sender_validator_test.go +++ b/network/validator/pubsub/authorized_sender_validator_test.go @@ -7,6 +7,7 @@ import ( "github.com/libp2p/go-libp2p-core/peer" pubsub "github.com/libp2p/go-libp2p-pubsub" + "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" 
"github.com/onflow/flow-go/model/flow" @@ -52,15 +53,15 @@ func (s *TestAuthorizedSenderValidatorSuite) TestValidatorCallback_AuthorizedSen validate := AuthorizedSenderValidator(unittest.Logger(), c.Channel, c.GetIdentity) pid, err := unittest.PeerIDFromFlowID(c.Identity) - s.Require().NoError(err) + require.NoError(s.T(), err) msgType, err := validate(ctx, pid, c.Message) - s.Require().NoError(err) - s.Require().Equal(c.MessageStr, msgType) + require.NoError(s.T(), err) + require.Equal(s.T(), c.MessageStr, msgType) validatePubsub := AuthorizedSenderMessageValidator(unittest.Logger(), c.Channel, c.GetIdentity) pubsubResult := validatePubsub(ctx, pid, c.Message) - s.Require().Equal(pubsub.ValidationAccept, pubsubResult) + require.Equal(s.T(), pubsub.ValidationAccept, pubsubResult) }) } } @@ -75,16 +76,16 @@ func (s *TestAuthorizedSenderValidatorSuite) TestValidatorCallback_UnAuthorizedS str := fmt.Sprintf("role (%s) should not be authorized to send message type (%s) on channel (%s)", c.Identity.Role, c.MessageStr, c.Channel) s.Run(str, func() { pid, err := unittest.PeerIDFromFlowID(c.Identity) - s.Require().NoError(err) + require.NoError(s.T(), err) validate := AuthorizedSenderValidator(unittest.Logger(), c.Channel, c.GetIdentity) msgType, err := validate(ctx, pid, c.Message) - s.Require().ErrorIs(err, message.ErrUnauthorizedRole) - s.Require().Equal(c.MessageStr, msgType) + require.ErrorIs(s.T(), err, message.ErrUnauthorizedRole) + require.Equal(s.T(), c.MessageStr, msgType) validatePubsub := AuthorizedSenderMessageValidator(unittest.Logger(), c.Channel, c.GetIdentity) pubsubResult := validatePubsub(ctx, pid, c.Message) - s.Require().Equal(pubsub.ValidationReject, pubsubResult) + require.Equal(s.T(), pubsub.ValidationReject, pubsubResult) }) } } @@ -99,17 +100,17 @@ func (s *TestAuthorizedSenderValidatorSuite) TestValidatorCallback_UnAuthorizedM str := fmt.Sprintf("message type (%s) should not be authorized to be sent on channel (%s)", c.MessageStr, c.Channel) 
s.Run(str, func() { pid, err := unittest.PeerIDFromFlowID(c.Identity) - s.Require().NoError(err) + require.NoError(s.T(), err) validate := AuthorizedSenderValidator(unittest.Logger(), c.Channel, c.GetIdentity) msgType, err := validate(ctx, pid, c.Message) - s.Require().ErrorIs(err, message.ErrUnauthorizedMessageOnChannel) - s.Require().Equal(c.MessageStr, msgType) + require.ErrorIs(s.T(), err, message.ErrUnauthorizedMessageOnChannel) + require.Equal(s.T(), c.MessageStr, msgType) validatePubsub := AuthorizedSenderMessageValidator(unittest.Logger(), c.Channel, c.GetIdentity) pubsubResult := validatePubsub(ctx, pid, c.Message) - s.Require().Equal(pubsub.ValidationReject, pubsubResult) + require.Equal(s.T(), pubsub.ValidationReject, pubsubResult) }) } } @@ -125,27 +126,27 @@ func (s *TestAuthorizedSenderValidatorSuite) TestValidatorCallback_ClusterPrefix getIdentityFunc := s.getIdentity(identity) pid, err := unittest.PeerIDFromFlowID(identity) - s.Require().NoError(err) + require.NoError(s.T(), err) // validate collection consensus cluster validateCollConsensus := AuthorizedSenderValidator(unittest.Logger(), channels.ConsensusCluster(clusterID), getIdentityFunc) msgType, err := validateCollConsensus(ctx, pid, &messages.ClusterBlockResponse{}) - s.Require().NoError(err) - s.Require().Equal(message.ClusterBlockResponse, msgType) + require.NoError(s.T(), err) + require.Equal(s.T(), message.ClusterBlockResponse, msgType) validateCollConsensusPubsub := AuthorizedSenderMessageValidator(unittest.Logger(), channels.ConsensusCluster(clusterID), getIdentityFunc) pubsubResult := validateCollConsensusPubsub(ctx, pid, &messages.ClusterBlockResponse{}) - s.Require().Equal(pubsub.ValidationAccept, pubsubResult) + require.Equal(s.T(), pubsub.ValidationAccept, pubsubResult) // validate collection sync cluster validateSyncCluster := AuthorizedSenderValidator(unittest.Logger(), channels.SyncCluster(clusterID), getIdentityFunc) msgType, err = validateSyncCluster(ctx, pid, 
&messages.SyncRequest{}) - s.Require().NoError(err) - s.Require().Equal(message.SyncRequest, msgType) + require.NoError(s.T(), err) + require.Equal(s.T(), message.SyncRequest, msgType) validateSyncClusterPubsub := AuthorizedSenderMessageValidator(unittest.Logger(), channels.SyncCluster(clusterID), getIdentityFunc) pubsubResult = validateSyncClusterPubsub(ctx, pid, &messages.SyncRequest{}) - s.Require().Equal(pubsub.ValidationAccept, pubsubResult) + require.Equal(s.T(), pubsub.ValidationAccept, pubsubResult) } // TestValidatorCallback_ValidationFailure checks that the call back returned from AuthorizedSenderValidator returns the expected validation error. @@ -158,16 +159,16 @@ func (s *TestAuthorizedSenderValidatorSuite) TestValidatorCallback_ValidationFai identity.Ejected = true getIdentityFunc := s.getIdentity(identity) pid, err := unittest.PeerIDFromFlowID(identity) - s.Require().NoError(err) + require.NoError(s.T(), err) validate := AuthorizedSenderValidator(unittest.Logger(), channels.SyncCommittee, getIdentityFunc) msgType, err := validate(ctx, pid, &messages.SyncRequest{}) - s.Require().ErrorIs(err, ErrSenderEjected) - s.Require().Equal("", msgType) + require.ErrorIs(s.T(), err, ErrSenderEjected) + require.Equal(s.T(), "", msgType) validatePubsub := AuthorizedSenderMessageValidator(unittest.Logger(), channels.SyncCommittee, getIdentityFunc) pubsubResult := validatePubsub(ctx, pid, &messages.SyncRequest{}) - s.Require().Equal(pubsub.ValidationReject, pubsubResult) + require.Equal(s.T(), pubsub.ValidationReject, pubsubResult) }) s.Run("unknown message type", func() { @@ -187,24 +188,24 @@ func (s *TestAuthorizedSenderValidatorSuite) TestValidatorCallback_ValidationFai getIdentityFunc := s.getIdentity(identity) pid, err := unittest.PeerIDFromFlowID(identity) - s.Require().NoError(err) + require.NoError(s.T(), err) validate := AuthorizedSenderValidator(unittest.Logger(), channels.ConsensusCommittee, getIdentityFunc) validatePubsub := 
AuthorizedSenderMessageValidator(unittest.Logger(), channels.ConsensusCommittee, getIdentityFunc) // unknown message types are rejected msgType, err := validate(ctx, pid, m) - s.Require().ErrorIs(err, message.ErrUnknownMsgType) - s.Require().Equal("", msgType) + require.ErrorIs(s.T(), err, message.ErrUnknownMsgType) + require.Equal(s.T(), "", msgType) pubsubResult := validatePubsub(ctx, pid, m) - s.Require().Equal(pubsub.ValidationReject, pubsubResult) + require.Equal(s.T(), pubsub.ValidationReject, pubsubResult) // nil messages are rejected msgType, err = validate(ctx, pid, nil) - s.Require().ErrorIs(err, message.ErrUnknownMsgType) - s.Require().Equal("", msgType) + require.ErrorIs(s.T(), err, message.ErrUnknownMsgType) + require.Equal(s.T(), "", msgType) pubsubResult = validatePubsub(ctx, pid, nil) - s.Require().Equal(pubsub.ValidationReject, pubsubResult) + require.Equal(s.T(), pubsub.ValidationReject, pubsubResult) }) s.Run("sender is not staked getIdentityFunc does not return identity ", func() { @@ -217,16 +218,16 @@ func (s *TestAuthorizedSenderValidatorSuite) TestValidatorCallback_ValidationFai getIdentityFunc := func(id peer.ID) (*flow.Identity, bool) { return nil, false } pid, err := unittest.PeerIDFromFlowID(identity) - s.Require().NoError(err) + require.NoError(s.T(), err) validate := AuthorizedSenderValidator(unittest.Logger(), channels.SyncCommittee, getIdentityFunc) msgType, err := validate(ctx, pid, &messages.SyncRequest{}) - s.Require().ErrorIs(err, ErrIdentityUnverified) - s.Require().Equal("", msgType) + require.ErrorIs(s.T(), err, ErrIdentityUnverified) + require.Equal(s.T(), "", msgType) validatePubsub := AuthorizedSenderMessageValidator(unittest.Logger(), channels.SyncCommittee, getIdentityFunc) pubsubResult := validatePubsub(ctx, pid, &messages.SyncRequest{}) - s.Require().Equal(pubsub.ValidationReject, pubsubResult) + require.Equal(s.T(), pubsub.ValidationReject, pubsubResult) }) } From 81b1ef705b873c66cd99138e24e0ef180c09e1f8 Mon Sep 17 
00:00:00 2001 From: Khalil Claybon Date: Tue, 12 Jul 2022 15:25:46 -0400 Subject: [PATCH 145/223] Update network/message/authorization.go Co-authored-by: Jordan Schalm --- network/message/authorization.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/network/message/authorization.go b/network/message/authorization.go index e7423cf0c9c..58d803c49ec 100644 --- a/network/message/authorization.go +++ b/network/message/authorization.go @@ -148,7 +148,7 @@ func initializeMessageAuthConfigsMap() { return new(messages.ClusterBlockResponse) }, Config: map[channels.Channel]flow.RoleList{ - channels.ConsensusClusterPrefix: {flow.RoleCollection}, + channels.SyncClusterPrefix: {flow.RoleCollection}, }, } From 10f9fe5e63495108d9b608b835bb444d5be5564d Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Tue, 12 Jul 2022 15:27:12 -0400 Subject: [PATCH 146/223] Update network/message/authorization.go only transmit transaction body not transaction Co-authored-by: Jordan Schalm --- network/message/authorization.go | 9 --------- 1 file changed, 9 deletions(-) diff --git a/network/message/authorization.go b/network/message/authorization.go index 58d803c49ec..af5560676ac 100644 --- a/network/message/authorization.go +++ b/network/message/authorization.go @@ -162,15 +162,6 @@ func initializeMessageAuthConfigsMap() { channels.PushGuarantees: {flow.RoleCollection}, // channel alias ReceiveGuarantees = PushGuarantees }, } - AuthorizationConfigs[Transaction] = MsgAuthConfig{ - Name: Transaction, - Type: func() interface{} { - return new(flow.Transaction) - }, - Config: map[channels.Channel]flow.RoleList{ - channels.PushTransactions: {flow.RoleCollection}, // channel alias ReceiveTransactions = PushTransactions - }, - } AuthorizationConfigs[TransactionBody] = MsgAuthConfig{ Name: TransactionBody, Type: func() interface{} { From b909192df4e45e2cf4de459a5cd7d7314e8abae5 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Tue, 12 Jul 2022 15:28:37 -0400 Subject: [PATCH 
147/223] Update network/message/authorization.go Co-authored-by: Jordan Schalm --- network/message/authorization.go | 1 - 1 file changed, 1 deletion(-) diff --git a/network/message/authorization.go b/network/message/authorization.go index af5560676ac..a0578a9cfe2 100644 --- a/network/message/authorization.go +++ b/network/message/authorization.go @@ -119,7 +119,6 @@ func initializeMessageAuthConfigsMap() { }, Config: map[channels.Channel]flow.RoleList{ channels.SyncCommittee: {flow.RoleConsensus}, - channels.SyncClusterPrefix: {flow.RoleCollection}, }, } From befe7e3690bcc97a2e586bf5df81d9922eed0b8c Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Tue, 12 Jul 2022 15:28:54 -0400 Subject: [PATCH 148/223] Update network/message/authorization.go Co-authored-by: Jordan Schalm --- network/message/authorization.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/network/message/authorization.go b/network/message/authorization.go index a0578a9cfe2..1212e87f1e4 100644 --- a/network/message/authorization.go +++ b/network/message/authorization.go @@ -198,7 +198,7 @@ func initializeMessageAuthConfigsMap() { return new(messages.ChunkDataRequest) }, Config: map[channels.Channel]flow.RoleList{ - channels.ProvideChunks: {flow.RoleVerification}, // channel alias RequestChunks = ProvideChunks + channels.RequestChunks: {flow.RoleVerification}, // channel alias RequestChunks = ProvideChunks }, } AuthorizationConfigs[ChunkDataResponse] = MsgAuthConfig{ From 2fff9ca2ff1de9d6845b60abad80d7da8dd0c3b4 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Tue, 12 Jul 2022 15:29:16 -0400 Subject: [PATCH 149/223] Update network/message/authorization.go Co-authored-by: Jordan Schalm --- network/message/authorization.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/network/message/authorization.go b/network/message/authorization.go index 1212e87f1e4..0b85be248a7 100644 --- a/network/message/authorization.go +++ b/network/message/authorization.go @@ -218,7 
+218,7 @@ func initializeMessageAuthConfigsMap() { return new(messages.ApprovalRequest) }, Config: map[channels.Channel]flow.RoleList{ - channels.ProvideApprovalsByChunk: {flow.RoleConsensus}, + channels.RequestApprovalsByChunk: {flow.RoleConsensus}, // channel alias ProvideApprovalsByChunk = RequestApprovalsByChunk }, } AuthorizationConfigs[ApprovalResponse] = MsgAuthConfig{ From e946e8b1d9eb99f08a2b7a3466ab722aa1d8ddbe Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Tue, 12 Jul 2022 15:29:44 -0400 Subject: [PATCH 150/223] Update network/message/authorization.go Co-authored-by: Jordan Schalm --- network/message/authorization.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/network/message/authorization.go b/network/message/authorization.go index 0b85be248a7..e6ff46d21ab 100644 --- a/network/message/authorization.go +++ b/network/message/authorization.go @@ -227,7 +227,8 @@ func initializeMessageAuthConfigsMap() { return new(messages.ApprovalResponse) }, Config: map[channels.Channel]flow.RoleList{ - channels.ProvideApprovalsByChunk: {flow.RoleVerification}, + channels.ProvideApprovalsByChunk: {flow.RoleVerification}, // channel alias ProvideApprovalsByChunk = RequestApprovalsByChunk + }, } From 523ceb6b3be5e9dc6b36b7c5309935684fde1c62 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Tue, 12 Jul 2022 15:30:28 -0400 Subject: [PATCH 151/223] Update network/message/authorization.go Co-authored-by: Jordan Schalm --- network/message/authorization.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/network/message/authorization.go b/network/message/authorization.go index e6ff46d21ab..29753b4ea6f 100644 --- a/network/message/authorization.go +++ b/network/message/authorization.go @@ -239,8 +239,6 @@ func initializeMessageAuthConfigsMap() { return new(messages.EntityRequest) }, Config: map[channels.Channel]flow.RoleList{ - channels.RequestChunks: {flow.RoleVerification}, - channels.RequestApprovalsByChunk: {flow.RoleConsensus}, 
channels.RequestReceiptsByBlockID: {flow.RoleConsensus}, channels.RequestCollections: {flow.RoleAccess, flow.RoleExecution}, }, From 2eefc06a6c98aba7d42b94cb4e993573a88ce7de Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Tue, 12 Jul 2022 15:31:22 -0400 Subject: [PATCH 152/223] Update network/message/authorization.go Co-authored-by: Jordan Schalm --- network/message/authorization.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/network/message/authorization.go b/network/message/authorization.go index 29753b4ea6f..1cc492ada8a 100644 --- a/network/message/authorization.go +++ b/network/message/authorization.go @@ -249,8 +249,6 @@ func initializeMessageAuthConfigsMap() { return new(messages.EntityResponse) }, Config: map[channels.Channel]flow.RoleList{ - channels.ProvideChunks: {flow.RoleExecution}, - channels.ProvideCollections: {flow.RoleCollection}, channels.ProvideApprovalsByChunk: {flow.RoleVerification}, channels.ProvideReceiptsByBlockID: {flow.RoleExecution}, }, From 3a7ba34072d08ca00049dc64feae196fc8384ef3 Mon Sep 17 00:00:00 2001 From: lolpuddle Date: Tue, 12 Jul 2022 15:52:02 -0400 Subject: [PATCH 153/223] Connections refreshed instead of removed --- engine/access/rpc/backend/backend_accounts.go | 2 +- engine/access/rpc/backend/backend_events.go | 2 +- engine/access/rpc/backend/backend_scripts.go | 2 +- engine/access/rpc/backend/backend_test.go | 6 +- .../rpc/backend/backend_transactions.go | 8 +-- .../access/rpc/backend/connection_factory.go | 70 ++++++++++++------- .../rpc/backend/connection_factory_test.go | 15 +--- .../rpc/backend/mock/connection_factory.go | 30 ++------ 8 files changed, 61 insertions(+), 74 deletions(-) diff --git a/engine/access/rpc/backend/backend_accounts.go b/engine/access/rpc/backend/backend_accounts.go index 3c3ac5b080b..ba6cced5108 100644 --- a/engine/access/rpc/backend/backend_accounts.go +++ b/engine/access/rpc/backend/backend_accounts.go @@ -160,7 +160,7 @@ func (b *backendAccounts) tryGetAccount(ctx context.Context, 
execNode *flow.Iden resp, err := execRPCClient.GetAccountAtBlockID(ctx, &req) if err != nil { if status.Code(err) == codes.Unavailable { - b.connFactory.InvalidateExecutionAPIClient(execNode.Address) + b.connFactory.RefreshExecutionAPIClient(execNode.Address) } return nil, err } diff --git a/engine/access/rpc/backend/backend_events.go b/engine/access/rpc/backend/backend_events.go index cc047606b3a..44e64581e32 100644 --- a/engine/access/rpc/backend/backend_events.go +++ b/engine/access/rpc/backend/backend_events.go @@ -214,7 +214,7 @@ func (b *backendEvents) tryGetEvents(ctx context.Context, resp, err := execRPCClient.GetEventsForBlockIDs(ctx, &req) if err != nil { if status.Code(err) == codes.Unavailable { - b.connFactory.InvalidateExecutionAPIClient(execNode.Address) + b.connFactory.RefreshExecutionAPIClient(execNode.Address) } return nil, err } diff --git a/engine/access/rpc/backend/backend_scripts.go b/engine/access/rpc/backend/backend_scripts.go index ba0c72ed726..ca2f981b85b 100644 --- a/engine/access/rpc/backend/backend_scripts.go +++ b/engine/access/rpc/backend/backend_scripts.go @@ -172,7 +172,7 @@ func (b *backendScripts) tryExecuteScript(ctx context.Context, execNode *flow.Id execResp, err := execRPCClient.ExecuteScriptAtBlockID(ctx, &req) if err != nil { if status.Code(err) == codes.Unavailable { - b.connFactory.InvalidateExecutionAPIClient(execNode.Address) + b.connFactory.RefreshExecutionAPIClient(execNode.Address) } return nil, status.Errorf(status.Code(err), "failed to execute the script on the execution node %s: %v", execNode.String(), err) } diff --git a/engine/access/rpc/backend/backend_test.go b/engine/access/rpc/backend/backend_test.go index 193752ed940..953d2f25ca8 100644 --- a/engine/access/rpc/backend/backend_test.go +++ b/engine/access/rpc/backend/backend_test.go @@ -767,7 +767,7 @@ func (suite *Suite) TestTransactionStatusTransition() { // create a mock connection factory connFactory := new(backendmock.ConnectionFactory) 
connFactory.On("GetExecutionAPIClient", mock.Anything).Return(suite.execClient, nil) - connFactory.On("InvalidateExecutionAPIClient", mock.Anything).Return(false) + connFactory.On("RefreshExecutionAPIClient", mock.Anything) exeEventReq := execproto.GetTransactionResultRequest{ BlockId: blockID[:], @@ -2162,7 +2162,7 @@ func (suite *Suite) TestExecuteScriptOnExecutionNode() { connFactory := new(backendmock.ConnectionFactory) connFactory.On("GetExecutionAPIClient", mock.Anything). Return(suite.execClient, nil) - connFactory.On("InvalidateExecutionAPIClient", mock.Anything).Return(false) + connFactory.On("RefreshExecutionAPIClient", mock.Anything) // create the handler with the mock backend := New( @@ -2263,7 +2263,7 @@ func (suite *Suite) setupConnectionFactory() ConnectionFactory { // create a mock connection factory connFactory := new(backendmock.ConnectionFactory) connFactory.On("GetExecutionAPIClient", mock.Anything).Return(suite.execClient, nil) - connFactory.On("InvalidateExecutionAPIClient", mock.Anything).Return(false) + connFactory.On("RefreshExecutionAPIClient", mock.Anything) return connFactory } diff --git a/engine/access/rpc/backend/backend_transactions.go b/engine/access/rpc/backend/backend_transactions.go index 6c7c7ef5e61..9635a0bd204 100644 --- a/engine/access/rpc/backend/backend_transactions.go +++ b/engine/access/rpc/backend/backend_transactions.go @@ -152,7 +152,7 @@ func (b *backendTransactions) sendTransactionToCollector(ctx context.Context, err = b.grpcTxSend(ctx, collectionRPC, tx) if err != nil { if status.Code(err) == codes.Unavailable { - b.connFactory.InvalidateAccessAPIClient(collectionNodeAddr) + b.connFactory.RefreshAccessAPIClient(collectionNodeAddr) } return fmt.Errorf("failed to send transaction to collection node at %s: %v", collectionNodeAddr, err) } @@ -711,7 +711,7 @@ func (b *backendTransactions) tryGetTransactionResult( resp, err := execRPCClient.GetTransactionResult(ctx, &req) if err != nil { if status.Code(err) == 
codes.Unavailable { - b.connFactory.InvalidateExecutionAPIClient(execNode.Address) + b.connFactory.RefreshExecutionAPIClient(execNode.Address) } return nil, err } @@ -767,7 +767,7 @@ func (b *backendTransactions) tryGetTransactionResultsByBlockID( resp, err := execRPCClient.GetTransactionResultsByBlockID(ctx, &req) if err != nil { if status.Code(err) == codes.Unavailable { - b.connFactory.InvalidateExecutionAPIClient(execNode.Address) + b.connFactory.RefreshExecutionAPIClient(execNode.Address) } return nil, err } @@ -824,7 +824,7 @@ func (b *backendTransactions) tryGetTransactionResultByIndex( resp, err := execRPCClient.GetTransactionResultByIndex(ctx, &req) if err != nil { if status.Code(err) == codes.Unavailable { - b.connFactory.InvalidateExecutionAPIClient(execNode.Address) + b.connFactory.RefreshExecutionAPIClient(execNode.Address) } return nil, err } diff --git a/engine/access/rpc/backend/connection_factory.go b/engine/access/rpc/backend/connection_factory.go index 62f404c6d85..2d977de8e2b 100644 --- a/engine/access/rpc/backend/connection_factory.go +++ b/engine/access/rpc/backend/connection_factory.go @@ -24,9 +24,9 @@ const defaultClientTimeout = 3 * time.Second // ConnectionFactory is used to create an access api client type ConnectionFactory interface { GetAccessAPIClient(address string) (access.AccessAPIClient, error) - InvalidateAccessAPIClient(address string) bool + RefreshAccessAPIClient(address string) GetExecutionAPIClient(address string) (execution.ExecutionAPIClient, error) - InvalidateExecutionAPIClient(address string) bool + RefreshExecutionAPIClient(address string) } type ProxyConnectionFactory struct { @@ -106,29 +106,45 @@ func (cf *ConnectionFactoryImpl) retrieveConnection(grpcAddress string, timeout // this lock prevents a memory leak where a race condition may occur if 2 requests to a new connection at the // same address occur. 
the second add would overwrite the first without closing the connection cf.lock.Lock() + defer cf.lock.Unlock() + // Check if connection was created/refreshed by another thread + if res, ok := cf.ConnectionsCache.Get(grpcAddress); ok { + conn = res.(ConnectionCacheStore).ClientConn + if conn.GetState() == connectivity.Ready { + return conn, nil + } + } + // updates to the cache don't trigger evictions; this line closes connections before re-establishing new ones if conn != nil { conn.Close() } var err error - conn, err = cf.createConnection(grpcAddress, timeout) + conn, err = cf.addConnection(grpcAddress, timeout, mutex) if err != nil { return nil, err } + } + return conn, nil +} - store := ConnectionCacheStore{ - ClientConn: conn, - mutex: new(sync.Mutex), - } - if mutex != nil { - store.mutex = mutex - } +func (cf *ConnectionFactoryImpl) addConnection(grpcAddress string, timeout time.Duration, mutex *sync.Mutex) (*grpc.ClientConn, error) { + conn, err := cf.createConnection(grpcAddress, timeout) + if err != nil { + return nil, err + } - cf.ConnectionsCache.Add(grpcAddress, store) - cf.lock.Unlock() - if cf.AccessMetrics != nil { - cf.AccessMetrics.TotalConnectionsInPool(uint(cf.ConnectionsCache.Len()), cf.CacheSize) - } + store := ConnectionCacheStore{ + ClientConn: conn, + mutex: new(sync.Mutex), + } + if mutex != nil { + store.mutex = mutex + } + + cf.ConnectionsCache.Add(grpcAddress, store) + if cf.AccessMetrics != nil { + cf.AccessMetrics.TotalConnectionsInPool(uint(cf.ConnectionsCache.Len()), cf.CacheSize) } return conn, nil } @@ -149,17 +165,17 @@ func (cf *ConnectionFactoryImpl) GetAccessAPIClient(address string) (access.Acce return accessAPIClient, nil } -func (cf *ConnectionFactoryImpl) InvalidateAccessAPIClient(address string) bool { - grpcAddress, err := getGRPCAddress(address, cf.CollectionGRPCPort) +func (cf *ConnectionFactoryImpl) RefreshAccessAPIClient(address string) { + grpcAddress, _ := getGRPCAddress(address, cf.CollectionGRPCPort) if res, ok 
:= cf.ConnectionsCache.Get(grpcAddress); ok { store := res.(ConnectionCacheStore) store.mutex.Lock() + cf.lock.Lock() defer store.mutex.Unlock() + defer cf.lock.Unlock() + store.ClientConn.Close() + cf.addConnection(grpcAddress, cf.CollectionNodeGRPCTimeout, store.mutex) } - if err != nil { - return true - } - return cf.ConnectionsCache.Remove(grpcAddress) } func (cf *ConnectionFactoryImpl) GetExecutionAPIClient(address string) (execution.ExecutionAPIClient, error) { @@ -178,17 +194,17 @@ func (cf *ConnectionFactoryImpl) GetExecutionAPIClient(address string) (executio return executionAPIClient, nil } -func (cf *ConnectionFactoryImpl) InvalidateExecutionAPIClient(address string) bool { - grpcAddress, err := getGRPCAddress(address, cf.ExecutionGRPCPort) +func (cf *ConnectionFactoryImpl) RefreshExecutionAPIClient(address string) { + grpcAddress, _ := getGRPCAddress(address, cf.CollectionGRPCPort) if res, ok := cf.ConnectionsCache.Get(grpcAddress); ok { store := res.(ConnectionCacheStore) store.mutex.Lock() + cf.lock.Lock() defer store.mutex.Unlock() + defer cf.lock.Unlock() + store.ClientConn.Close() + cf.addConnection(grpcAddress, cf.ExecutionNodeGRPCTimeout, store.mutex) } - if err != nil { - return true - } - return cf.ConnectionsCache.Remove(grpcAddress) } // getExecutionNodeAddress translates flow.Identity address to the GRPC address of the node by switching the port to the diff --git a/engine/access/rpc/backend/connection_factory_test.go b/engine/access/rpc/backend/connection_factory_test.go index 0f2473b89c4..0c3268c745b 100644 --- a/engine/access/rpc/backend/connection_factory_test.go +++ b/engine/access/rpc/backend/connection_factory_test.go @@ -58,7 +58,6 @@ func TestProxyAccessAPI(t *testing.T) { resp, err := client.Ping(ctx, req) assert.NoError(t, err) assert.Equal(t, resp, expected) - proxyConnectionFactory.InvalidateAccessAPIClient("foo") } func TestProxyExecutionAPI(t *testing.T) { @@ -97,7 +96,6 @@ func TestProxyExecutionAPI(t *testing.T) { resp, err 
:= client.Ping(ctx, req) assert.NoError(t, err) assert.Equal(t, resp, expected) - proxyConnectionFactory.InvalidateExecutionAPIClient("foo") } func TestProxyAccessAPIConnectionReuse(t *testing.T) { @@ -143,7 +141,6 @@ func TestProxyAccessAPIConnectionReuse(t *testing.T) { resp, err := accessAPIClient.Ping(ctx, req) assert.NoError(t, err) assert.Equal(t, resp, expected) - proxyConnectionFactory.InvalidateAccessAPIClient("foo") } func TestProxyExecutionAPIConnectionReuse(t *testing.T) { @@ -189,7 +186,6 @@ func TestProxyExecutionAPIConnectionReuse(t *testing.T) { resp, err := executionAPIClient.Ping(ctx, req) assert.NoError(t, err) assert.Equal(t, resp, expected) - proxyConnectionFactory.InvalidateExecutionAPIClient("foo") } // TestExecutionNodeClientTimeout tests that the execution API client times out after the timeout duration @@ -231,7 +227,6 @@ func TestExecutionNodeClientTimeout(t *testing.T) { // assert that the client timed out assert.Equal(t, codes.DeadlineExceeded, status.Code(err)) - connectionFactory.InvalidateExecutionAPIClient(en.listener.Addr().String()) } // TestCollectionNodeClientTimeout tests that the collection API client times out after the timeout duration @@ -273,7 +268,6 @@ func TestCollectionNodeClientTimeout(t *testing.T) { // assert that the client timed out assert.Equal(t, codes.DeadlineExceeded, status.Code(err)) - connectionFactory.InvalidateAccessAPIClient(cn.listener.Addr().String()) } // TestConnectionPoolFull tests that the LRU cache replaces connections when full @@ -345,9 +339,6 @@ func TestConnectionPoolFull(t *testing.T) { assert.True(t, contains1) assert.False(t, contains2) assert.True(t, contains3) - connectionFactory.InvalidateAccessAPIClient(cn1Address) - connectionFactory.InvalidateAccessAPIClient(cn2Address) - connectionFactory.InvalidateAccessAPIClient(cn3Address) } // TestConnectionPoolStale tests that a new connection will be established if the old one cached is stale @@ -384,10 +375,9 @@ func TestConnectionPoolStale(t 
*testing.T) { assert.Equal(t, connectionFactory.ConnectionsCache.Len(), 1) assert.NoError(t, err) // close connection to simulate something "going wrong" with our stored connection - proxyConnectionFactory.InvalidateAccessAPIClient(proxyConnectionFactory.targetAddress) + res, _ := connectionFactory.ConnectionsCache.Get(proxyConnectionFactory.targetAddress) - // check if key still exists (should no longer exist) - assert.False(t, connectionFactory.ConnectionsCache.Contains(proxyConnectionFactory.targetAddress)) + res.(ConnectionCacheStore).ClientConn.Close() ctx := context.Background() // make the call to the collection node (should fail, connection closed) @@ -409,7 +399,6 @@ func TestConnectionPoolStale(t *testing.T) { resp, err := accessAPIClient.Ping(ctx, req) assert.NoError(t, err) assert.Equal(t, resp, expected) - proxyConnectionFactory.InvalidateAccessAPIClient("foo") } // node mocks a flow node that runs a GRPC server diff --git a/engine/access/rpc/backend/mock/connection_factory.go b/engine/access/rpc/backend/mock/connection_factory.go index 9cc397d47c5..39dc25f49f9 100644 --- a/engine/access/rpc/backend/mock/connection_factory.go +++ b/engine/access/rpc/backend/mock/connection_factory.go @@ -61,32 +61,14 @@ func (_m *ConnectionFactory) GetExecutionAPIClient(address string) (execution.Ex return r0, r1 } -// InvalidateAccessAPIClient provides a mock function with given fields: address -func (_m *ConnectionFactory) InvalidateAccessAPIClient(address string) bool { - ret := _m.Called(address) - - var r0 bool - if rf, ok := ret.Get(0).(func(string) bool); ok { - r0 = rf(address) - } else { - r0 = ret.Get(0).(bool) - } - - return r0 +// RefreshAccessAPIClient provides a mock function with given fields: address +func (_m *ConnectionFactory) RefreshAccessAPIClient(address string) { + _m.Called(address) } -// InvalidateExecutionAPIClient provides a mock function with given fields: address -func (_m *ConnectionFactory) InvalidateExecutionAPIClient(address string) 
bool { - ret := _m.Called(address) - - var r0 bool - if rf, ok := ret.Get(0).(func(string) bool); ok { - r0 = rf(address) - } else { - r0 = ret.Get(0).(bool) - } - - return r0 +// RefreshExecutionAPIClient provides a mock function with given fields: address +func (_m *ConnectionFactory) RefreshExecutionAPIClient(address string) { + _m.Called(address) } type NewConnectionFactoryT interface { From 27f9b670d1716c81f7361968f5efc5293fa52114 Mon Sep 17 00:00:00 2001 From: Alexander Hentschel Date: Tue, 12 Jul 2022 13:22:28 -0700 Subject: [PATCH 154/223] work in progress --- access/handler.go | 80 +++++++++------------------ cmd/execution_builder.go | 3 +- engine/access/access_test.go | 28 ++++++---- engine/common/rpc/convert/convert.go | 2 +- engine/common/rpc/convert/validate.go | 2 +- engine/execution/rpc/engine.go | 25 ++++++--- state/protocol/util.go | 6 +- 7 files changed, 66 insertions(+), 80 deletions(-) diff --git a/access/handler.go b/access/handler.go index 7faf0b96435..3ac30e95b04 100644 --- a/access/handler.go +++ b/access/handler.go @@ -3,29 +3,30 @@ package access import ( "context" - "github.com/onflow/flow/protobuf/go/flow/access" - "github.com/onflow/flow/protobuf/go/flow/entities" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "google.golang.org/protobuf/types/known/timestamppb" + "github.com/onflow/flow-go/consensus/hotstuff" "github.com/onflow/flow-go/engine/common/rpc/convert" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/state/protocol" + "github.com/onflow/flow/protobuf/go/flow/access" + "github.com/onflow/flow/protobuf/go/flow/entities" ) type Handler struct { api API chain flow.Chain // TODO: update to Replicas API once active PaceMaker is merged - state protocol.State + committee hotstuff.Committee } -func NewHandler(api API, chain flow.Chain, state protocol.State) *Handler { +func NewHandler(api API, chain flow.Chain, committee hotstuff.Committee) *Handler { return &Handler{ - api: api, - chain: chain, - 
state: state, + api: api, + chain: chain, + committee: committee, } } @@ -59,13 +60,7 @@ func (h *Handler) GetLatestBlockHeader( if err != nil { return nil, err } - - signerIDs, err := protocol.DecodeSignerIDs(h.state, header) - if err != nil { - return nil, err - } - - return blockHeaderResponse(header, signerIDs) + return h.blockHeaderResponse(header) } // GetBlockHeaderByHeight gets a block header by height. @@ -77,13 +72,7 @@ func (h *Handler) GetBlockHeaderByHeight( if err != nil { return nil, err } - - signerIDs, err := protocol.DecodeSignerIDs(h.state, header) - if err != nil { - return nil, err - } - - return blockHeaderResponse(header, signerIDs) + return h.blockHeaderResponse(header) } // GetBlockHeaderByID gets a block header by ID. @@ -95,18 +84,11 @@ func (h *Handler) GetBlockHeaderByID( if err != nil { return nil, err } - header, err := h.api.GetBlockHeaderByID(ctx, id) if err != nil { return nil, err } - - signerIDs, err := protocol.DecodeSignerIDs(h.state, header) - if err != nil { - return nil, err - } - - return blockHeaderResponse(header, signerIDs) + return h.blockHeaderResponse(header) } // GetLatestBlock gets the latest sealed block. @@ -118,13 +100,7 @@ func (h *Handler) GetLatestBlock( if err != nil { return nil, err } - - signerIDs, err := protocol.DecodeSignerIDs(h.state, block.Header) - if err != nil { - return nil, err - } - - return blockResponse(block, signerIDs, req.GetFullBlockResponse()) + return h.blockResponse(block, req.GetFullBlockResponse()) } // GetBlockByHeight gets a block by height. @@ -136,13 +112,7 @@ func (h *Handler) GetBlockByHeight( if err != nil { return nil, err } - - signerIDs, err := protocol.DecodeSignerIDs(h.state, block.Header) - if err != nil { - return nil, err - } - - return blockResponse(block, signerIDs, req.GetFullBlockResponse()) + return h.blockResponse(block, req.GetFullBlockResponse()) } // GetBlockByID gets a block by ID. 
@@ -154,18 +124,11 @@ func (h *Handler) GetBlockByID( if err != nil { return nil, err } - block, err := h.api.GetBlockByID(ctx, id) if err != nil { return nil, err } - - signerIDs, err := protocol.DecodeSignerIDs(h.state, block.Header) - if err != nil { - return nil, err - } - - return blockResponse(block, signerIDs, req.GetFullBlockResponse()) + return h.blockResponse(block, req.GetFullBlockResponse()) } // GetCollectionByID gets a collection by ID. @@ -520,9 +483,13 @@ func (h *Handler) GetExecutionResultForBlockID(ctx context.Context, req *access. return executionResultToMessages(result) } -func blockResponse(block *flow.Block, signerIDs []flow.Identifier, fullResponse bool) (*access.BlockResponse, error) { +func (h *Handler) blockResponse(block *flow.Block, fullResponse bool) (*access.BlockResponse, error) { + signerIDs, err := protocol.DecodeSignerIDs(h.committee, block.Header) + if err != nil { + return nil, err + } + var msg *entities.Block - var err error if fullResponse { msg, err = convert.BlockToMessage(block, signerIDs) if err != nil { @@ -536,7 +503,12 @@ func blockResponse(block *flow.Block, signerIDs []flow.Identifier, fullResponse }, nil } -func blockHeaderResponse(header *flow.Header, signerIDs []flow.Identifier) (*access.BlockHeaderResponse, error) { +func (h *Handler) blockHeaderResponse(header *flow.Header) (*access.BlockHeaderResponse, error) { + signerIDs, err := protocol.DecodeSignerIDs(h.committee, header) + if err != nil { + return nil, err + } + msg, err := convert.BlockHeaderToMessage(header, signerIDs) if err != nil { return nil, err diff --git a/cmd/execution_builder.go b/cmd/execution_builder.go index f2b6e48fff2..30ce6453e0c 100644 --- a/cmd/execution_builder.go +++ b/cmd/execution_builder.go @@ -777,7 +777,7 @@ func (e *ExecutionNodeBuilder) LoadComponentsAndModules() { return syncEngine, nil }). 
Component("grpc server", func(node *NodeConfig) (module.ReadyDoneAware, error) { - rpcEng := rpc.New( + return rpc.New( node.Logger, e.exeConf.rpcConf, ingestionEng, @@ -791,7 +791,6 @@ func (e *ExecutionNodeBuilder) LoadComponentsAndModules() { e.exeConf.apiRatelimits, e.exeConf.apiBurstlimits, ) - return rpcEng, nil }) } diff --git a/engine/access/access_test.go b/engine/access/access_test.go index ed4dfe236d7..baa191d93d4 100644 --- a/engine/access/access_test.go +++ b/engine/access/access_test.go @@ -1,15 +1,14 @@ -package access +package access_test import ( "context" "encoding/json" + "github.com/onflow/flow-go/consensus/hotstuff" + "github.com/onflow/flow-go/consensus/hotstuff/committees" "os" "testing" "github.com/dgraph-io/badger/v2" - accessproto "github.com/onflow/flow/protobuf/go/flow/access" - entitiesproto "github.com/onflow/flow/protobuf/go/flow/entities" - execproto "github.com/onflow/flow/protobuf/go/flow/execution" "github.com/rs/zerolog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" @@ -41,11 +40,15 @@ import ( "github.com/onflow/flow-go/storage/badger/operation" "github.com/onflow/flow-go/storage/util" "github.com/onflow/flow-go/utils/unittest" + accessproto "github.com/onflow/flow/protobuf/go/flow/access" + entitiesproto "github.com/onflow/flow/protobuf/go/flow/entities" + execproto "github.com/onflow/flow/protobuf/go/flow/execution" ) type Suite struct { suite.Suite state *protocol.State + committee hotstuff.Committee snapshot *protocol.Snapshot epochQuery *protocol.EpochQuery log zerolog.Logger @@ -66,10 +69,13 @@ func TestAccess(t *testing.T) { } func (suite *Suite) SetupTest() { + var err error suite.log = zerolog.New(os.Stderr) suite.net = new(mocknetwork.Network) - suite.state = new(protocol.State) suite.snapshot = new(protocol.Snapshot) + suite.state = new(protocol.State) + suite.committee, err = committees.NewConsensusCommittee(suite.state, flow.ZeroID) + suite.Require().NoError(err) suite.epochQuery = 
new(protocol.EpochQuery) suite.state.On("Sealed").Return(suite.snapshot, nil).Maybe() @@ -126,7 +132,7 @@ func (suite *Suite) RunTest( backend.DefaultSnapshotHistoryLimit, ) - handler := access.NewHandler(suite.backend, suite.chainID.Chain(), suite.state) + handler := access.NewHandler(suite.backend, suite.chainID.Chain(), suite.committee) f(handler, db, blocks, headers, results) }) @@ -302,7 +308,7 @@ func (suite *Suite) TestSendTransactionToRandomCollectionNode() { backend.DefaultSnapshotHistoryLimit, ) - handler := access.NewHandler(backend, suite.chainID.Chain(), suite.state) + handler := access.NewHandler(backend, suite.chainID.Chain(), suite.committee) // Send transaction 1 resp, err := handler.SendTransaction(context.Background(), sendReq1) @@ -368,7 +374,7 @@ func (suite *Suite) TestGetBlockByIDAndHeight() { assertHeaderResp := func(resp *accessproto.BlockHeaderResponse, err error, header *flow.Header) { suite.state.On("AtBlockID", header.ID()).Return(snapshotForSignerIDs, nil) - expectedSignerIDs, err := protocolInterface.DecodeSignerIDs(suite.state, header) + expectedSignerIDs, err := protocolInterface.DecodeSignerIDs(suite.committee, header) require.NoError(suite.T(), err) require.NoError(suite.T(), err) @@ -387,7 +393,7 @@ func (suite *Suite) TestGetBlockByIDAndHeight() { require.NotNil(suite.T(), resp) actual := resp.Block suite.state.On("AtBlockID", block.Header.ID()).Return(snapshotForSignerIDs, nil) - expectedSignerIDs, err := protocolInterface.DecodeSignerIDs(suite.state, block.Header) + expectedSignerIDs, err := protocolInterface.DecodeSignerIDs(suite.committee, block.Header) require.NoError(suite.T(), err) expectedMessage, err := convert.BlockToMessage(block, expectedSignerIDs) require.NoError(suite.T(), err) @@ -632,7 +638,7 @@ func (suite *Suite) TestGetSealedTransaction() { backend.DefaultSnapshotHistoryLimit, ) - handler := access.NewHandler(backend, suite.chainID.Chain(), suite.state) + handler := access.NewHandler(backend, 
suite.chainID.Chain(), suite.committee) rpcEngBuilder, err := rpc.NewBuilder(suite.log, suite.state, rpc.Config{}, nil, nil, blocks, headers, collections, transactions, receipts, results, suite.chainID, metrics, metrics, 0, 0, false, false, nil, nil) @@ -725,7 +731,7 @@ func (suite *Suite) TestExecuteScript() { backend.DefaultSnapshotHistoryLimit, ) - handler := access.NewHandler(suite.backend, suite.chainID.Chain(), suite.state) + handler := access.NewHandler(suite.backend, suite.chainID.Chain(), suite.committee) // initialize metrics related storage metrics := metrics.NewNoopCollector() diff --git a/engine/common/rpc/convert/convert.go b/engine/common/rpc/convert/convert.go index 4f506ead943..d14e9eb8dab 100644 --- a/engine/common/rpc/convert/convert.go +++ b/engine/common/rpc/convert/convert.go @@ -5,7 +5,6 @@ import ( "errors" "fmt" - "github.com/onflow/flow/protobuf/go/flow/entities" "google.golang.org/protobuf/types/known/timestamppb" "github.com/onflow/flow-go/crypto" @@ -13,6 +12,7 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/state/protocol/inmem" + "github.com/onflow/flow/protobuf/go/flow/entities" ) var ErrEmptyMessage = errors.New("protobuf message is empty") diff --git a/engine/common/rpc/convert/validate.go b/engine/common/rpc/convert/validate.go index a726c478109..92a94438192 100644 --- a/engine/common/rpc/convert/validate.go +++ b/engine/common/rpc/convert/validate.go @@ -27,7 +27,7 @@ func Address(rawAddress []byte, chain flow.Chain) (flow.Address, error) { } func BlockID(blockID []byte) (flow.Identifier, error) { - if len(blockID) == 0 { + if len(blockID) != flow.IdentifierLen { return flow.ZeroID, status.Error(codes.InvalidArgument, "invalid block id") } return flow.HashToID(blockID), nil diff --git a/engine/execution/rpc/engine.go b/engine/execution/rpc/engine.go index d951cb7d4b9..1591d274d7e 100644 --- a/engine/execution/rpc/engine.go +++ 
b/engine/execution/rpc/engine.go @@ -5,12 +5,13 @@ import ( "encoding/hex" "errors" "fmt" + "github.com/onflow/flow-go/consensus/hotstuff" + "github.com/onflow/flow-go/consensus/hotstuff/committees" "net" "strings" "unicode/utf8" grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" - "github.com/onflow/flow/protobuf/go/flow/execution" "github.com/rs/zerolog" "google.golang.org/grpc" "google.golang.org/grpc/codes" @@ -24,6 +25,7 @@ import ( "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/storage" "github.com/onflow/flow-go/utils/grpcutils" + "github.com/onflow/flow/protobuf/go/flow/execution" ) // Config defines the configurable options for the gRPC server. @@ -56,7 +58,7 @@ func New( chainID flow.ChainID, apiRatelimits map[string]int, // the api rate limit (max calls per second) for each of the gRPC API e.g. Ping->100, ExecuteScriptAtBlockID->300 apiBurstLimits map[string]int, // the api burst limit (max calls at the same time) for each of the gRPC API e.g. Ping->50, ExecuteScriptAtBlockID->10 -) *Engine { +) (*Engine, error) { log = log.With().Str("engine", "rpc").Logger() if config.MaxMsgSize == 0 { @@ -87,6 +89,11 @@ func New( server := grpc.NewServer(serverOptions...) 
+ committee, err := committees.NewConsensusCommittee(state, flow.ZeroID) + if err != nil { + return nil, fmt.Errorf("initializing hotstuff.Committee abstraction failed: %w", err) + } + eng := &Engine{ log: log, unit: engine.NewUnit(), @@ -96,6 +103,7 @@ func New( blocks: blocks, headers: headers, state: state, + committee: committee, events: events, exeResults: exeResults, transactionResults: txResults, @@ -112,7 +120,7 @@ func New( execution.RegisterExecutionAPIServer(eng.server, eng.handler) - return eng + return eng, nil } // Ready returns a ready channel that is closed once the engine has fully @@ -154,6 +162,7 @@ type handler struct { blocks storage.Blocks headers storage.Headers state protocol.State + committee hotstuff.Committee events storage.Events exeResults storage.ExecutionResults transactionResults storage.TransactionResults @@ -163,7 +172,7 @@ type handler struct { var _ execution.ExecutionAPIServer = &handler{} // Ping responds to requests when the server is up. -func (h *handler) Ping(ctx context.Context, req *execution.PingRequest) (*execution.PingResponse, error) { +func (h *handler) Ping(_ context.Context, _ *execution.PingRequest) (*execution.PingResponse, error) { return &execution.PingResponse{}, nil } @@ -518,7 +527,7 @@ func (h *handler) GetAccountAtBlockID( // GetLatestBlockHeader gets the latest sealed or finalized block header. func (h *handler) GetLatestBlockHeader( - ctx context.Context, + _ context.Context, req *execution.GetLatestBlockHeaderRequest, ) (*execution.BlockHeaderResponse, error) { var header *flow.Header @@ -536,7 +545,7 @@ func (h *handler) GetLatestBlockHeader( return nil, status.Errorf(codes.NotFound, "not found: %v", err) } - signerIDs, err := protocol.DecodeSignerIDs(h.state, header) + signerIDs, err := protocol.DecodeSignerIDs(h.committee, header) if err != nil { return nil, err } @@ -546,7 +555,7 @@ func (h *handler) GetLatestBlockHeader( // GetBlockHeaderByID gets a block header by ID. 
func (h *handler) GetBlockHeaderByID( - ctx context.Context, + _ context.Context, req *execution.GetBlockHeaderByIDRequest, ) (*execution.BlockHeaderResponse, error) { id, err := convert.BlockID(req.GetId()) @@ -558,7 +567,7 @@ func (h *handler) GetBlockHeaderByID( return nil, status.Errorf(codes.NotFound, "not found: %v", err) } - signerIDs, err := protocol.DecodeSignerIDs(h.state, header) + signerIDs, err := protocol.DecodeSignerIDs(h.committee, header) if err != nil { return nil, err } diff --git a/state/protocol/util.go b/state/protocol/util.go index 82e47552956..61e5b177475 100644 --- a/state/protocol/util.go +++ b/state/protocol/util.go @@ -3,6 +3,7 @@ package protocol import ( "fmt" + "github.com/onflow/flow-go/consensus/hotstuff" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" "github.com/onflow/flow-go/module/signature" @@ -107,12 +108,11 @@ func DecodeSignerIDs(committee hotstuff.Committee, header *flow.Header) ([]flow. if header.ParentVoterIndices == nil && header.View == 0 { return []flow.Identifier{}, nil } - snapshot := state.AtBlockID(header.ID()) - members, err := snapshot.Identities(filter.IsVotingConsensusCommitteeMember) + + members, err := committee.Identities(header.ID()) if err != nil { return nil, fmt.Errorf("fail to retrieve identities for block %v: %w", header.ID(), err) } - signerIDs, err := signature.DecodeSignerIndicesToIdentifiers(members.NodeIDs(), header.ParentVoterIndices) if err != nil { return nil, fmt.Errorf("could not decode signer indices for block %v: %w", header.ID(), err) From 7d064ba04914b981663ca1e060acf1dff42dfa46 Mon Sep 17 00:00:00 2001 From: Alexander Hentschel Date: Tue, 12 Jul 2022 13:48:56 -0700 Subject: [PATCH 155/223] resolved circular import --- access/handler.go | 6 +++--- consensus/hotstuff/signature/util.go | 31 ++++++++++++++++++++++++++++ engine/access/access_test.go | 10 ++++----- engine/execution/rpc/engine.go | 26 +++++++++-------------- state/protocol/util.go | 24 
--------------------- 5 files changed, 49 insertions(+), 48 deletions(-) create mode 100644 consensus/hotstuff/signature/util.go diff --git a/access/handler.go b/access/handler.go index 3ac30e95b04..0ae5aba0118 100644 --- a/access/handler.go +++ b/access/handler.go @@ -8,9 +8,9 @@ import ( "google.golang.org/protobuf/types/known/timestamppb" "github.com/onflow/flow-go/consensus/hotstuff" + "github.com/onflow/flow-go/consensus/hotstuff/signature" "github.com/onflow/flow-go/engine/common/rpc/convert" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow/protobuf/go/flow/access" "github.com/onflow/flow/protobuf/go/flow/entities" ) @@ -484,7 +484,7 @@ func (h *Handler) GetExecutionResultForBlockID(ctx context.Context, req *access. } func (h *Handler) blockResponse(block *flow.Block, fullResponse bool) (*access.BlockResponse, error) { - signerIDs, err := protocol.DecodeSignerIDs(h.committee, block.Header) + signerIDs, err := signature.DecodeSignerIDs(h.committee, block.Header) if err != nil { return nil, err } @@ -504,7 +504,7 @@ func (h *Handler) blockResponse(block *flow.Block, fullResponse bool) (*access.B } func (h *Handler) blockHeaderResponse(header *flow.Header) (*access.BlockHeaderResponse, error) { - signerIDs, err := protocol.DecodeSignerIDs(h.committee, header) + signerIDs, err := signature.DecodeSignerIDs(h.committee, header) if err != nil { return nil, err } diff --git a/consensus/hotstuff/signature/util.go b/consensus/hotstuff/signature/util.go new file mode 100644 index 00000000000..623111fefbb --- /dev/null +++ b/consensus/hotstuff/signature/util.go @@ -0,0 +1,31 @@ +package signature + +import ( + "fmt" + "github.com/onflow/flow-go/consensus/hotstuff" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/signature" +) + +// DecodeSignerIDs decodes the signer indices from the given block header, and finds the signer identifiers from protocol state +// Expected Error returns 
during normal operations: +// * storage.ErrNotFound if block not found for the given header +// * signature.InvalidSignerIndicesError if `signerIndices` does not encode a valid subset of the consensus committee +// TODO: change `protocol.State` to `Replicas` API once active PaceMaker is merged +func DecodeSignerIDs(committee hotstuff.Committee, header *flow.Header) ([]flow.Identifier, error) { + // root block does not have signer indices + if header.ParentVoterIndices == nil && header.View == 0 { + return []flow.Identifier{}, nil + } + + members, err := committee.Identities(header.ID()) + if err != nil { + return nil, fmt.Errorf("fail to retrieve identities for block %v: %w", header.ID(), err) + } + signerIDs, err := signature.DecodeSignerIndicesToIdentifiers(members.NodeIDs(), header.ParentVoterIndices) + if err != nil { + return nil, fmt.Errorf("could not decode signer indices for block %v: %w", header.ID(), err) + } + + return signerIDs, nil +} diff --git a/engine/access/access_test.go b/engine/access/access_test.go index baa191d93d4..c640d0d8758 100644 --- a/engine/access/access_test.go +++ b/engine/access/access_test.go @@ -3,8 +3,6 @@ package access_test import ( "context" "encoding/json" - "github.com/onflow/flow-go/consensus/hotstuff" - "github.com/onflow/flow-go/consensus/hotstuff/committees" "os" "testing" @@ -16,7 +14,10 @@ import ( "github.com/stretchr/testify/suite" "github.com/onflow/flow-go/access" + "github.com/onflow/flow-go/consensus/hotstuff" + "github.com/onflow/flow-go/consensus/hotstuff/committees" "github.com/onflow/flow-go/consensus/hotstuff/model" + consig "github.com/onflow/flow-go/consensus/hotstuff/signature" "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/engine/access/ingestion" accessmock "github.com/onflow/flow-go/engine/access/mock" @@ -34,7 +35,6 @@ import ( "github.com/onflow/flow-go/module/signature" "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/mocknetwork" - protocolInterface 
"github.com/onflow/flow-go/state/protocol" protocol "github.com/onflow/flow-go/state/protocol/mock" storage "github.com/onflow/flow-go/storage/badger" "github.com/onflow/flow-go/storage/badger/operation" @@ -374,7 +374,7 @@ func (suite *Suite) TestGetBlockByIDAndHeight() { assertHeaderResp := func(resp *accessproto.BlockHeaderResponse, err error, header *flow.Header) { suite.state.On("AtBlockID", header.ID()).Return(snapshotForSignerIDs, nil) - expectedSignerIDs, err := protocolInterface.DecodeSignerIDs(suite.committee, header) + expectedSignerIDs, err := consig.DecodeSignerIDs(suite.committee, header) require.NoError(suite.T(), err) require.NoError(suite.T(), err) @@ -393,7 +393,7 @@ func (suite *Suite) TestGetBlockByIDAndHeight() { require.NotNil(suite.T(), resp) actual := resp.Block suite.state.On("AtBlockID", block.Header.ID()).Return(snapshotForSignerIDs, nil) - expectedSignerIDs, err := protocolInterface.DecodeSignerIDs(suite.committee, block.Header) + expectedSignerIDs, err := consig.DecodeSignerIDs(suite.committee, block.Header) require.NoError(suite.T(), err) expectedMessage, err := convert.BlockToMessage(block, expectedSignerIDs) require.NoError(suite.T(), err) diff --git a/engine/execution/rpc/engine.go b/engine/execution/rpc/engine.go index 1591d274d7e..9c8d09a718c 100644 --- a/engine/execution/rpc/engine.go +++ b/engine/execution/rpc/engine.go @@ -5,8 +5,6 @@ import ( "encoding/hex" "errors" "fmt" - "github.com/onflow/flow-go/consensus/hotstuff" - "github.com/onflow/flow-go/consensus/hotstuff/committees" "net" "strings" "unicode/utf8" @@ -17,6 +15,9 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + "github.com/onflow/flow-go/consensus/hotstuff" + "github.com/onflow/flow-go/consensus/hotstuff/committees" + "github.com/onflow/flow-go/consensus/hotstuff/signature" "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/engine/common/rpc" "github.com/onflow/flow-go/engine/common/rpc/convert" @@ -56,7 +57,7 @@ func New( 
exeResults storage.ExecutionResults, txResults storage.TransactionResults, chainID flow.ChainID, - apiRatelimits map[string]int, // the api rate limit (max calls per second) for each of the gRPC API e.g. Ping->100, ExecuteScriptAtBlockID->300 + apiRatelimits map[string]int, // the api rate limit (max calls per second) for each of the gRPC API e.g. Ping->100, ExecuteScriptAtBlockID->300 apiBurstLimits map[string]int, // the api burst limit (max calls at the same time) for each of the gRPC API e.g. Ping->50, ExecuteScriptAtBlockID->10 ) (*Engine, error) { log = log.With().Str("engine", "rpc").Logger() @@ -540,17 +541,11 @@ func (h *handler) GetLatestBlockHeader( // get the finalized header from state header, err = h.state.Final().Head() } - if err != nil { return nil, status.Errorf(codes.NotFound, "not found: %v", err) } - signerIDs, err := protocol.DecodeSignerIDs(h.committee, header) - if err != nil { - return nil, err - } - - return blockHeaderResponse(header, signerIDs) + return h.blockHeaderResponse(header) } // GetBlockHeaderByID gets a block header by ID. 
@@ -566,16 +561,15 @@ func (h *handler) GetBlockHeaderByID( if err != nil { return nil, status.Errorf(codes.NotFound, "not found: %v", err) } + return h.blockHeaderResponse(header) +} - signerIDs, err := protocol.DecodeSignerIDs(h.committee, header) +func (h *handler) blockHeaderResponse(header *flow.Header) (*execution.BlockHeaderResponse, error) { + signerIDs, err := signature.DecodeSignerIDs(h.committee, header) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to decode signer indices to Identifiers for block %v: %w", header.ID(), err) } - return blockHeaderResponse(header, signerIDs) -} - -func blockHeaderResponse(header *flow.Header, signerIDs []flow.Identifier) (*execution.BlockHeaderResponse, error) { msg, err := convert.BlockHeaderToMessage(header, signerIDs) if err != nil { return nil, err diff --git a/state/protocol/util.go b/state/protocol/util.go index 61e5b177475..0c6392ebaf2 100644 --- a/state/protocol/util.go +++ b/state/protocol/util.go @@ -3,7 +3,6 @@ package protocol import ( "fmt" - "github.com/onflow/flow-go/consensus/hotstuff" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" "github.com/onflow/flow-go/module/signature" @@ -97,26 +96,3 @@ func FindGuarantors(state State, guarantee *flow.CollectionGuarantee) ([]flow.Id return guarantorIDs, nil } - -// DecodeSignerIDs decodes the signer indices from the given block header, and finds the signer identifiers from protocol state -// Expected Error returns during normal operations: -// * storage.ErrNotFound if block not found for the given header -// * signature.InvalidSignerIndicesError if `signerIndices` does not encode a valid subset of the consensus committee -// TODO: change `protocol.State` to `Replicas` API once active PaceMaker is merged -func DecodeSignerIDs(committee hotstuff.Committee, header *flow.Header) ([]flow.Identifier, error) { - // root block does not have signer indices - if header.ParentVoterIndices == nil && header.View == 0 { 
- return []flow.Identifier{}, nil - } - - members, err := committee.Identities(header.ID()) - if err != nil { - return nil, fmt.Errorf("fail to retrieve identities for block %v: %w", header.ID(), err) - } - signerIDs, err := signature.DecodeSignerIndicesToIdentifiers(members.NodeIDs(), header.ParentVoterIndices) - if err != nil { - return nil, fmt.Errorf("could not decode signer indices for block %v: %w", header.ID(), err) - } - - return signerIDs, nil -} From dc41b673c8b7918080b3c917a4060e97da5e08da Mon Sep 17 00:00:00 2001 From: lolpuddle Date: Tue, 12 Jul 2022 16:57:51 -0400 Subject: [PATCH 156/223] lint fix --- engine/access/rpc/backend/connection_factory.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/engine/access/rpc/backend/connection_factory.go b/engine/access/rpc/backend/connection_factory.go index 2d977de8e2b..29e0cecff44 100644 --- a/engine/access/rpc/backend/connection_factory.go +++ b/engine/access/rpc/backend/connection_factory.go @@ -174,7 +174,7 @@ func (cf *ConnectionFactoryImpl) RefreshAccessAPIClient(address string) { defer store.mutex.Unlock() defer cf.lock.Unlock() store.ClientConn.Close() - cf.addConnection(grpcAddress, cf.CollectionNodeGRPCTimeout, store.mutex) + _, _ = cf.addConnection(grpcAddress, cf.CollectionNodeGRPCTimeout, store.mutex) } } @@ -203,7 +203,7 @@ func (cf *ConnectionFactoryImpl) RefreshExecutionAPIClient(address string) { defer store.mutex.Unlock() defer cf.lock.Unlock() store.ClientConn.Close() - cf.addConnection(grpcAddress, cf.ExecutionNodeGRPCTimeout, store.mutex) + _, _ = cf.addConnection(grpcAddress, cf.ExecutionNodeGRPCTimeout, store.mutex) } } From d84abf7bb20ed19d7430fbf5d220e244186d5821 Mon Sep 17 00:00:00 2001 From: Alexander Hentschel Date: Tue, 12 Jul 2022 14:06:29 -0700 Subject: [PATCH 157/223] added mock, linted code --- module/mock/wal_metrics.go | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) create mode 100644 module/mock/wal_metrics.go diff --git
a/module/mock/wal_metrics.go b/module/mock/wal_metrics.go new file mode 100644 index 00000000000..960a1d5f354 --- /dev/null +++ b/module/mock/wal_metrics.go @@ -0,0 +1,25 @@ +// Code generated by mockery v2.13.0. DO NOT EDIT. + +package mock + +import mock "github.com/stretchr/testify/mock" + +// WALMetrics is an autogenerated mock type for the WALMetrics type +type WALMetrics struct { + mock.Mock +} + +type NewWALMetricsT interface { + mock.TestingT + Cleanup(func()) +} + +// NewWALMetrics creates a new instance of WALMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewWALMetrics(t NewWALMetricsT) *WALMetrics { + mock := &WALMetrics{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} From 4427bdf269db23bfd2043ae1208b77a9d4b553fc Mon Sep 17 00:00:00 2001 From: Patrick Lee Date: Tue, 12 Jul 2022 14:55:40 -0700 Subject: [PATCH 158/223] dedup code in script/transaction environment (part 1 of many) A big chunk of the code in ScriptEnv and TransactionEnv are copy/paste identical. It's easy to forget to fix bugs in both place and hard to make improvements. Notes: 1. I've decided to merge the shared code piecemeal since it's scary to merge everything at once. 2. The environment variable e has been renamed to env since it's more searchable 3. 
(A real change) I've presized the array allocation in RecordTrace --- fvm/env.go | 132 ++++++++++++++++++++++++++++++++++++++++ fvm/scriptEnv.go | 135 +++++------------------------------------ fvm/transactionEnv.go | 136 +++++------------------------------------- 3 files changed, 163 insertions(+), 240 deletions(-) diff --git a/fvm/env.go b/fvm/env.go index c7c29625586..1baebf2e4e0 100644 --- a/fvm/env.go +++ b/fvm/env.go @@ -1,7 +1,22 @@ package fvm import ( + "encoding/binary" + "math/rand" + "time" + "github.com/onflow/cadence/runtime" + "github.com/onflow/cadence/runtime/common" + "github.com/onflow/cadence/runtime/interpreter" + "github.com/opentracing/opentracing-go" + traceLog "github.com/opentracing/opentracing-go/log" + + "github.com/onflow/flow-go/fvm/crypto" + "github.com/onflow/flow-go/fvm/handler" + "github.com/onflow/flow-go/fvm/programs" + "github.com/onflow/flow-go/fvm/state" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/trace" ) // Environment accepts a context and a virtual machine instance and provides @@ -11,3 +26,120 @@ type Environment interface { VM() *VirtualMachine runtime.Interface } + +// Parts of the environment that are common to all transaction and script +// executions. +type commonEnv struct { + ctx Context + sth *state.StateHolder + vm *VirtualMachine + programs *handler.ProgramsHandler + accounts state.Accounts + accountKeys *handler.AccountKeyHandler + contracts *handler.ContractHandler + uuidGenerator *state.UUIDGenerator + metrics *handler.MetricsHandler + logs []string + rng *rand.Rand + traceSpan opentracing.Span +} + +func (env *commonEnv) Context() *Context { + return &env.ctx +} + +func (env *commonEnv) VM() *VirtualMachine { + return env.vm +} + +func (env *commonEnv) seedRNG(header *flow.Header) { + // Seed the random number generator with entropy created from the block + // header ID. The random number generator will be used by the UnsafeRandom + // function. 
+ id := header.ID() + source := rand.NewSource(int64(binary.BigEndian.Uint64(id[:]))) + env.rng = rand.New(source) +} + +func (env *commonEnv) isTraceable() bool { + return env.ctx.Tracer != nil && env.traceSpan != nil +} + +func (env *commonEnv) ImplementationDebugLog(message string) error { + env.ctx.Logger.Debug().Msgf("Cadence: %s", message) + return nil +} + +func (env *commonEnv) RecordTrace(operation string, location common.Location, duration time.Duration, logs []opentracing.LogRecord) { + if !env.isTraceable() { + return + } + if location != nil { + if logs == nil { + logs = make([]opentracing.LogRecord, 0, 1) + } + logs = append(logs, opentracing.LogRecord{Timestamp: time.Now(), + Fields: []traceLog.Field{traceLog.String("location", location.String())}, + }) + } + spanName := trace.FVMCadenceTrace.Child(operation) + env.ctx.Tracer.RecordSpanFromParent(env.traceSpan, spanName, duration, logs) +} + +func (env *commonEnv) ProgramParsed(location common.Location, duration time.Duration) { + env.RecordTrace("parseProgram", location, duration, nil) + env.metrics.ProgramParsed(location, duration) +} + +func (env *commonEnv) ProgramChecked(location common.Location, duration time.Duration) { + env.RecordTrace("checkProgram", location, duration, nil) + env.metrics.ProgramChecked(location, duration) +} + +func (env *commonEnv) ProgramInterpreted(location common.Location, duration time.Duration) { + env.RecordTrace("interpretProgram", location, duration, nil) + env.metrics.ProgramInterpreted(location, duration) +} + +func (env *commonEnv) ValueEncoded(duration time.Duration) { + env.RecordTrace("encodeValue", nil, duration, nil) + env.metrics.ValueEncoded(duration) +} + +func (env *commonEnv) ValueDecoded(duration time.Duration) { + env.RecordTrace("decodeValue", nil, duration, nil) + env.metrics.ValueDecoded(duration) +} + +// Commit commits changes and return a list of updated keys +func (env *commonEnv) Commit() ([]programs.ContractUpdateKey, error) { + // commit 
changes and return a list of updated keys + err := env.programs.Cleanup() + if err != nil { + return nil, err + } + return env.contracts.Commit() +} + +func (commonEnv) BLSVerifyPOP(pk *runtime.PublicKey, sig []byte) (bool, error) { + return crypto.VerifyPOP(pk, sig) +} + +func (commonEnv) BLSAggregateSignatures(sigs [][]byte) ([]byte, error) { + return crypto.AggregateSignatures(sigs) +} + +func (commonEnv) BLSAggregatePublicKeys( + keys []*runtime.PublicKey, +) (*runtime.PublicKey, error) { + + return crypto.AggregatePublicKeys(keys) +} + +func (commonEnv) ResourceOwnerChanged( + *interpreter.Interpreter, + *interpreter.CompositeValue, + common.Address, + common.Address, +) { +} diff --git a/fvm/scriptEnv.go b/fvm/scriptEnv.go index 968a8fd5c71..a80a547b045 100644 --- a/fvm/scriptEnv.go +++ b/fvm/scriptEnv.go @@ -5,11 +5,8 @@ import ( "encoding/binary" "encoding/hex" "fmt" - "math/rand" - "time" "github.com/onflow/atree" - "github.com/opentracing/opentracing-go" traceLog "github.com/opentracing/opentracing-go/log" "github.com/onflow/cadence" @@ -36,27 +33,9 @@ var _ Environment = &ScriptEnv{} // ScriptEnv is a read-only mostly used for executing scripts. 
type ScriptEnv struct { - ctx Context - sth *state.StateHolder - vm *VirtualMachine - accounts state.Accounts - contracts *handler.ContractHandler - programs *handler.ProgramsHandler - accountKeys *handler.AccountKeyHandler - metrics *handler.MetricsHandler - uuidGenerator *state.UUIDGenerator - logs []string - rng *rand.Rand - traceSpan opentracing.Span - reqContext context.Context -} - -func (e *ScriptEnv) Context() *Context { - return &e.ctx -} + commonEnv -func (e *ScriptEnv) VM() *VirtualMachine { - return e.vm + reqContext context.Context } func NewScriptEnvironment( @@ -74,15 +53,19 @@ func NewScriptEnvironment( metrics := handler.NewMetricsHandler(fvmContext.Metrics) env := &ScriptEnv{ - ctx: fvmContext, - sth: sth, - vm: vm, - metrics: metrics, - accounts: accounts, - accountKeys: accountKeys, - uuidGenerator: uuidGenerator, - programs: programsHandler, - reqContext: reqContext, + commonEnv: commonEnv{ + ctx: fvmContext, + sth: sth, + vm: vm, + programs: programsHandler, + accounts: accounts, + accountKeys: accountKeys, + uuidGenerator: uuidGenerator, + logs: nil, + rng: nil, + metrics: metrics, + }, + reqContext: reqContext, } env.contracts = handler.NewContractHandler( @@ -169,18 +152,6 @@ func (e *ScriptEnv) setExecutionParameters() { } } -func (e *ScriptEnv) seedRNG(header *flow.Header) { - // Seed the random number generator with entropy created from the block header ID. The random number generator will - // be used by the UnsafeRandom function. 
- id := header.ID() - source := rand.NewSource(int64(binary.BigEndian.Uint64(id[:]))) - e.rng = rand.New(source) -} - -func (e *ScriptEnv) isTraceable() bool { - return e.ctx.Tracer != nil && e.traceSpan != nil -} - func (e *ScriptEnv) GetValue(owner, key []byte) ([]byte, error) { var valueByteSize int if e.isTraceable() { @@ -837,62 +808,6 @@ func (e *ScriptEnv) GetSigningAccounts() ([]runtime.Address, error) { return nil, errors.NewOperationNotSupportedError("GetSigningAccounts") } -func (e *ScriptEnv) ImplementationDebugLog(message string) error { - e.ctx.Logger.Debug().Msgf("Cadence: %s", message) - return nil -} - -func (e *ScriptEnv) RecordTrace(operation string, location common.Location, duration time.Duration, logs []opentracing.LogRecord) { - if !e.isTraceable() { - return - } - if location != nil { - if logs == nil { - logs = make([]opentracing.LogRecord, 0) - } - logs = append(logs, opentracing.LogRecord{Timestamp: time.Now(), - Fields: []traceLog.Field{traceLog.String("location", location.String())}, - }) - } - spanName := trace.FVMCadenceTrace.Child(operation) - e.ctx.Tracer.RecordSpanFromParent(e.traceSpan, spanName, duration, logs) -} - -func (e *ScriptEnv) ProgramParsed(location common.Location, duration time.Duration) { - e.RecordTrace("parseProgram", location, duration, nil) - e.metrics.ProgramParsed(location, duration) -} - -func (e *ScriptEnv) ProgramChecked(location common.Location, duration time.Duration) { - e.RecordTrace("checkProgram", location, duration, nil) - e.metrics.ProgramChecked(location, duration) -} - -func (e *ScriptEnv) ProgramInterpreted(location common.Location, duration time.Duration) { - e.RecordTrace("interpretProgram", location, duration, nil) - e.metrics.ProgramInterpreted(location, duration) -} - -func (e *ScriptEnv) ValueEncoded(duration time.Duration) { - e.RecordTrace("encodeValue", nil, duration, nil) - e.metrics.ValueEncoded(duration) -} - -func (e *ScriptEnv) ValueDecoded(duration time.Duration) { - 
e.RecordTrace("decodeValue", nil, duration, nil) - e.metrics.ValueDecoded(duration) -} - -// Commit commits changes and return a list of updated keys -func (e *ScriptEnv) Commit() ([]programs.ContractUpdateKey, error) { - // commit changes and return a list of updated keys - err := e.programs.Cleanup() - if err != nil { - return nil, err - } - return e.contracts.Commit() -} - // AllocateStorageIndex allocates new storage index under the owner accounts to store a new register func (e *ScriptEnv) AllocateStorageIndex(owner []byte) (atree.StorageIndex, error) { err := e.meterComputation(meter.ComputationKindAllocateStorageIndex, 1) @@ -906,23 +821,3 @@ func (e *ScriptEnv) AllocateStorageIndex(owner []byte) (atree.StorageIndex, erro } return v, nil } - -func (e *ScriptEnv) BLSVerifyPOP(pk *runtime.PublicKey, sig []byte) (bool, error) { - return crypto.VerifyPOP(pk, sig) -} - -func (e *ScriptEnv) BLSAggregateSignatures(sigs [][]byte) ([]byte, error) { - return crypto.AggregateSignatures(sigs) -} - -func (e *ScriptEnv) BLSAggregatePublicKeys(keys []*runtime.PublicKey) (*runtime.PublicKey, error) { - return crypto.AggregatePublicKeys(keys) -} - -func (e *ScriptEnv) ResourceOwnerChanged( - *interpreter.Interpreter, - *interpreter.CompositeValue, - common.Address, - common.Address, -) { -} diff --git a/fvm/transactionEnv.go b/fvm/transactionEnv.go index 7e02dbe902a..4d386895ad8 100644 --- a/fvm/transactionEnv.go +++ b/fvm/transactionEnv.go @@ -4,8 +4,6 @@ import ( "encoding/binary" "encoding/hex" "fmt" - "math/rand" - "time" "github.com/onflow/atree" "github.com/onflow/cadence" @@ -35,23 +33,13 @@ var _ runtime.Interface = &TransactionEnv{} // TransactionEnv is a read-write environment used for executing flow transactions. 
type TransactionEnv struct { - vm *VirtualMachine - ctx Context - sth *state.StateHolder - programs *handler.ProgramsHandler - accounts state.Accounts - uuidGenerator *state.UUIDGenerator - contracts *handler.ContractHandler - accountKeys *handler.AccountKeyHandler - metrics *handler.MetricsHandler + commonEnv + eventHandler *handler.EventHandler addressGenerator flow.AddressGenerator - rng *rand.Rand - logs []string tx *flow.TransactionBody txIndex uint32 txID flow.Identifier - traceSpan opentracing.Span authorizers []runtime.Address } @@ -79,20 +67,25 @@ func NewTransactionEnvironment( metrics := handler.NewMetricsHandler(ctx.Metrics) env := &TransactionEnv{ - vm: vm, - ctx: ctx, - sth: sth, - metrics: metrics, - programs: programsHandler, - accounts: accounts, - accountKeys: accountKeys, + commonEnv: commonEnv{ + ctx: ctx, + sth: sth, + vm: vm, + programs: programsHandler, + accounts: accounts, + accountKeys: accountKeys, + uuidGenerator: uuidGenerator, + metrics: metrics, + logs: nil, + rng: nil, + traceSpan: traceSpan, + }, + addressGenerator: generator, - uuidGenerator: uuidGenerator, eventHandler: eventHandler, tx: tx, txIndex: txIndex, txID: tx.ID(), - traceSpan: traceSpan, } env.contracts = handler.NewContractHandler(accounts, @@ -203,26 +196,6 @@ func (e *TransactionEnv) TxID() flow.Identifier { return e.txID } -func (e *TransactionEnv) Context() *Context { - return &e.ctx -} - -func (e *TransactionEnv) VM() *VirtualMachine { - return e.vm -} - -func (e *TransactionEnv) seedRNG(header *flow.Header) { - // Seed the random number generator with entropy created from the block header ID. The random number generator will - // be used by the UnsafeRandom function. 
- id := header.ID() - source := rand.NewSource(int64(binary.BigEndian.Uint64(id[:]))) - e.rng = rand.New(source) -} - -func (e *TransactionEnv) isTraceable() bool { - return e.ctx.Tracer != nil && e.traceSpan != nil -} - // GetAccountsAuthorizedForContractUpdate returns a list of addresses authorized to update/deploy contracts func (e *TransactionEnv) GetAccountsAuthorizedForContractUpdate() []common.Address { return e.GetAuthorizedAccounts( @@ -1197,80 +1170,3 @@ func (e *TransactionEnv) getSigningAccounts() []runtime.Address { } return e.authorizers } - -func (e *TransactionEnv) ImplementationDebugLog(message string) error { - e.ctx.Logger.Debug().Msgf("Cadence: %s", message) - return nil -} - -func (e *TransactionEnv) RecordTrace(operation string, location common.Location, duration time.Duration, logs []opentracing.LogRecord) { - if !e.isTraceable() { - return - } - if location != nil { - if logs == nil { - logs = make([]opentracing.LogRecord, 0) - } - logs = append(logs, opentracing.LogRecord{Timestamp: time.Now(), - Fields: []traceLog.Field{traceLog.String("location", location.String())}, - }) - } - - spanName := trace.FVMCadenceTrace.Child(operation) - e.ctx.Tracer.RecordSpanFromParent(e.traceSpan, spanName, duration, logs) -} - -func (e *TransactionEnv) ProgramParsed(location common.Location, duration time.Duration) { - e.RecordTrace("parseProgram", location, duration, nil) - e.metrics.ProgramParsed(location, duration) -} - -func (e *TransactionEnv) ProgramChecked(location common.Location, duration time.Duration) { - e.RecordTrace("checkProgram", location, duration, nil) - e.metrics.ProgramChecked(location, duration) -} - -func (e *TransactionEnv) ProgramInterpreted(location common.Location, duration time.Duration) { - e.RecordTrace("interpretProgram", location, duration, nil) - e.metrics.ProgramInterpreted(location, duration) -} - -func (e *TransactionEnv) ValueEncoded(duration time.Duration) { - e.RecordTrace("encodeValue", nil, duration, nil) - 
e.metrics.ValueEncoded(duration) -} - -func (e *TransactionEnv) ValueDecoded(duration time.Duration) { - e.RecordTrace("decodeValue", nil, duration, nil) - e.metrics.ValueDecoded(duration) -} - -// Commit commits changes and return a list of updated keys -func (e *TransactionEnv) Commit() ([]programs.ContractUpdateKey, error) { - // commit changes and return a list of updated keys - err := e.programs.Cleanup() - if err != nil { - return nil, err - } - return e.contracts.Commit() -} - -func (e *TransactionEnv) BLSVerifyPOP(pk *runtime.PublicKey, sig []byte) (bool, error) { - return crypto.VerifyPOP(pk, sig) -} - -func (e *TransactionEnv) BLSAggregateSignatures(sigs [][]byte) ([]byte, error) { - return crypto.AggregateSignatures(sigs) -} - -func (e *TransactionEnv) BLSAggregatePublicKeys(keys []*runtime.PublicKey) (*runtime.PublicKey, error) { - return crypto.AggregatePublicKeys(keys) -} - -func (e *TransactionEnv) ResourceOwnerChanged( - *interpreter.Interpreter, - *interpreter.CompositeValue, - common.Address, - common.Address, -) { -} From dee368c42122261c98ed1c2ad45ca496f8eab459 Mon Sep 17 00:00:00 2001 From: Alexander Hentschel Date: Tue, 12 Jul 2022 15:40:29 -0700 Subject: [PATCH 159/223] more cleanup --- access/handler.go | 2 ++ engine/access/rpc/engine.go | 13 +++++++++++-- engine/access/rpc/engine_builder.go | 8 ++++---- engine/execution/rpc/engine.go | 5 ++++- 4 files changed, 21 insertions(+), 7 deletions(-) diff --git a/access/handler.go b/access/handler.go index 0ae5aba0118..bed9ec1c148 100644 --- a/access/handler.go +++ b/access/handler.go @@ -22,6 +22,8 @@ type Handler struct { committee hotstuff.Committee } +type HandlerOption func(*Handler) + func NewHandler(api API, chain flow.Chain, committee hotstuff.Committee) *Handler { return &Handler{ api: api, diff --git a/engine/access/rpc/engine.go b/engine/access/rpc/engine.go index ba3a967ecf8..45d14db0341 100644 --- a/engine/access/rpc/engine.go +++ b/engine/access/rpc/engine.go @@ -4,6 +4,7 @@ import ( 
"context" "errors" "fmt" + "github.com/onflow/flow-go/consensus/hotstuff/committees" "net" "net/http" "sync" @@ -86,7 +87,7 @@ func NewBuilder(log zerolog.Logger, executionGRPCPort uint, retryEnabled bool, rpcMetricsEnabled bool, - apiRatelimits map[string]int, // the api rate limit (max calls per second) for each of the Access API e.g. Ping->100, GetTransaction->300 + apiRatelimits map[string]int, // the api rate limit (max calls per second) for each of the Access API e.g. Ping->100, GetTransaction->300 apiBurstLimits map[string]int, // the api burst limit (max calls at the same time) for each of the Access API e.g. Ping->50, GetTransaction->10 ) (*RPCEngineBuilder, error) { @@ -184,7 +185,15 @@ func NewBuilder(log zerolog.Logger, chain: chainID.Chain(), } - builder := NewRPCEngineBuilder(eng, state) + // TODO: update to Replicas API once active PaceMaker is merged + // The second parameter (flow.Identifier) is only used by hotstuff internally and also + // going to be removed soon. For now, we set it to zero. 
+ committee, err := committees.NewConsensusCommittee(state, flow.ZeroID) + if err != nil { + return nil, fmt.Errorf("failed to initialize hotstuff.Committee abstraction: %w", err) + } + + builder := NewRPCEngineBuilder(eng, committee) if rpcMetricsEnabled { builder.WithMetrics() } diff --git a/engine/access/rpc/engine_builder.go b/engine/access/rpc/engine_builder.go index 636fe72c67e..dd678423efe 100644 --- a/engine/access/rpc/engine_builder.go +++ b/engine/access/rpc/engine_builder.go @@ -2,21 +2,21 @@ package rpc import ( grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" + accessproto "github.com/onflow/flow/protobuf/go/flow/access" legacyaccessproto "github.com/onflow/flow/protobuf/go/flow/legacy/access" "github.com/onflow/flow-go/access" - "github.com/onflow/flow-go/state/protocol" - legacyaccess "github.com/onflow/flow-go/access/legacy" "github.com/onflow/flow-go/apiproxy" + "github.com/onflow/flow-go/consensus/hotstuff" ) // NewRPCEngineBuilder helps to build a new RPC engine. -func NewRPCEngineBuilder(engine *Engine, state protocol.State) *RPCEngineBuilder { +func NewRPCEngineBuilder(engine *Engine, committee hotstuff.Committee) *RPCEngineBuilder { builder := &RPCEngineBuilder{} builder.Engine = engine - builder.localAPIServer = access.NewHandler(builder.backend, builder.chain, state) + builder.localAPIServer = access.NewHandler(builder.backend, builder.chain, committee) return builder } diff --git a/engine/execution/rpc/engine.go b/engine/execution/rpc/engine.go index 9c8d09a718c..b7ded1d0804 100644 --- a/engine/execution/rpc/engine.go +++ b/engine/execution/rpc/engine.go @@ -57,7 +57,7 @@ func New( exeResults storage.ExecutionResults, txResults storage.TransactionResults, chainID flow.ChainID, - apiRatelimits map[string]int, // the api rate limit (max calls per second) for each of the gRPC API e.g. Ping->100, ExecuteScriptAtBlockID->300 + apiRatelimits map[string]int, // the api rate limit (max calls per second) for each of the gRPC API e.g.
Ping->100, ExecuteScriptAtBlockID->300 apiBurstLimits map[string]int, // the api burst limit (max calls at the same time) for each of the gRPC API e.g. Ping->50, ExecuteScriptAtBlockID->10 ) (*Engine, error) { log = log.With().Str("engine", "rpc").Logger() @@ -90,6 +90,9 @@ func New( server := grpc.NewServer(serverOptions...) + // TODO: update to Replicas API once active PaceMaker is merged + // The second parameter (flow.Identifier) is only used by hotstuff internally and also + // going to be removed soon. For now, we set it to zero. committee, err := committees.NewConsensusCommittee(state, flow.ZeroID) if err != nil { return nil, fmt.Errorf("initializing hotstuff.Committee abstraction failed: %w", err) From 5af9984d276b6e04d43c338f4304a59d84eb3ff1 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Tue, 12 Jul 2022 18:44:00 -0400 Subject: [PATCH 160/223] update cluster tests --- network/message/authorization.go | 2 +- .../validator/pubsub/authorized_sender_validator_test.go | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/network/message/authorization.go b/network/message/authorization.go index 1cc492ada8a..478c391e55d 100644 --- a/network/message/authorization.go +++ b/network/message/authorization.go @@ -118,7 +118,7 @@ func initializeMessageAuthConfigsMap() { return new(messages.BlockResponse) }, Config: map[channels.Channel]flow.RoleList{ - channels.SyncCommittee: {flow.RoleConsensus}, + channels.SyncCommittee: {flow.RoleConsensus}, }, } diff --git a/network/validator/pubsub/authorized_sender_validator_test.go b/network/validator/pubsub/authorized_sender_validator_test.go index 899e5d580aa..e7d7dd25e6d 100644 --- a/network/validator/pubsub/authorized_sender_validator_test.go +++ b/network/validator/pubsub/authorized_sender_validator_test.go @@ -130,12 +130,12 @@ func (s *TestAuthorizedSenderValidatorSuite) TestValidatorCallback_ClusterPrefix // validate collection consensus cluster validateCollConsensus := 
AuthorizedSenderValidator(unittest.Logger(), channels.ConsensusCluster(clusterID), getIdentityFunc) - msgType, err := validateCollConsensus(ctx, pid, &messages.ClusterBlockResponse{}) + msgType, err := validateCollConsensus(ctx, pid, &messages.ClusterBlockProposal{}) require.NoError(s.T(), err) - require.Equal(s.T(), message.ClusterBlockResponse, msgType) + require.Equal(s.T(), message.ClusterBlockProposal, msgType) validateCollConsensusPubsub := AuthorizedSenderMessageValidator(unittest.Logger(), channels.ConsensusCluster(clusterID), getIdentityFunc) - pubsubResult := validateCollConsensusPubsub(ctx, pid, &messages.ClusterBlockResponse{}) + pubsubResult := validateCollConsensusPubsub(ctx, pid, &messages.ClusterBlockProposal{}) require.Equal(s.T(), pubsub.ValidationAccept, pubsubResult) // validate collection sync cluster From 1ff5d257cb8eae3faebb90e603b3dbd977991403 Mon Sep 17 00:00:00 2001 From: Alexander Hentschel Date: Tue, 12 Jul 2022 15:44:47 -0700 Subject: [PATCH 161/223] organized imports --- access/handler.go | 5 +++-- consensus/hotstuff/signature/util.go | 1 + engine/common/rpc/convert/convert.go | 3 ++- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/access/handler.go b/access/handler.go index bed9ec1c148..f418887ebf8 100644 --- a/access/handler.go +++ b/access/handler.go @@ -7,12 +7,13 @@ import ( "google.golang.org/grpc/status" "google.golang.org/protobuf/types/known/timestamppb" + "github.com/onflow/flow/protobuf/go/flow/access" + "github.com/onflow/flow/protobuf/go/flow/entities" + "github.com/onflow/flow-go/consensus/hotstuff" "github.com/onflow/flow-go/consensus/hotstuff/signature" "github.com/onflow/flow-go/engine/common/rpc/convert" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow/protobuf/go/flow/access" - "github.com/onflow/flow/protobuf/go/flow/entities" ) type Handler struct { diff --git a/consensus/hotstuff/signature/util.go b/consensus/hotstuff/signature/util.go index 623111fefbb..a5158fc26bb 100644 --- 
a/consensus/hotstuff/signature/util.go +++ b/consensus/hotstuff/signature/util.go @@ -2,6 +2,7 @@ package signature import ( "fmt" + "github.com/onflow/flow-go/consensus/hotstuff" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/signature" diff --git a/engine/common/rpc/convert/convert.go b/engine/common/rpc/convert/convert.go index d14e9eb8dab..223468dabce 100644 --- a/engine/common/rpc/convert/convert.go +++ b/engine/common/rpc/convert/convert.go @@ -7,12 +7,13 @@ import ( "google.golang.org/protobuf/types/known/timestamppb" + "github.com/onflow/flow/protobuf/go/flow/entities" + "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/crypto/hash" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/state/protocol/inmem" - "github.com/onflow/flow/protobuf/go/flow/entities" ) var ErrEmptyMessage = errors.New("protobuf message is empty") From 4793f3362090c9ed2d67437b34fa7c7122c324f9 Mon Sep 17 00:00:00 2001 From: Alexander Hentschel Date: Tue, 12 Jul 2022 15:46:14 -0700 Subject: [PATCH 162/223] organized imports --- access/handler.go | 5 ++--- engine/common/rpc/convert/convert.go | 3 +-- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/access/handler.go b/access/handler.go index f418887ebf8..430f659110c 100644 --- a/access/handler.go +++ b/access/handler.go @@ -3,13 +3,12 @@ package access import ( "context" + "github.com/onflow/flow/protobuf/go/flow/access" + "github.com/onflow/flow/protobuf/go/flow/entities" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "google.golang.org/protobuf/types/known/timestamppb" - "github.com/onflow/flow/protobuf/go/flow/access" - "github.com/onflow/flow/protobuf/go/flow/entities" - "github.com/onflow/flow-go/consensus/hotstuff" "github.com/onflow/flow-go/consensus/hotstuff/signature" "github.com/onflow/flow-go/engine/common/rpc/convert" diff --git a/engine/common/rpc/convert/convert.go 
b/engine/common/rpc/convert/convert.go index 223468dabce..4f506ead943 100644 --- a/engine/common/rpc/convert/convert.go +++ b/engine/common/rpc/convert/convert.go @@ -5,9 +5,8 @@ import ( "errors" "fmt" - "google.golang.org/protobuf/types/known/timestamppb" - "github.com/onflow/flow/protobuf/go/flow/entities" + "google.golang.org/protobuf/types/known/timestamppb" "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/crypto/hash" From eb625409e0af2ec5aa833ff8ef2a0f58880e0b0c Mon Sep 17 00:00:00 2001 From: Alexander Hentschel Date: Tue, 12 Jul 2022 16:02:04 -0700 Subject: [PATCH 163/223] organized imports --- engine/access/access_test.go | 6 +++--- engine/access/rpc/engine.go | 4 ++-- engine/execution/rpc/engine.go | 4 ++-- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/engine/access/access_test.go b/engine/access/access_test.go index c640d0d8758..7eda3af9355 100644 --- a/engine/access/access_test.go +++ b/engine/access/access_test.go @@ -7,6 +7,9 @@ import ( "testing" "github.com/dgraph-io/badger/v2" + accessproto "github.com/onflow/flow/protobuf/go/flow/access" + entitiesproto "github.com/onflow/flow/protobuf/go/flow/entities" + execproto "github.com/onflow/flow/protobuf/go/flow/execution" "github.com/rs/zerolog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" @@ -40,9 +43,6 @@ import ( "github.com/onflow/flow-go/storage/badger/operation" "github.com/onflow/flow-go/storage/util" "github.com/onflow/flow-go/utils/unittest" - accessproto "github.com/onflow/flow/protobuf/go/flow/access" - entitiesproto "github.com/onflow/flow/protobuf/go/flow/entities" - execproto "github.com/onflow/flow/protobuf/go/flow/execution" ) type Suite struct { diff --git a/engine/access/rpc/engine.go b/engine/access/rpc/engine.go index 45d14db0341..09621153c37 100644 --- a/engine/access/rpc/engine.go +++ b/engine/access/rpc/engine.go @@ -4,7 +4,6 @@ import ( "context" "errors" "fmt" - "github.com/onflow/flow-go/consensus/hotstuff/committees" 
"net" "net/http" "sync" @@ -17,6 +16,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/credentials" + "github.com/onflow/flow-go/consensus/hotstuff/committees" "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/engine/access/rest" "github.com/onflow/flow-go/engine/access/rpc/backend" @@ -87,7 +87,7 @@ func NewBuilder(log zerolog.Logger, executionGRPCPort uint, retryEnabled bool, rpcMetricsEnabled bool, - apiRatelimits map[string]int, // the api rate limit (max calls per second) for each of the Access API e.g. Ping->100, GetTransaction->300 + apiRatelimits map[string]int, // the api rate limit (max calls per second) for each of the Access API e.g. Ping->100, GetTransaction->300 apiBurstLimits map[string]int, // the api burst limit (max calls at the same time) for each of the Access API e.g. Ping->50, GetTransaction->10 ) (*RPCEngineBuilder, error) { diff --git a/engine/execution/rpc/engine.go b/engine/execution/rpc/engine.go index b7ded1d0804..2fa0432910b 100644 --- a/engine/execution/rpc/engine.go +++ b/engine/execution/rpc/engine.go @@ -10,6 +10,7 @@ import ( "unicode/utf8" grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" + "github.com/onflow/flow/protobuf/go/flow/execution" "github.com/rs/zerolog" "google.golang.org/grpc" "google.golang.org/grpc/codes" @@ -26,7 +27,6 @@ import ( "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/storage" "github.com/onflow/flow-go/utils/grpcutils" - "github.com/onflow/flow/protobuf/go/flow/execution" ) // Config defines the configurable options for the gRPC server. @@ -57,7 +57,7 @@ func New( exeResults storage.ExecutionResults, txResults storage.TransactionResults, chainID flow.ChainID, - apiRatelimits map[string]int, // the api rate limit (max calls per second) for each of the gRPC API e.g. Ping->100, ExecuteScriptAtBlockID->300 + apiRatelimits map[string]int, // the api rate limit (max calls per second) for each of the gRPC API e.g. 
Ping->100, ExecuteScriptAtBlockID->300 apiBurstLimits map[string]int, // the api burst limit (max calls at the same time) for each of the gRPC API e.g. Ping->50, ExecuteScriptAtBlockID->10 ) (*Engine, error) { log = log.With().Str("engine", "rpc").Logger() From 948c6103bbef49efea284b5dd38b4e090d395c82 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Tue, 12 Jul 2022 19:05:45 -0400 Subject: [PATCH 164/223] add UnknownMsgTypeErr struct to hold msg type value - move all errors declared in authorization.go -> errors.go --- network/message/authorization.go | 12 +------ network/message/errors.go | 32 +++++++++++++++++++ .../pubsub/authorized_sender_validator.go | 6 ++-- .../authorized_sender_validator_test.go | 4 +-- 4 files changed, 38 insertions(+), 16 deletions(-) create mode 100644 network/message/errors.go diff --git a/network/message/authorization.go b/network/message/authorization.go index 478c391e55d..e51fce82cb8 100644 --- a/network/message/authorization.go +++ b/network/message/authorization.go @@ -1,9 +1,6 @@ package message import ( - "errors" - "fmt" - "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/libp2p/message" "github.com/onflow/flow-go/model/messages" @@ -40,13 +37,6 @@ func (m MsgAuthConfig) IsAuthorized(role flow.Role, channel channels.Channel) er return nil } -var ( - ErrUnknownMsgType = errors.New("could not get authorization config for unknown message type") - ErrUnauthorizedMessageOnChannel = errors.New("message is not authorized to be sent on channel") - ErrUnauthorizedRole = errors.New("sender role not authorized to send message on channel") - AuthorizationConfigs map[string]MsgAuthConfig -) - func initializeMessageAuthConfigsMap() { AuthorizationConfigs = make(map[string]MsgAuthConfig) @@ -357,6 +347,6 @@ func GetMessageAuthConfig(v interface{}) (MsgAuthConfig, error) { return AuthorizationConfigs[DKGMessage], nil default: - return MsgAuthConfig{}, fmt.Errorf("%w (%T)", ErrUnknownMsgType, v) + return MsgAuthConfig{}, 
NewUnknownMsgTypeErr(v) } } diff --git a/network/message/errors.go b/network/message/errors.go new file mode 100644 index 00000000000..87bec2b6abd --- /dev/null +++ b/network/message/errors.go @@ -0,0 +1,32 @@ +package message + +import ( + "errors" + "fmt" +) + +var ( + ErrUnauthorizedMessageOnChannel = errors.New("message is not authorized to be sent on channel") + ErrUnauthorizedRole = errors.New("sender role not authorized to send message on channel") + AuthorizationConfigs map[string]MsgAuthConfig +) + +// UnknownMsgTypeErr indicates that no message auth configured for the message type v +type UnknownMsgTypeErr struct { + MsgType interface{} +} + +func (e UnknownMsgTypeErr) Error() string { + return fmt.Sprintf("could not get authorization config for unknown message type: %T", e.MsgType) +} + +// NewUnknownMsgTypeErr returns a new ErrUnknownMsgType +func NewUnknownMsgTypeErr(msgType interface{}) UnknownMsgTypeErr { + return UnknownMsgTypeErr{MsgType: msgType} +} + +// IsUnknownMsgTypeErr returns whether an error is UnknownMsgTypeErr +func IsUnknownMsgTypeErr(err error) bool { + var e UnknownMsgTypeErr + return errors.As(err, &e) +} diff --git a/network/validator/pubsub/authorized_sender_validator.go b/network/validator/pubsub/authorized_sender_validator.go index 5b44115b691..1fd19c2d580 100644 --- a/network/validator/pubsub/authorized_sender_validator.go +++ b/network/validator/pubsub/authorized_sender_validator.go @@ -51,12 +51,12 @@ func AuthorizedSenderValidator(log zerolog.Logger, channel channels.Channel, get switch { case err == nil: return msgType, nil + case message.IsUnknownMsgTypeErr(err): + slashingViolationsConsumer.OnUnknownMsgTypeError(identity, from.String(), msgType, err) + return msgType, err case errors.Is(err, message.ErrUnauthorizedMessageOnChannel) || errors.Is(err, message.ErrUnauthorizedRole): slashingViolationsConsumer.OnUnAuthorizedSenderError(identity, from.String(), msgType, err) return msgType, err - case errors.Is(err, 
message.ErrUnknownMsgType): - slashingViolationsConsumer.OnUnknownMsgTypeError(identity, from.String(), msgType, err) - return msgType, err case errors.Is(err, ErrSenderEjected): slashingViolationsConsumer.OnSenderEjectedError(identity, from.String(), msgType, err) return msgType, ErrSenderEjected diff --git a/network/validator/pubsub/authorized_sender_validator_test.go b/network/validator/pubsub/authorized_sender_validator_test.go index e7d7dd25e6d..99124c645d9 100644 --- a/network/validator/pubsub/authorized_sender_validator_test.go +++ b/network/validator/pubsub/authorized_sender_validator_test.go @@ -195,14 +195,14 @@ func (s *TestAuthorizedSenderValidatorSuite) TestValidatorCallback_ValidationFai // unknown message types are rejected msgType, err := validate(ctx, pid, m) - require.ErrorIs(s.T(), err, message.ErrUnknownMsgType) + require.True(s.T(), message.IsUnknownMsgTypeErr(err)) require.Equal(s.T(), "", msgType) pubsubResult := validatePubsub(ctx, pid, m) require.Equal(s.T(), pubsub.ValidationReject, pubsubResult) // nil messages are rejected msgType, err = validate(ctx, pid, nil) - require.ErrorIs(s.T(), err, message.ErrUnknownMsgType) + require.True(s.T(), message.IsUnknownMsgTypeErr(err)) require.Equal(s.T(), "", msgType) pubsubResult = validatePubsub(ctx, pid, nil) require.Equal(s.T(), pubsub.ValidationReject, pubsubResult) From 98249bcbc0f11906184a2746400a0f4f5444d17b Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Tue, 12 Jul 2022 19:08:45 -0400 Subject: [PATCH 165/223] move AuthorizationConfigs declaration to authorization.go --- network/message/authorization.go | 2 ++ network/message/errors.go | 1 - 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/network/message/authorization.go b/network/message/authorization.go index e51fce82cb8..32df4bb8eb6 100644 --- a/network/message/authorization.go +++ b/network/message/authorization.go @@ -7,6 +7,8 @@ import ( "github.com/onflow/flow-go/network/channels" ) +var AuthorizationConfigs 
map[string]MsgAuthConfig + // MsgAuthConfig contains authorization information for a specific flow message. The authorization // is represented as a map from network channel -> list of all roles allowed to send the message on // the channel. diff --git a/network/message/errors.go b/network/message/errors.go index 87bec2b6abd..11b4a7e9798 100644 --- a/network/message/errors.go +++ b/network/message/errors.go @@ -8,7 +8,6 @@ import ( var ( ErrUnauthorizedMessageOnChannel = errors.New("message is not authorized to be sent on channel") ErrUnauthorizedRole = errors.New("sender role not authorized to send message on channel") - AuthorizationConfigs map[string]MsgAuthConfig ) // UnknownMsgTypeErr indicates that no message auth configured for the message type v From cf2ec3cbafcbff97f42bda8d6394e6b87213fffa Mon Sep 17 00:00:00 2001 From: Alexander Hentschel Date: Tue, 12 Jul 2022 16:24:02 -0700 Subject: [PATCH 166/223] :-( --- engine/execution/rpc/engine.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/engine/execution/rpc/engine.go b/engine/execution/rpc/engine.go index 2fa0432910b..cd1465f712a 100644 --- a/engine/execution/rpc/engine.go +++ b/engine/execution/rpc/engine.go @@ -10,11 +10,12 @@ import ( "unicode/utf8" grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" - "github.com/onflow/flow/protobuf/go/flow/execution" "github.com/rs/zerolog" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + + "github.com/onflow/flow/protobuf/go/flow/execution" "github.com/onflow/flow-go/consensus/hotstuff" "github.com/onflow/flow-go/consensus/hotstuff/committees" From 3a415dced69c21aa1d75ed66bfe2bdd4220a9fe2 Mon Sep 17 00:00:00 2001 From: Alexander Hentschel Date: Tue, 12 Jul 2022 17:06:04 -0700 Subject: [PATCH 167/223] still trying to convince linter :-( :-( --- engine/execution/rpc/engine.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/engine/execution/rpc/engine.go 
b/engine/execution/rpc/engine.go index cd1465f712a..2fa0432910b 100644 --- a/engine/execution/rpc/engine.go +++ b/engine/execution/rpc/engine.go @@ -10,12 +10,11 @@ import ( "unicode/utf8" grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" + "github.com/onflow/flow/protobuf/go/flow/execution" "github.com/rs/zerolog" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - - "github.com/onflow/flow/protobuf/go/flow/execution" "github.com/onflow/flow-go/consensus/hotstuff" "github.com/onflow/flow-go/consensus/hotstuff/committees" From b5e0333740953c051441201837521280249a6341 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Tue, 12 Jul 2022 20:20:39 -0400 Subject: [PATCH 168/223] unexport AuthorizationConfigs --- network/message/authorization.go | 98 ++++++++++++++++---------------- 1 file changed, 49 insertions(+), 49 deletions(-) diff --git a/network/message/authorization.go b/network/message/authorization.go index 32df4bb8eb6..42bfe6ec9d7 100644 --- a/network/message/authorization.go +++ b/network/message/authorization.go @@ -7,7 +7,7 @@ import ( "github.com/onflow/flow-go/network/channels" ) -var AuthorizationConfigs map[string]MsgAuthConfig +var authorizationConfigs map[string]MsgAuthConfig // MsgAuthConfig contains authorization information for a specific flow message. 
The authorization // is represented as a map from network channel -> list of all roles allowed to send the message on @@ -40,10 +40,10 @@ func (m MsgAuthConfig) IsAuthorized(role flow.Role, channel channels.Channel) er } func initializeMessageAuthConfigsMap() { - AuthorizationConfigs = make(map[string]MsgAuthConfig) + authorizationConfigs = make(map[string]MsgAuthConfig) // consensus - AuthorizationConfigs[BlockProposal] = MsgAuthConfig{ + authorizationConfigs[BlockProposal] = MsgAuthConfig{ Name: BlockProposal, Type: func() interface{} { return new(messages.BlockProposal) @@ -53,7 +53,7 @@ func initializeMessageAuthConfigsMap() { channels.PushBlocks: {flow.RoleConsensus}, // channel alias ReceiveBlocks = PushBlocks }, } - AuthorizationConfigs[BlockVote] = MsgAuthConfig{ + authorizationConfigs[BlockVote] = MsgAuthConfig{ Name: BlockVote, Type: func() interface{} { return new(messages.BlockVote) @@ -64,7 +64,7 @@ func initializeMessageAuthConfigsMap() { } // protocol state sync - AuthorizationConfigs[SyncRequest] = MsgAuthConfig{ + authorizationConfigs[SyncRequest] = MsgAuthConfig{ Name: SyncRequest, Type: func() interface{} { return new(messages.SyncRequest) @@ -74,7 +74,7 @@ func initializeMessageAuthConfigsMap() { channels.SyncClusterPrefix: {flow.RoleCollection}, }, } - AuthorizationConfigs[SyncResponse] = MsgAuthConfig{ + authorizationConfigs[SyncResponse] = MsgAuthConfig{ Name: SyncResponse, Type: func() interface{} { return new(messages.SyncResponse) @@ -84,7 +84,7 @@ func initializeMessageAuthConfigsMap() { channels.SyncClusterPrefix: {flow.RoleCollection}, }, } - AuthorizationConfigs[RangeRequest] = MsgAuthConfig{ + authorizationConfigs[RangeRequest] = MsgAuthConfig{ Name: RangeRequest, Type: func() interface{} { return new(messages.RangeRequest) @@ -94,7 +94,7 @@ func initializeMessageAuthConfigsMap() { channels.SyncClusterPrefix: {flow.RoleCollection}, }, } - AuthorizationConfigs[BatchRequest] = MsgAuthConfig{ + authorizationConfigs[BatchRequest] = 
MsgAuthConfig{ Name: BatchRequest, Type: func() interface{} { return new(messages.BatchRequest) @@ -104,7 +104,7 @@ func initializeMessageAuthConfigsMap() { channels.SyncClusterPrefix: {flow.RoleCollection}, }, } - AuthorizationConfigs[BlockResponse] = MsgAuthConfig{ + authorizationConfigs[BlockResponse] = MsgAuthConfig{ Name: BlockResponse, Type: func() interface{} { return new(messages.BlockResponse) @@ -115,7 +115,7 @@ func initializeMessageAuthConfigsMap() { } // cluster consensus - AuthorizationConfigs[ClusterBlockProposal] = MsgAuthConfig{ + authorizationConfigs[ClusterBlockProposal] = MsgAuthConfig{ Name: ClusterBlockProposal, Type: func() interface{} { return new(messages.ClusterBlockProposal) @@ -124,7 +124,7 @@ func initializeMessageAuthConfigsMap() { channels.ConsensusClusterPrefix: {flow.RoleCollection}, }, } - AuthorizationConfigs[ClusterBlockVote] = MsgAuthConfig{ + authorizationConfigs[ClusterBlockVote] = MsgAuthConfig{ Name: ClusterBlockVote, Type: func() interface{} { return new(messages.ClusterBlockVote) @@ -133,7 +133,7 @@ func initializeMessageAuthConfigsMap() { channels.ConsensusClusterPrefix: {flow.RoleCollection}, }, } - AuthorizationConfigs[ClusterBlockResponse] = MsgAuthConfig{ + authorizationConfigs[ClusterBlockResponse] = MsgAuthConfig{ Name: ClusterBlockResponse, Type: func() interface{} { return new(messages.ClusterBlockResponse) @@ -144,7 +144,7 @@ func initializeMessageAuthConfigsMap() { } // collections, guarantees & transactions - AuthorizationConfigs[CollectionGuarantee] = MsgAuthConfig{ + authorizationConfigs[CollectionGuarantee] = MsgAuthConfig{ Name: CollectionGuarantee, Type: func() interface{} { return new(flow.CollectionGuarantee) @@ -153,7 +153,7 @@ func initializeMessageAuthConfigsMap() { channels.PushGuarantees: {flow.RoleCollection}, // channel alias ReceiveGuarantees = PushGuarantees }, } - AuthorizationConfigs[TransactionBody] = MsgAuthConfig{ + authorizationConfigs[TransactionBody] = MsgAuthConfig{ Name: 
TransactionBody, Type: func() interface{} { return new(flow.TransactionBody) @@ -164,7 +164,7 @@ func initializeMessageAuthConfigsMap() { } // core messages for execution & verification - AuthorizationConfigs[ExecutionReceipt] = MsgAuthConfig{ + authorizationConfigs[ExecutionReceipt] = MsgAuthConfig{ Name: ExecutionReceipt, Type: func() interface{} { return new(flow.ExecutionReceipt) @@ -173,7 +173,7 @@ func initializeMessageAuthConfigsMap() { channels.PushReceipts: {flow.RoleExecution}, // channel alias ReceiveReceipts = PushReceipts }, } - AuthorizationConfigs[ResultApproval] = MsgAuthConfig{ + authorizationConfigs[ResultApproval] = MsgAuthConfig{ Name: ResultApproval, Type: func() interface{} { return new(flow.ResultApproval) @@ -184,7 +184,7 @@ func initializeMessageAuthConfigsMap() { } // data exchange for execution of blocks - AuthorizationConfigs[ChunkDataRequest] = MsgAuthConfig{ + authorizationConfigs[ChunkDataRequest] = MsgAuthConfig{ Name: ChunkDataRequest, Type: func() interface{} { return new(messages.ChunkDataRequest) @@ -193,7 +193,7 @@ func initializeMessageAuthConfigsMap() { channels.RequestChunks: {flow.RoleVerification}, // channel alias RequestChunks = ProvideChunks }, } - AuthorizationConfigs[ChunkDataResponse] = MsgAuthConfig{ + authorizationConfigs[ChunkDataResponse] = MsgAuthConfig{ Name: ChunkDataResponse, Type: func() interface{} { return new(messages.ChunkDataResponse) @@ -204,7 +204,7 @@ func initializeMessageAuthConfigsMap() { } // result approvals - AuthorizationConfigs[ApprovalRequest] = MsgAuthConfig{ + authorizationConfigs[ApprovalRequest] = MsgAuthConfig{ Name: ApprovalRequest, Type: func() interface{} { return new(messages.ApprovalRequest) @@ -213,7 +213,7 @@ func initializeMessageAuthConfigsMap() { channels.RequestApprovalsByChunk: {flow.RoleConsensus}, // channel alias ProvideApprovalsByChunk = RequestApprovalsByChunk }, } - AuthorizationConfigs[ApprovalResponse] = MsgAuthConfig{ + authorizationConfigs[ApprovalResponse] = 
MsgAuthConfig{ Name: ApprovalResponse, Type: func() interface{} { return new(messages.ApprovalResponse) @@ -225,7 +225,7 @@ func initializeMessageAuthConfigsMap() { } // generic entity exchange engines - AuthorizationConfigs[EntityRequest] = MsgAuthConfig{ + authorizationConfigs[EntityRequest] = MsgAuthConfig{ Name: EntityRequest, Type: func() interface{} { return new(messages.EntityRequest) @@ -235,7 +235,7 @@ func initializeMessageAuthConfigsMap() { channels.RequestCollections: {flow.RoleAccess, flow.RoleExecution}, }, } - AuthorizationConfigs[EntityResponse] = MsgAuthConfig{ + authorizationConfigs[EntityResponse] = MsgAuthConfig{ Name: EntityResponse, Type: func() interface{} { return new(messages.EntityResponse) @@ -247,7 +247,7 @@ func initializeMessageAuthConfigsMap() { } // testing - AuthorizationConfigs[TestMessage] = MsgAuthConfig{ + authorizationConfigs[TestMessage] = MsgAuthConfig{ Name: TestMessage, Type: func() interface{} { return new(message.TestMessage) @@ -259,7 +259,7 @@ func initializeMessageAuthConfigsMap() { } // DKG - AuthorizationConfigs[DKGMessage] = MsgAuthConfig{ + authorizationConfigs[DKGMessage] = MsgAuthConfig{ Name: DKGMessage, Type: func() interface{} { return new(messages.DKGMessage) @@ -278,75 +278,75 @@ func GetMessageAuthConfig(v interface{}) (MsgAuthConfig, error) { switch v.(type) { // consensus case *messages.BlockProposal: - return AuthorizationConfigs[BlockProposal], nil + return authorizationConfigs[BlockProposal], nil case *messages.BlockVote: - return AuthorizationConfigs[BlockVote], nil + return authorizationConfigs[BlockVote], nil // protocol state sync case *messages.SyncRequest: - return AuthorizationConfigs[SyncRequest], nil + return authorizationConfigs[SyncRequest], nil case *messages.SyncResponse: - return AuthorizationConfigs[SyncResponse], nil + return authorizationConfigs[SyncResponse], nil case *messages.RangeRequest: - return AuthorizationConfigs[RangeRequest], nil + return authorizationConfigs[RangeRequest], 
nil case *messages.BatchRequest: - return AuthorizationConfigs[BatchRequest], nil + return authorizationConfigs[BatchRequest], nil case *messages.BlockResponse: - return AuthorizationConfigs[BlockResponse], nil + return authorizationConfigs[BlockResponse], nil // cluster consensus case *messages.ClusterBlockProposal: - return AuthorizationConfigs[ClusterBlockProposal], nil + return authorizationConfigs[ClusterBlockProposal], nil case *messages.ClusterBlockVote: - return AuthorizationConfigs[ClusterBlockVote], nil + return authorizationConfigs[ClusterBlockVote], nil case *messages.ClusterBlockResponse: - return AuthorizationConfigs[ClusterBlockResponse], nil + return authorizationConfigs[ClusterBlockResponse], nil // collections, guarantees & transactions case *flow.CollectionGuarantee: - return AuthorizationConfigs[CollectionGuarantee], nil + return authorizationConfigs[CollectionGuarantee], nil case *flow.TransactionBody: - return AuthorizationConfigs[TransactionBody], nil + return authorizationConfigs[TransactionBody], nil case *flow.Transaction: - return AuthorizationConfigs[Transaction], nil + return authorizationConfigs[Transaction], nil // core messages for execution & verification case *flow.ExecutionReceipt: - return AuthorizationConfigs[ExecutionReceipt], nil + return authorizationConfigs[ExecutionReceipt], nil case *flow.ResultApproval: - return AuthorizationConfigs[ResultApproval], nil + return authorizationConfigs[ResultApproval], nil // execution state synchronization case *messages.ExecutionStateSyncRequest: - return AuthorizationConfigs[ExecutionStateSyncRequest], nil + return authorizationConfigs[ExecutionStateSyncRequest], nil case *messages.ExecutionStateDelta: - return AuthorizationConfigs[ExecutionStateDelta], nil + return authorizationConfigs[ExecutionStateDelta], nil // data exchange for execution of blocks case *messages.ChunkDataRequest: - return AuthorizationConfigs[ChunkDataRequest], nil + return authorizationConfigs[ChunkDataRequest], nil 
case *messages.ChunkDataResponse: - return AuthorizationConfigs[ChunkDataResponse], nil + return authorizationConfigs[ChunkDataResponse], nil // result approvals case *messages.ApprovalRequest: - return AuthorizationConfigs[ApprovalRequest], nil + return authorizationConfigs[ApprovalRequest], nil case *messages.ApprovalResponse: - return AuthorizationConfigs[ApprovalResponse], nil + return authorizationConfigs[ApprovalResponse], nil // generic entity exchange engines case *messages.EntityRequest: - return AuthorizationConfigs[EntityRequest], nil + return authorizationConfigs[EntityRequest], nil case *messages.EntityResponse: - return AuthorizationConfigs[EntityResponse], nil + return authorizationConfigs[EntityResponse], nil // testing case *message.TestMessage: - return AuthorizationConfigs[TestMessage], nil + return authorizationConfigs[TestMessage], nil // dkg case *messages.DKGMessage: - return AuthorizationConfigs[DKGMessage], nil + return authorizationConfigs[DKGMessage], nil default: return MsgAuthConfig{}, NewUnknownMsgTypeErr(v) From 800f5d1a57a9a6cfbbc2ebc78d3875ac4357a406 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Tue, 12 Jul 2022 20:44:13 -0400 Subject: [PATCH 169/223] add GetAllMessageAuthConfigs func --- network/message/authorization.go | 10 ++++++++++ .../pubsub/authorized_sender_validator_test.go | 8 +++++--- 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/network/message/authorization.go b/network/message/authorization.go index 42bfe6ec9d7..d60a52ce9b8 100644 --- a/network/message/authorization.go +++ b/network/message/authorization.go @@ -352,3 +352,13 @@ func GetMessageAuthConfig(v interface{}) (MsgAuthConfig, error) { return MsgAuthConfig{}, NewUnknownMsgTypeErr(v) } } + +// GetAllMessageAuthConfigs returns all the configured message auth configurations. 
+func GetAllMessageAuthConfigs() []MsgAuthConfig { + configs := make([]MsgAuthConfig, 0) + for _, config := range authorizationConfigs { + configs = append(configs, config) + } + + return configs +} diff --git a/network/validator/pubsub/authorized_sender_validator_test.go b/network/validator/pubsub/authorized_sender_validator_test.go index 99124c645d9..a9869cb21f0 100644 --- a/network/validator/pubsub/authorized_sender_validator_test.go +++ b/network/validator/pubsub/authorized_sender_validator_test.go @@ -233,7 +233,7 @@ func (s *TestAuthorizedSenderValidatorSuite) TestValidatorCallback_ValidationFai // initializeAuthorizationTestCases initializes happy and sad path test cases for checking authorized and unauthorized role message combinations. func (s *TestAuthorizedSenderValidatorSuite) initializeAuthorizationTestCases() { - for _, c := range message.AuthorizationConfigs { + for _, c := range message.GetAllMessageAuthConfigs() { for channel, authorizedRoles := range c.Config { for _, role := range flow.Roles() { identity, _ := unittest.IdentityWithNetworkingKeyFixture(unittest.WithRole(role)) @@ -260,13 +260,15 @@ func (s *TestAuthorizedSenderValidatorSuite) initializeAuthorizationTestCases() // initializeInvalidMessageOnChannelTestCases initializes test cases for all possible combinations of invalid message types on channel. // NOTE: the role in the test case does not matter since ErrUnauthorizedMessageOnChannel will be returned before the role is checked. 
func (s *TestAuthorizedSenderValidatorSuite) initializeInvalidMessageOnChannelTestCases() { + configs := message.GetAllMessageAuthConfigs() + // iterate all channels - for _, c := range message.AuthorizationConfigs { + for _, c := range configs { for channel, authorizedRoles := range c.Config { identity, _ := unittest.IdentityWithNetworkingKeyFixture(unittest.WithRole(authorizedRoles[0])) // iterate all message types - for _, config := range message.AuthorizationConfigs { + for _, config := range configs { // include test if message type is not authorized on channel _, ok := config.Config[channel] From a97141b4b28add973f884821e752f43834b2343c Mon Sep 17 00:00:00 2001 From: Alexander Hentschel Date: Tue, 12 Jul 2022 17:46:34 -0700 Subject: [PATCH 170/223] next try to convince linter --- engine/execution/rpc/engine.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/engine/execution/rpc/engine.go b/engine/execution/rpc/engine.go index 2fa0432910b..de405e99eeb 100644 --- a/engine/execution/rpc/engine.go +++ b/engine/execution/rpc/engine.go @@ -57,11 +57,10 @@ func New( exeResults storage.ExecutionResults, txResults storage.TransactionResults, chainID flow.ChainID, - apiRatelimits map[string]int, // the api rate limit (max calls per second) for each of the gRPC API e.g. Ping->100, ExecuteScriptAtBlockID->300 + apiRatelimits map[string]int, // the api rate limit (max calls per second) for each of the gRPC API e.g. Ping->100, ExecuteScriptAtBlockID->300 apiBurstLimits map[string]int, // the api burst limit (max calls at the same time) for each of the gRPC API e.g. 
Ping->50, ExecuteScriptAtBlockID->10 ) (*Engine, error) { log = log.With().Str("engine", "rpc").Logger() - if config.MaxMsgSize == 0 { config.MaxMsgSize = grpcutils.DefaultMaxMsgSize } From 48f129319f9c322d73ab57084be5c3fbdf6c73d4 Mon Sep 17 00:00:00 2001 From: Alexander Hentschel Date: Tue, 12 Jul 2022 18:14:43 -0700 Subject: [PATCH 171/223] wip --- access/handler.go | 33 ++++-- consensus/hotstuff/committee.go | 9 ++ .../signature/block_signer_decoder.go | 82 ++++++++++++++ .../signature/block_signer_decoder_test.go | 100 ++++++++++++++++++ consensus/hotstuff/signature/util.go | 32 ------ engine/access/rpc/engine.go | 3 +- engine/access/rpc/engine_builder.go | 4 +- state/errors.go | 20 ++++ 8 files changed, 237 insertions(+), 46 deletions(-) create mode 100644 consensus/hotstuff/signature/block_signer_decoder.go create mode 100644 consensus/hotstuff/signature/block_signer_decoder_test.go delete mode 100644 consensus/hotstuff/signature/util.go diff --git a/access/handler.go b/access/handler.go index 430f659110c..d350bef3905 100644 --- a/access/handler.go +++ b/access/handler.go @@ -16,20 +16,23 @@ import ( ) type Handler struct { - api API - chain flow.Chain - // TODO: update to Replicas API once active PaceMaker is merged - committee hotstuff.Committee + api API + chain flow.Chain + sgnIdcsDecoder hotstuff.BlockSignerDecoder } type HandlerOption func(*Handler) -func NewHandler(api API, chain flow.Chain, committee hotstuff.Committee) *Handler { - return &Handler{ - api: api, - chain: chain, - committee: committee, +func NewHandler(api API, chain flow.Chain, options ...HandlerOption) *Handler { + h := &Handler{ + api: api, + chain: chain, + sgnIdcsDecoder: &signature.NoopBlockSignerDecoder{}, } + for _, opt := range options { + opt(h) + } + return h } // Ping the Access API server for a response. @@ -486,7 +489,7 @@ func (h *Handler) GetExecutionResultForBlockID(ctx context.Context, req *access. 
} func (h *Handler) blockResponse(block *flow.Block, fullResponse bool) (*access.BlockResponse, error) { - signerIDs, err := signature.DecodeSignerIDs(h.committee, block.Header) + signerIDs, err := h.sgnIdcsDecoder.DecodeSignerIDs(block.Header) if err != nil { return nil, err } @@ -506,7 +509,7 @@ func (h *Handler) blockResponse(block *flow.Block, fullResponse bool) (*access.B } func (h *Handler) blockHeaderResponse(header *flow.Header) (*access.BlockHeaderResponse, error) { - signerIDs, err := signature.DecodeSignerIDs(h.committee, header) + signerIDs, err := h.sgnIdcsDecoder.DecodeSignerIDs(header) if err != nil { return nil, err } @@ -556,3 +559,11 @@ func blockEventsToMessage(block flow.BlockEvents) (*access.EventsResponse_Result Events: eventMessages, }, nil } + +// WithBlockSignerDecoder configures the Handler to decode signer indices +// via the provided hotstuff.BlockSignerDecoder +func WithBlockSignerDecoder(sgnIdcsDecoder hotstuff.BlockSignerDecoder) func(*Handler) { + return func(handler *Handler) { + handler.sgnIdcsDecoder = sgnIdcsDecoder + } +} diff --git a/consensus/hotstuff/committee.go b/consensus/hotstuff/committee.go index 09f62f43e5d..0027894ec29 100644 --- a/consensus/hotstuff/committee.go +++ b/consensus/hotstuff/committee.go @@ -49,6 +49,15 @@ type Committee interface { DKG(blockID flow.Identifier) (DKG, error) } +type BlockSignerDecoder interface { + // DecodeSignerIDs decodes the signer indices from the given block header into full node IDs. 
+ // Expected Error returns during normal operations: + // * state.UnknownBlockError if block has not been ingested yet + // * signature.InvalidSignerIndicesError if signer indices included in the header do + // not encode a valid subset of the consensus committee + DecodeSignerIDs(header *flow.Header) (flow.IdentifierList, error) +} + type DKG interface { protocol.DKG } diff --git a/consensus/hotstuff/signature/block_signer_decoder.go b/consensus/hotstuff/signature/block_signer_decoder.go new file mode 100644 index 00000000000..59fdc4577d6 --- /dev/null +++ b/consensus/hotstuff/signature/block_signer_decoder.go @@ -0,0 +1,82 @@ +package signature + +import ( + "errors" + "fmt" + "github.com/onflow/flow-go/state" + "github.com/onflow/flow-go/storage" + + "github.com/onflow/flow-go/consensus/hotstuff" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/signature" +) + +// BlockSignerDecoder is a wrapper around the `hotstuff.Committee`, which implements +// the auxilluary logic for de-coding signer indices of a block (header) to full node IDs +type BlockSignerDecoder struct { + // TODO: update to Replicas API once active PaceMaker is merged + hotstuff.Committee +} + +func NewBlockSignerDecoder(committee hotstuff.Committee) *BlockSignerDecoder { + return &BlockSignerDecoder{committee} +} + +var _ hotstuff.BlockSignerDecoder = (*BlockSignerDecoder)(nil) + +// DecodeSignerIDs decodes the signer indices from the given block header into full node IDs. 
+// Expected Error returns during normal operations: +// * state.UnknownBlockError if block has not been ingested yet +// * signature.InvalidSignerIndicesError if signer indices included in the header do +// not encode a valid subset of the consensus committee +func (b *BlockSignerDecoder) DecodeSignerIDs(header *flow.Header) (flow.IdentifierList, error) { + // root block does not have signer indices + if header.ParentVoterIndices == nil && header.View == 0 { + return []flow.Identifier{}, nil + } + + id := header.ID() + members, err := b.Identities(id) + if err != nil { + if errors.Is(err, storage.ErrNotFound) { + return nil, state.NewUnknownBlockErrorf("block %v has not been processed yet: %w", id, err) + } + return nil, fmt.Errorf("fail to retrieve identities for block %v: %w", id, err) + } + signerIDs, err := signature.DecodeSignerIndicesToIdentifiers(members.NodeIDs(), header.ParentVoterIndices) + if err != nil { + return nil, fmt.Errorf("could not decode signer indices for block %v: %w", header.ID(), err) + } + + return signerIDs, nil +} + +// NoopBlockSignerDecoder does not decode any signer indices and consistenlty return nil +type NoopBlockSignerDecoder struct{} + +func (b *NoopBlockSignerDecoder) DecodeSignerIDs(_ *flow.Header) ([]flow.Identifier, error) { + return nil, nil +} + +// DecodeSignerIDs decodes the signer indices from the given block header, and finds the signer identifiers from protocol state +// Expected Error returns during normal operations: +// * storage.ErrNotFound if block not found for the given header +// * signature.InvalidSignerIndicesError if `signerIndices` does not encode a valid subset of the consensus committee +// TODO: change `protocol.State` to `Replicas` API once active PaceMaker is merged +func DecodeSignerIDs(committee hotstuff.Committee, header *flow.Header) ([]flow.Identifier, error) { + // root block does not have signer indices + if header.ParentVoterIndices == nil && header.View == 0 { + return []flow.Identifier{}, 
nil + } + + members, err := committee.Identities(header.ID()) + if err != nil { + return nil, fmt.Errorf("fail to retrieve identities for block %v: %w", header.ID(), err) + } + signerIDs, err := signature.DecodeSignerIndicesToIdentifiers(members.NodeIDs(), header.ParentVoterIndices) + if err != nil { + return nil, fmt.Errorf("could not decode signer indices for block %v: %w", header.ID(), err) + } + + return signerIDs, nil +} diff --git a/consensus/hotstuff/signature/block_signer_decoder_test.go b/consensus/hotstuff/signature/block_signer_decoder_test.go new file mode 100644 index 00000000000..acdd5b07057 --- /dev/null +++ b/consensus/hotstuff/signature/block_signer_decoder_test.go @@ -0,0 +1,100 @@ +package signature + +import ( + "errors" + hotstuff "github.com/onflow/flow-go/consensus/hotstuff/mocks" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/flow/order" + "github.com/onflow/flow-go/module/signature" + "github.com/onflow/flow-go/state" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/utils/unittest" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "testing" +) + +func TestBlockSignerDecoder(t *testing.T) { + suite.Run(t, new(blockSignerDecoderSuite)) +} + +type blockSignerDecoderSuite struct { + suite.Suite + allConsensus flow.IdentityList + committee *hotstuff.Committee + + decoder *BlockSignerDecoder + block flow.Block +} + +func (s *blockSignerDecoderSuite) SetupTest() { + // the default header fixture creates signerIDs for a committee of 10 nodes, so we prepare a committee same as that + s.allConsensus = unittest.IdentityListFixture(40, unittest.WithRole(flow.RoleConsensus)).Sort(order.Canonical) + + // mock consensus committee + s.committee = new(hotstuff.Committee) + s.committee.On("Identities", mock.Anything).Return(s.allConsensus, nil) + + // prepare valid test block: + voterIndices, err := 
signature.EncodeSignersToIndices(s.allConsensus.NodeIDs(), s.allConsensus.NodeIDs()) + require.NoError(s.T(), err) + s.block = unittest.BlockFixture() + s.block.Header.ParentVoterIndices = voterIndices + + s.decoder = NewBlockSignerDecoder(s.committee) +} + +// Test_SuccessfulDecode tests happy path decoding +func (s *blockSignerDecoderSuite) Test_SuccessfulDecode() { + ids, err := s.decoder.DecodeSignerIDs(s.block.Header) + require.NoError(s.T(), err) + require.Equal(s.T(), s.allConsensus.NodeIDs(), ids) +} + +// Test_RootBlock tests decoder accepts root block with empty signer indices +func (s *blockSignerDecoderSuite) Test_RootBlock() { + s.block.Header.ParentVoterIndices = nil + s.block.Header.ParentVoterSigData = nil + s.block.Header.View = 0 + + ids, err := s.decoder.DecodeSignerIDs(s.block.Header) + require.NoError(s.T(), err) + require.Empty(s.T(), ids) +} + +// Test_UnknownBlock tests handling of an unknwon block. +// At the moment, hotstuff.Committee returns an storage.ErrNotFound for an unknown block, +// which we expect the `BlockSignerDecoder` to wrap into a `state.UnknownBlockError` +func (s *blockSignerDecoderSuite) Test_UnknownBlock() { + *s.committee = hotstuff.Committee{} + s.committee.On("Identities", mock.Anything).Return(nil, storage.ErrNotFound) + + ids, err := s.decoder.DecodeSignerIDs(s.block.Header) + require.Empty(s.T(), ids) + require.True(s.T(), state.IsUnknownBlockError(err)) +} + +// Test_UnexpectedCommitteeException verifies that `BlockSignerDecoder` +// does _not_ erroneously interpret an unexpecgted exception from the committee as +// a sign of an unknown block, i.e. 
the decouder should _not_ return an `state.UnknownBlockError` +func (s *blockSignerDecoderSuite) Test_UnexpectedCommitteeException() { + exception := errors.New("unexpected exception") + *s.committee = hotstuff.Committee{} + s.committee.On("Identities", mock.Anything).Return(nil, exception) + + ids, err := s.decoder.DecodeSignerIDs(s.block.Header) + require.Empty(s.T(), ids) + require.False(s.T(), state.IsUnknownBlockError(err)) + require.True(s.T(), errors.Is(err, exception)) +} + +// Test_InvalidIndices verifies that `BlockSignerDecoder` returns +// signature.InvalidSignerIndicesError is the signer indices in the provided header +// are not a valid encoding. +func (s *blockSignerDecoderSuite) Test_InvalidIndices() { + s.block.Header.ParentVoterIndices = unittest.RandomBytes(1) + ids, err := s.decoder.DecodeSignerIDs(s.block.Header) + require.Empty(s.T(), ids) + require.True(s.T(), signature.IsInvalidSignerIndicesError(err)) +} diff --git a/consensus/hotstuff/signature/util.go b/consensus/hotstuff/signature/util.go deleted file mode 100644 index a5158fc26bb..00000000000 --- a/consensus/hotstuff/signature/util.go +++ /dev/null @@ -1,32 +0,0 @@ -package signature - -import ( - "fmt" - - "github.com/onflow/flow-go/consensus/hotstuff" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module/signature" -) - -// DecodeSignerIDs decodes the signer indices from the given block header, and finds the signer identifiers from protocol state -// Expected Error returns during normal operations: -// * storage.ErrNotFound if block not found for the given header -// * signature.InvalidSignerIndicesError if `signerIndices` does not encode a valid subset of the consensus committee -// TODO: change `protocol.State` to `Replicas` API once active PaceMaker is merged -func DecodeSignerIDs(committee hotstuff.Committee, header *flow.Header) ([]flow.Identifier, error) { - // root block does not have signer indices - if header.ParentVoterIndices == nil && header.View == 
0 { - return []flow.Identifier{}, nil - } - - members, err := committee.Identities(header.ID()) - if err != nil { - return nil, fmt.Errorf("fail to retrieve identities for block %v: %w", header.ID(), err) - } - signerIDs, err := signature.DecodeSignerIndicesToIdentifiers(members.NodeIDs(), header.ParentVoterIndices) - if err != nil { - return nil, fmt.Errorf("could not decode signer indices for block %v: %w", header.ID(), err) - } - - return signerIDs, nil -} diff --git a/engine/access/rpc/engine.go b/engine/access/rpc/engine.go index 09621153c37..7c6f10d16ad 100644 --- a/engine/access/rpc/engine.go +++ b/engine/access/rpc/engine.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "github.com/onflow/flow-go/consensus/hotstuff/signature" "net" "net/http" "sync" @@ -193,7 +194,7 @@ func NewBuilder(log zerolog.Logger, return nil, fmt.Errorf("failed to initialize hotstuff.Committee abstractiono: %w", err) } - builder := NewRPCEngineBuilder(eng, committee) + builder := NewRPCEngineBuilder(eng, signature.NewBlockSignerDecoder(committee)) if rpcMetricsEnabled { builder.WithMetrics() } diff --git a/engine/access/rpc/engine_builder.go b/engine/access/rpc/engine_builder.go index dd678423efe..1936d46d94b 100644 --- a/engine/access/rpc/engine_builder.go +++ b/engine/access/rpc/engine_builder.go @@ -13,10 +13,10 @@ import ( ) // NewRPCEngineBuilder helps to build a new RPC engine. 
-func NewRPCEngineBuilder(engine *Engine, committee hotstuff.Committee) *RPCEngineBuilder { +func NewRPCEngineBuilder(engine *Engine, sgnIdcsDecoder hotstuff.BlockSignerDecoder) *RPCEngineBuilder { builder := &RPCEngineBuilder{} builder.Engine = engine - builder.localAPIServer = access.NewHandler(builder.backend, builder.chain, committee) + builder.localAPIServer = access.NewHandler(builder.backend, builder.chain, access.WithBlockSignerDecoder(sgnIdcsDecoder)) return builder } diff --git a/state/errors.go b/state/errors.go index becea097404..b74c740f4f8 100644 --- a/state/errors.go +++ b/state/errors.go @@ -90,3 +90,23 @@ func (e NoValidChildBlockError) Error() string { func IsNoValidChildBlockError(err error) bool { return errors.As(err, &NoValidChildBlockError{}) } + +// UnknownBlockError is a sentinel error indicating that a certain block +// has not been ingested yet. +type UnknownBlockError struct { + err error +} + +func NewUnknownBlockErrorf(msg string, args ...interface{}) error { + return UnknownBlockError{ + err: fmt.Errorf(msg, args...), + } +} + +func (e UnknownBlockError) Unwrap() error { return e.err } +func (e UnknownBlockError) Error() string { return e.err.Error() } + +func IsUnknownBlockError(err error) bool { + var e UnknownBlockError + return errors.As(err, &e) +} From da0831eb8329671aa37621fac57f8e1e7cb449eb Mon Sep 17 00:00:00 2001 From: Alexander Hentschel Date: Tue, 12 Jul 2022 18:58:50 -0700 Subject: [PATCH 172/223] attempt failed --- .../hotstuff/mocks/block_signer_decoder.go | 52 +++++++++++++ .../signature/block_signer_decoder.go | 2 +- engine/access/access_test.go | 76 +++++++------------ 3 files changed, 80 insertions(+), 50 deletions(-) create mode 100644 consensus/hotstuff/mocks/block_signer_decoder.go diff --git a/consensus/hotstuff/mocks/block_signer_decoder.go b/consensus/hotstuff/mocks/block_signer_decoder.go new file mode 100644 index 00000000000..4159e52620f --- /dev/null +++ b/consensus/hotstuff/mocks/block_signer_decoder.go 
@@ -0,0 +1,52 @@ +// Code generated by mockery v2.13.0. DO NOT EDIT. + +package mocks + +import ( + flow "github.com/onflow/flow-go/model/flow" + + mock "github.com/stretchr/testify/mock" +) + +// BlockSignerDecoder is an autogenerated mock type for the BlockSignerDecoder type +type BlockSignerDecoder struct { + mock.Mock +} + +// DecodeSignerIDs provides a mock function with given fields: header +func (_m *BlockSignerDecoder) DecodeSignerIDs(header *flow.Header) (flow.IdentifierList, error) { + ret := _m.Called(header) + + var r0 flow.IdentifierList + if rf, ok := ret.Get(0).(func(*flow.Header) flow.IdentifierList); ok { + r0 = rf(header) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(flow.IdentifierList) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(*flow.Header) error); ok { + r1 = rf(header) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type NewBlockSignerDecoderT interface { + mock.TestingT + Cleanup(func()) +} + +// NewBlockSignerDecoder creates a new instance of BlockSignerDecoder. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
+func NewBlockSignerDecoder(t NewBlockSignerDecoderT) *BlockSignerDecoder { + mock := &BlockSignerDecoder{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/hotstuff/signature/block_signer_decoder.go b/consensus/hotstuff/signature/block_signer_decoder.go index 59fdc4577d6..c70fc6813a1 100644 --- a/consensus/hotstuff/signature/block_signer_decoder.go +++ b/consensus/hotstuff/signature/block_signer_decoder.go @@ -54,7 +54,7 @@ func (b *BlockSignerDecoder) DecodeSignerIDs(header *flow.Header) (flow.Identifi // NoopBlockSignerDecoder does not decode any signer indices and consistenlty return nil type NoopBlockSignerDecoder struct{} -func (b *NoopBlockSignerDecoder) DecodeSignerIDs(_ *flow.Header) ([]flow.Identifier, error) { +func (b *NoopBlockSignerDecoder) DecodeSignerIDs(_ *flow.Header) (flow.IdentifierList, error) { return nil, nil } diff --git a/engine/access/access_test.go b/engine/access/access_test.go index 7eda3af9355..f41defbeb41 100644 --- a/engine/access/access_test.go +++ b/engine/access/access_test.go @@ -17,10 +17,8 @@ import ( "github.com/stretchr/testify/suite" "github.com/onflow/flow-go/access" - "github.com/onflow/flow-go/consensus/hotstuff" - "github.com/onflow/flow-go/consensus/hotstuff/committees" + hsmock "github.com/onflow/flow-go/consensus/hotstuff/mocks" "github.com/onflow/flow-go/consensus/hotstuff/model" - consig "github.com/onflow/flow-go/consensus/hotstuff/signature" "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/engine/access/ingestion" accessmock "github.com/onflow/flow-go/engine/access/mock" @@ -47,19 +45,20 @@ import ( type Suite struct { suite.Suite - state *protocol.State - committee hotstuff.Committee - snapshot *protocol.Snapshot - epochQuery *protocol.EpochQuery - log zerolog.Logger - net *mocknetwork.Network - request *module.Requester - collClient *accessmock.AccessAPIClient - execClient *accessmock.ExecutionAPIClient - me *module.Local - chainID 
flow.ChainID - metrics *metrics.NoopCollector - backend *backend.Backend + state *protocol.State + snapshot *protocol.Snapshot + epochQuery *protocol.EpochQuery + signerIndicesDecoder *hsmock.BlockSignerDecoder + signerIds flow.IdentifierList + log zerolog.Logger + net *mocknetwork.Network + request *module.Requester + collClient *accessmock.AccessAPIClient + execClient *accessmock.ExecutionAPIClient + me *module.Local + chainID flow.ChainID + metrics *metrics.NoopCollector + backend *backend.Backend } // TestAccess tests scenarios which exercise multiple API calls using both the RPC handler and the ingest engine @@ -69,13 +68,10 @@ func TestAccess(t *testing.T) { } func (suite *Suite) SetupTest() { - var err error suite.log = zerolog.New(os.Stderr) suite.net = new(mocknetwork.Network) suite.snapshot = new(protocol.Snapshot) suite.state = new(protocol.State) - suite.committee, err = committees.NewConsensusCommittee(suite.state, flow.ZeroID) - suite.Require().NoError(err) suite.epochQuery = new(protocol.EpochQuery) suite.state.On("Sealed").Return(suite.snapshot, nil).Maybe() @@ -94,6 +90,10 @@ func (suite *Suite) SetupTest() { suite.me = new(module.Local) + suite.signerIds = unittest.IdentifierListFixture(4) + suite.signerIndicesDecoder = new(hsmock.BlockSignerDecoder) + suite.signerIndicesDecoder.On("DecodeSignerIDs", mock.Anything).Return(suite.signerIds, nil).Maybe() + accessIdentity := unittest.IdentityFixture(unittest.WithRole(flow.RoleAccess)) suite.me. On("NodeID"). 
@@ -132,8 +132,7 @@ func (suite *Suite) RunTest( backend.DefaultSnapshotHistoryLimit, ) - handler := access.NewHandler(suite.backend, suite.chainID.Chain(), suite.committee) - + handler := access.NewHandler(suite.backend, suite.chainID.Chain(), access.WithBlockSignerDecoder(suite.signerIndicesDecoder)) f(handler, db, blocks, headers, results) }) } @@ -308,7 +307,7 @@ func (suite *Suite) TestSendTransactionToRandomCollectionNode() { backend.DefaultSnapshotHistoryLimit, ) - handler := access.NewHandler(backend, suite.chainID.Chain(), suite.committee) + handler := access.NewHandler(backend, suite.chainID.Chain()) // Send transaction 1 resp, err := handler.SendTransaction(context.Background(), sendReq1) @@ -347,40 +346,24 @@ func (suite *Suite) TestSendTransactionToRandomCollectionNode() { func (suite *Suite) TestGetBlockByIDAndHeight() { suite.RunTest(func(handler *access.Handler, db *badger.DB, blocks *storage.Blocks, _ *storage.Headers, _ *storage.ExecutionResults) { - // the default header fixture creates a signerIDs out of 10 nodes committee, so we prepare a committee same as that - allConsensus := unittest.IdentityListFixture(40, unittest.WithRole(flow.RoleConsensus)) - - voterIndices, err := signature.EncodeSignersToIndices(allConsensus.NodeIDs(), allConsensus.NodeIDs()) - require.NoError(suite.T(), err) // test block1 get by ID block1 := unittest.BlockFixture() - block1.Header.ParentVoterIndices = voterIndices // test block2 get by height block2 := unittest.BlockFixture() block2.Header.Height = 2 - block2.Header.ParentVoterIndices = voterIndices require.NoError(suite.T(), blocks.Store(&block1)) require.NoError(suite.T(), blocks.Store(&block2)) // the follower logic should update height index on the block storage when a block is finalized - err = db.Update(operation.IndexBlockHeight(block2.Header.Height, block2.ID())) + err := db.Update(operation.IndexBlockHeight(block2.Header.Height, block2.ID())) require.NoError(suite.T(), err) - snapshotForSignerIDs := 
new(protocol.Snapshot) - snapshotForSignerIDs.On("Identities", mock.Anything).Return(allConsensus, nil) - suite.state.On("AtBlockID", block1.Header.ID()).Return(snapshotForSignerIDs, nil) - suite.state.On("AtBlockID", block2.Header.ID()).Return(snapshotForSignerIDs, nil) - assertHeaderResp := func(resp *accessproto.BlockHeaderResponse, err error, header *flow.Header) { - suite.state.On("AtBlockID", header.ID()).Return(snapshotForSignerIDs, nil) - expectedSignerIDs, err := consig.DecodeSignerIDs(suite.committee, header) - require.NoError(suite.T(), err) - require.NoError(suite.T(), err) require.NotNil(suite.T(), resp) actual := *resp.Block - expectedMessage, err := convert.BlockHeaderToMessage(header, expectedSignerIDs) + expectedMessage, err := convert.BlockHeaderToMessage(header, suite.signerIds) require.NoError(suite.T(), err) require.Equal(suite.T(), *expectedMessage, actual) expectedBlockHeader, err := convert.MessageToBlockHeader(&actual) @@ -392,10 +375,7 @@ func (suite *Suite) TestGetBlockByIDAndHeight() { require.NoError(suite.T(), err) require.NotNil(suite.T(), resp) actual := resp.Block - suite.state.On("AtBlockID", block.Header.ID()).Return(snapshotForSignerIDs, nil) - expectedSignerIDs, err := consig.DecodeSignerIDs(suite.committee, block.Header) - require.NoError(suite.T(), err) - expectedMessage, err := convert.BlockToMessage(block, expectedSignerIDs) + expectedMessage, err := convert.BlockToMessage(block, suite.signerIds) require.NoError(suite.T(), err) require.Equal(suite.T(), expectedMessage, actual) expectedBlock, err := convert.MessageToBlock(resp.Block) @@ -414,7 +394,6 @@ func (suite *Suite) TestGetBlockByIDAndHeight() { suite.Run("get header 1 by ID", func() { // get header by ID id := block1.ID() - suite.state.On("AtBlockID", block1.Header.ID()).Return(snapshotForSignerIDs, nil) req := &accessproto.GetBlockHeaderByIDRequest{ Id: id[:], } @@ -638,7 +617,7 @@ func (suite *Suite) TestGetSealedTransaction() { backend.DefaultSnapshotHistoryLimit, 
) - handler := access.NewHandler(backend, suite.chainID.Chain(), suite.committee) + handler := access.NewHandler(backend, suite.chainID.Chain()) rpcEngBuilder, err := rpc.NewBuilder(suite.log, suite.state, rpc.Config{}, nil, nil, blocks, headers, collections, transactions, receipts, results, suite.chainID, metrics, metrics, 0, 0, false, false, nil, nil) @@ -731,7 +710,7 @@ func (suite *Suite) TestExecuteScript() { backend.DefaultSnapshotHistoryLimit, ) - handler := access.NewHandler(suite.backend, suite.chainID.Chain(), suite.committee) + handler := access.NewHandler(suite.backend, suite.chainID.Chain()) // initialize metrics related storage metrics := metrics.NewNoopCollector() @@ -743,8 +722,7 @@ func (suite *Suite) TestExecuteScript() { require.NoError(suite.T(), err) conduit := new(mocknetwork.Conduit) - suite.net.On("Register", network.ReceiveReceipts, mock.Anything).Return(conduit, nil). - Once() + suite.net.On("Register", network.ReceiveReceipts, mock.Anything).Return(conduit, nil).Once() // create the ingest engine ingestEng, err := ingestion.New(suite.log, suite.net, suite.state, suite.me, suite.request, blocks, headers, collections, transactions, results, receipts, metrics, collectionsToMarkFinalized, collectionsToMarkExecuted, blocksToMarkExecuted, nil) From 57982646076f033b76a9fcbfab1726ca41df8c5b Mon Sep 17 00:00:00 2001 From: Tony Z Date: Tue, 12 Jul 2022 11:10:09 -0700 Subject: [PATCH 173/223] [localnet] Putting limit to EXECUTION config to avoid potential confusion --- integration/localnet/Makefile | 6 ++++++ integration/localnet/README.md | 1 + 2 files changed, 7 insertions(+) diff --git a/integration/localnet/Makefile b/integration/localnet/Makefile index 967f6026434..6771d8ba03c 100644 --- a/integration/localnet/Makefile +++ b/integration/localnet/Makefile @@ -1,6 +1,7 @@ COLLECTION = 3 CONSENSUS = 3 EXECUTION = 2 +VALID_EXECUTION := $(shell test $(EXECUTION) -ge 2; echo $$?) 
VERIFICATION = 1 ACCESS = 2 OBSERVER = 0 @@ -22,6 +23,10 @@ VERSION=localnetbuild .PHONY: init init: +ifeq ($(strip $(VALID_EXECUTION)), 1) + # multiple execution nodes are required to prevent seals being generated in case of execution forking. + $(error Number of Execution nodes should be no less than 2) +else go run -tags relic \ -ldflags="-X 'github.com/onflow/flow-go/cmd/build.commit=${COMMIT}' \ -X 'github.com/onflow/flow-go/cmd/build.semver=${VERSION}'" \ @@ -42,6 +47,7 @@ init: -extensive-tracing=$(EXTENSIVE_TRACING) \ -consensus-delay=$(CONSENSUS_DELAY) \ -collection-delay=$(COLLECTION_DELAY) +endif # Creates a light version of the localnet with just 1 instance for each node type .PHONY: init-light diff --git a/integration/localnet/README.md b/integration/localnet/README.md index 4c02e24fa65..17e1edb0d4f 100644 --- a/integration/localnet/README.md +++ b/integration/localnet/README.md @@ -48,6 +48,7 @@ Specify the number of nodes for each role: ```sh make -e COLLECTION=2 CONSENSUS=5 EXECUTION=3 VERIFICATION=2 ACCESS=2 init ``` +*NOTE: number of execution nodes should be no less than 2. It is to avoid seals being created in case of execution forks.* Specify the number of collector clusters: From 4c769a69f5c70d4a1cbd890ce978a7ab8b52fc6b Mon Sep 17 00:00:00 2001 From: Tony Z Date: Tue, 12 Jul 2022 15:15:26 -0700 Subject: [PATCH 174/223] [localnet] also apply the same limit check for Consensus node --- integration/localnet/Makefile | 3 +++ integration/localnet/README.md | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/integration/localnet/Makefile b/integration/localnet/Makefile index 6771d8ba03c..13fd5d0a28c 100644 --- a/integration/localnet/Makefile +++ b/integration/localnet/Makefile @@ -1,5 +1,6 @@ COLLECTION = 3 CONSENSUS = 3 +VALID_CONSENSUS := $(shell test $(CONSENSUS) -ge 2; echo $$?) EXECUTION = 2 VALID_EXECUTION := $(shell test $(EXECUTION) -ge 2; echo $$?) 
VERIFICATION = 1 @@ -26,6 +27,8 @@ init: ifeq ($(strip $(VALID_EXECUTION)), 1) # multiple execution nodes are required to prevent seals being generated in case of execution forking. $(error Number of Execution nodes should be no less than 2) +else ifeq ($(strip $(VALID_CONSENSUS)), 1) + $(error Number of Consensus nodes should be no less than 2) else go run -tags relic \ -ldflags="-X 'github.com/onflow/flow-go/cmd/build.commit=${COMMIT}' \ diff --git a/integration/localnet/README.md b/integration/localnet/README.md index 17e1edb0d4f..ed692d3c362 100644 --- a/integration/localnet/README.md +++ b/integration/localnet/README.md @@ -48,7 +48,7 @@ Specify the number of nodes for each role: ```sh make -e COLLECTION=2 CONSENSUS=5 EXECUTION=3 VERIFICATION=2 ACCESS=2 init ``` -*NOTE: number of execution nodes should be no less than 2. It is to avoid seals being created in case of execution forks.* +*NOTE: number of execution\consensus nodes should be no less than 2. It is to avoid seals being created in case of execution forks.* Specify the number of collector clusters: From ef1fffe84c2562779ae4f59a442b1368d0d9c78c Mon Sep 17 00:00:00 2001 From: Alexander Hentschel Date: Tue, 12 Jul 2022 20:16:44 -0700 Subject: [PATCH 175/223] wip --- access/handler.go | 1 + .../node_builder/access_node_builder.go | 10 ++-- .../signature/block_signer_decoder.go | 8 ++- .../signature/block_signer_decoder_test.go | 10 ++-- engine/access/rpc/engine.go | 14 +---- engine/access/rpc/engine_builder.go | 58 +++++++++++++------ 6 files changed, 61 insertions(+), 40 deletions(-) diff --git a/access/handler.go b/access/handler.go index d350bef3905..0393ac42369 100644 --- a/access/handler.go +++ b/access/handler.go @@ -21,6 +21,7 @@ type Handler struct { sgnIdcsDecoder hotstuff.BlockSignerDecoder } +// HandlerOption is used to hand over optional constructor parameters type HandlerOption func(*Handler) func NewHandler(api API, chain flow.Chain, options ...HandlerOption) *Handler { diff --git 
a/cmd/access/node_builder/access_node_builder.go b/cmd/access/node_builder/access_node_builder.go index 1d84856914b..a70755367a4 100644 --- a/cmd/access/node_builder/access_node_builder.go +++ b/cmd/access/node_builder/access_node_builder.go @@ -28,7 +28,7 @@ import ( "github.com/onflow/flow-go/consensus/hotstuff" "github.com/onflow/flow-go/consensus/hotstuff/committees" consensuspubsub "github.com/onflow/flow-go/consensus/hotstuff/notifications/pubsub" - hotsignature "github.com/onflow/flow-go/consensus/hotstuff/signature" + "github.com/onflow/flow-go/consensus/hotstuff/signature" "github.com/onflow/flow-go/consensus/hotstuff/verification" recovery "github.com/onflow/flow-go/consensus/recovery/protocol" "github.com/onflow/flow-go/crypto" @@ -277,7 +277,7 @@ func (builder *FlowAccessNodeBuilder) buildFollowerCore() *FlowAccessNodeBuilder // state when the follower detects newly finalized blocks final := finalizer.NewFinalizer(node.DB, node.Storage.Headers, builder.FollowerState, node.Tracer) - packer := hotsignature.NewConsensusSigDataPacker(builder.Committee) + packer := signature.NewConsensusSigDataPacker(builder.Committee) // initialize the verifier for the protocol consensus verifier := verification.NewCombinedVerifier(builder.Committee, packer) @@ -826,8 +826,10 @@ func (builder *FlowAccessNodeBuilder) Build() (cmd.Node, error) { return nil, err } - engineBuilder.WithLegacy() - builder.RpcEng = engineBuilder.Build() + builder.RpcEng = engineBuilder. + WithLegacy(). + WithBlockSignerDecoder(signature.NewBlockSignerDecoder(builder.Committee)). + Build() return builder.RpcEng, nil }). 
Component("ingestion engine", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { diff --git a/consensus/hotstuff/signature/block_signer_decoder.go b/consensus/hotstuff/signature/block_signer_decoder.go index c70fc6813a1..920f21962a3 100644 --- a/consensus/hotstuff/signature/block_signer_decoder.go +++ b/consensus/hotstuff/signature/block_signer_decoder.go @@ -3,12 +3,12 @@ package signature import ( "errors" "fmt" - "github.com/onflow/flow-go/state" - "github.com/onflow/flow-go/storage" "github.com/onflow/flow-go/consensus/hotstuff" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/signature" + "github.com/onflow/flow-go/state" + "github.com/onflow/flow-go/storage" ) // BlockSignerDecoder is a wrapper around the `hotstuff.Committee`, which implements @@ -54,6 +54,10 @@ func (b *BlockSignerDecoder) DecodeSignerIDs(header *flow.Header) (flow.Identifi // NoopBlockSignerDecoder does not decode any signer indices and consistenlty return nil type NoopBlockSignerDecoder struct{} +func NewNoopBlockSignerDecoder() *NoopBlockSignerDecoder { + return &NoopBlockSignerDecoder{} +} + func (b *NoopBlockSignerDecoder) DecodeSignerIDs(_ *flow.Header) (flow.IdentifierList, error) { return nil, nil } diff --git a/consensus/hotstuff/signature/block_signer_decoder_test.go b/consensus/hotstuff/signature/block_signer_decoder_test.go index acdd5b07057..9bf63e9b4b2 100644 --- a/consensus/hotstuff/signature/block_signer_decoder_test.go +++ b/consensus/hotstuff/signature/block_signer_decoder_test.go @@ -2,6 +2,12 @@ package signature import ( "errors" + "testing" + + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + hotstuff "github.com/onflow/flow-go/consensus/hotstuff/mocks" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/order" @@ -9,10 +15,6 @@ import ( "github.com/onflow/flow-go/state" "github.com/onflow/flow-go/storage" "github.com/onflow/flow-go/utils/unittest" 
- "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - "testing" ) func TestBlockSignerDecoder(t *testing.T) { diff --git a/engine/access/rpc/engine.go b/engine/access/rpc/engine.go index 7c6f10d16ad..bcbe9ea8c6c 100644 --- a/engine/access/rpc/engine.go +++ b/engine/access/rpc/engine.go @@ -4,7 +4,6 @@ import ( "context" "errors" "fmt" - "github.com/onflow/flow-go/consensus/hotstuff/signature" "net" "net/http" "sync" @@ -17,7 +16,6 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/credentials" - "github.com/onflow/flow-go/consensus/hotstuff/committees" "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/engine/access/rest" "github.com/onflow/flow-go/engine/access/rpc/backend" @@ -88,7 +86,7 @@ func NewBuilder(log zerolog.Logger, executionGRPCPort uint, retryEnabled bool, rpcMetricsEnabled bool, - apiRatelimits map[string]int, // the api rate limit (max calls per second) for each of the Access API e.g. Ping->100, GetTransaction->300 + apiRatelimits map[string]int, // the api rate limit (max calls per second) for each of the Access API e.g. Ping->100, GetTransaction->300 apiBurstLimits map[string]int, // the api burst limit (max calls at the same time) for each of the Access API e.g. Ping->50, GetTransaction->10 ) (*RPCEngineBuilder, error) { @@ -186,15 +184,7 @@ func NewBuilder(log zerolog.Logger, chain: chainID.Chain(), } - // TODO: update to Replicas API once active PaceMaker is merged - // The second parameter (flow.Identifier) is only used by hotstuff internally and also - // going to be removed soon. For now, we set it to zero. 
- committee, err := committees.NewConsensusCommittee(state, flow.ZeroID) - if err != nil { - return nil, fmt.Errorf("failed to initialize hotstuff.Committee abstractiono: %w", err) - } - - builder := NewRPCEngineBuilder(eng, signature.NewBlockSignerDecoder(committee)) + builder := NewRPCEngineBuilder(eng) if rpcMetricsEnabled { builder.WithMetrics() } diff --git a/engine/access/rpc/engine_builder.go b/engine/access/rpc/engine_builder.go index 1936d46d94b..f677321e59e 100644 --- a/engine/access/rpc/engine_builder.go +++ b/engine/access/rpc/engine_builder.go @@ -2,6 +2,7 @@ package rpc import ( grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" + "github.com/onflow/flow-go/consensus/hotstuff/signature" accessproto "github.com/onflow/flow/protobuf/go/flow/access" legacyaccessproto "github.com/onflow/flow/protobuf/go/flow/legacy/access" @@ -13,25 +14,38 @@ import ( ) // NewRPCEngineBuilder helps to build a new RPC engine. -func NewRPCEngineBuilder(engine *Engine, sgnIdcsDecoder hotstuff.BlockSignerDecoder) *RPCEngineBuilder { - builder := &RPCEngineBuilder{} - builder.Engine = engine - builder.localAPIServer = access.NewHandler(builder.backend, builder.chain, access.WithBlockSignerDecoder(sgnIdcsDecoder)) - return builder +func NewRPCEngineBuilder(engine *Engine) *RPCEngineBuilder { + return &RPCEngineBuilder{ + Engine: engine, + sgnIdcsDecoder: signature.NewNoopBlockSignerDecoder(), + } } type RPCEngineBuilder struct { *Engine - // Use the parent interface instead of implementation, so that we can assign it to proxy. 
- localAPIServer accessproto.AccessAPIServer + + router *apiproxy.FlowAccessAPIRouter // this is set through `WithRouting`; or nil if not explicitly specified + sgnIdcsDecoder hotstuff.BlockSignerDecoder } -func (builder *RPCEngineBuilder) WithRouting(router *apiproxy.FlowAccessAPIRouter) { - router.SetLocalAPI(builder.localAPIServer) - builder.localAPIServer = router +// WithRouting specifies that the given router should be used as primary access API. +// Returns self-reference for chaining. +func (builder *RPCEngineBuilder) WithRouting(router *apiproxy.FlowAccessAPIRouter) *RPCEngineBuilder { + builder.router = router + return builder } -func (builder *RPCEngineBuilder) WithLegacy() { +// WithBlockSignerDecoder specifies that signer indices in block headers should be translated +// to full node IDs with the given decoder. +// Returns self-reference for chaining. +func (builder *RPCEngineBuilder) WithBlockSignerDecoder(sgnIdcsDecoder hotstuff.BlockSignerDecoder) *RPCEngineBuilder { + builder.sgnIdcsDecoder = sgnIdcsDecoder + return builder +} + +// WithLegacy specifies that a legacy access API should be instantiated +// Returns self-reference for chaining. +func (builder *RPCEngineBuilder) WithLegacy() *RPCEngineBuilder { // Register legacy gRPC handlers for backwards compatibility, to be removed at a later date legacyaccessproto.RegisterAccessAPIServer( builder.unsecureGrpcServer, @@ -41,21 +55,29 @@ func (builder *RPCEngineBuilder) WithLegacy() { builder.secureGrpcServer, legacyaccess.NewHandler(builder.backend, builder.chain), ) + return builder } -func (builder *RPCEngineBuilder) WithMetrics() { +// WithMetrics specifies the metrics should be collected. +// Returns self-reference for chaining. 
+func (builder *RPCEngineBuilder) WithMetrics() *RPCEngineBuilder { // Not interested in legacy metrics, so initialize here grpc_prometheus.EnableHandlingTimeHistogram() grpc_prometheus.Register(builder.unsecureGrpcServer) grpc_prometheus.Register(builder.secureGrpcServer) -} - -func (builder *RPCEngineBuilder) withRegisterRPC() { - accessproto.RegisterAccessAPIServer(builder.unsecureGrpcServer, builder.localAPIServer) - accessproto.RegisterAccessAPIServer(builder.secureGrpcServer, builder.localAPIServer) + return builder } func (builder *RPCEngineBuilder) Build() *Engine { - builder.withRegisterRPC() + var localAPIServer accessproto.AccessAPIServer = access.NewHandler(builder.backend, builder.chain, access.WithBlockSignerDecoder(builder.sgnIdcsDecoder)) + + if builder.router != nil { + builder.router.SetLocalAPI(localAPIServer) + localAPIServer = builder.router + } + + accessproto.RegisterAccessAPIServer(builder.unsecureGrpcServer, localAPIServer) + accessproto.RegisterAccessAPIServer(builder.secureGrpcServer, localAPIServer) + return builder.Engine } From cde99154d9b53932df058d7feecb6613448f1ce6 Mon Sep 17 00:00:00 2001 From: Alexander Hentschel Date: Tue, 12 Jul 2022 20:21:36 -0700 Subject: [PATCH 176/223] linted code --- engine/access/access_test.go | 6 +++--- engine/access/rpc/engine.go | 3 +-- engine/access/rpc/engine_builder.go | 3 +-- 3 files changed, 5 insertions(+), 7 deletions(-) diff --git a/engine/access/access_test.go b/engine/access/access_test.go index f41defbeb41..d157dd0cd9d 100644 --- a/engine/access/access_test.go +++ b/engine/access/access_test.go @@ -16,10 +16,11 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + "github.com/onflow/flow-go/crypto" + "github.com/onflow/flow-go/access" hsmock "github.com/onflow/flow-go/consensus/hotstuff/mocks" "github.com/onflow/flow-go/consensus/hotstuff/model" - "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/engine/access/ingestion" accessmock 
"github.com/onflow/flow-go/engine/access/mock" "github.com/onflow/flow-go/engine/access/rpc" @@ -621,8 +622,7 @@ func (suite *Suite) TestGetSealedTransaction() { rpcEngBuilder, err := rpc.NewBuilder(suite.log, suite.state, rpc.Config{}, nil, nil, blocks, headers, collections, transactions, receipts, results, suite.chainID, metrics, metrics, 0, 0, false, false, nil, nil) - rpcEngBuilder.WithLegacy() - rpcEng := rpcEngBuilder.Build() + rpcEng := rpcEngBuilder.WithLegacy().Build() require.NoError(suite.T(), err) // create the ingest engine diff --git a/engine/access/rpc/engine.go b/engine/access/rpc/engine.go index bcbe9ea8c6c..981134f7733 100644 --- a/engine/access/rpc/engine.go +++ b/engine/access/rpc/engine.go @@ -86,10 +86,9 @@ func NewBuilder(log zerolog.Logger, executionGRPCPort uint, retryEnabled bool, rpcMetricsEnabled bool, - apiRatelimits map[string]int, // the api rate limit (max calls per second) for each of the Access API e.g. Ping->100, GetTransaction->300 + apiRatelimits map[string]int, // the api rate limit (max calls per second) for each of the Access API e.g. Ping->100, GetTransaction->300 apiBurstLimits map[string]int, // the api burst limit (max calls at the same time) for each of the Access API e.g. 
Ping->50, GetTransaction->10 ) (*RPCEngineBuilder, error) { - log = log.With().Str("engine", "rpc").Logger() if config.MaxMsgSize == 0 { diff --git a/engine/access/rpc/engine_builder.go b/engine/access/rpc/engine_builder.go index f677321e59e..f172cf1ccf3 100644 --- a/engine/access/rpc/engine_builder.go +++ b/engine/access/rpc/engine_builder.go @@ -2,8 +2,6 @@ package rpc import ( grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" - "github.com/onflow/flow-go/consensus/hotstuff/signature" - accessproto "github.com/onflow/flow/protobuf/go/flow/access" legacyaccessproto "github.com/onflow/flow/protobuf/go/flow/legacy/access" @@ -11,6 +9,7 @@ import ( legacyaccess "github.com/onflow/flow-go/access/legacy" "github.com/onflow/flow-go/apiproxy" "github.com/onflow/flow-go/consensus/hotstuff" + "github.com/onflow/flow-go/consensus/hotstuff/signature" ) // NewRPCEngineBuilder helps to build a new RPC engine. From 5e1473b72ce5d9bc3866cf3d163e0ab9e010f7f3 Mon Sep 17 00:00:00 2001 From: Alexander Hentschel Date: Tue, 12 Jul 2022 20:40:52 -0700 Subject: [PATCH 177/223] reducing unnecessary changes --- consensus/hotstuff/signature/block_signer_decoder.go | 3 ++- engine/access/access_test.go | 2 +- engine/access/rpc/engine.go | 1 + engine/common/rpc/convert/convert.go | 4 ++-- 4 files changed, 6 insertions(+), 4 deletions(-) diff --git a/consensus/hotstuff/signature/block_signer_decoder.go b/consensus/hotstuff/signature/block_signer_decoder.go index 920f21962a3..64d917b07d9 100644 --- a/consensus/hotstuff/signature/block_signer_decoder.go +++ b/consensus/hotstuff/signature/block_signer_decoder.go @@ -51,7 +51,8 @@ func (b *BlockSignerDecoder) DecodeSignerIDs(header *flow.Header) (flow.Identifi return signerIDs, nil } -// NoopBlockSignerDecoder does not decode any signer indices and consistenlty return nil +// NoopBlockSignerDecoder does not decode any signer indices and consistently returns +// nil for the signing node IDs (auxiliary data) type NoopBlockSignerDecoder 
struct{} func NewNoopBlockSignerDecoder() *NoopBlockSignerDecoder { diff --git a/engine/access/access_test.go b/engine/access/access_test.go index d157dd0cd9d..401f5880028 100644 --- a/engine/access/access_test.go +++ b/engine/access/access_test.go @@ -71,8 +71,8 @@ func TestAccess(t *testing.T) { func (suite *Suite) SetupTest() { suite.log = zerolog.New(os.Stderr) suite.net = new(mocknetwork.Network) - suite.snapshot = new(protocol.Snapshot) suite.state = new(protocol.State) + suite.snapshot = new(protocol.Snapshot) suite.epochQuery = new(protocol.EpochQuery) suite.state.On("Sealed").Return(suite.snapshot, nil).Maybe() diff --git a/engine/access/rpc/engine.go b/engine/access/rpc/engine.go index 981134f7733..a4a45ff1a0a 100644 --- a/engine/access/rpc/engine.go +++ b/engine/access/rpc/engine.go @@ -89,6 +89,7 @@ func NewBuilder(log zerolog.Logger, apiRatelimits map[string]int, // the api rate limit (max calls per second) for each of the Access API e.g. Ping->100, GetTransaction->300 apiBurstLimits map[string]int, // the api burst limit (max calls at the same time) for each of the Access API e.g. 
Ping->50, GetTransaction->10 ) (*RPCEngineBuilder, error) { + log = log.With().Str("engine", "rpc").Logger() if config.MaxMsgSize == 0 { diff --git a/engine/common/rpc/convert/convert.go b/engine/common/rpc/convert/convert.go index 4f506ead943..509f7c6ed86 100644 --- a/engine/common/rpc/convert/convert.go +++ b/engine/common/rpc/convert/convert.go @@ -137,7 +137,7 @@ func TransactionToMessage(tb flow.TransactionBody) *entities.Transaction { } } -func BlockHeaderToMessage(h *flow.Header, signerIDs []flow.Identifier) (*entities.BlockHeader, error) { +func BlockHeaderToMessage(h *flow.Header, signerIDs flow.IdentifierList) (*entities.BlockHeader, error) { id := h.ID() t := timestamppb.New(h.Timestamp) @@ -247,7 +247,7 @@ func MessagesToExecutionResults(m []*entities.ExecutionResult) ([]*flow.Executio return execResults, nil } -func BlockToMessage(h *flow.Block, signerIDs []flow.Identifier) (*entities.Block, error) { +func BlockToMessage(h *flow.Block, signerIDs flow.IdentifierList) (*entities.Block, error) { id := h.ID() From 1f9417c27bea136c760898de2899b64a42f0f2f9 Mon Sep 17 00:00:00 2001 From: Alexander Hentschel Date: Tue, 12 Jul 2022 21:00:30 -0700 Subject: [PATCH 178/223] complete and mature draft --- cmd/execution_builder.go | 12 ++++++---- .../signature/block_signer_decoder.go | 23 ------------------- engine/execution/rpc/engine.go | 21 +++++------------ engine/execution/rpc/engine_test.go | 6 ++--- 4 files changed, 16 insertions(+), 46 deletions(-) diff --git a/cmd/execution_builder.go b/cmd/execution_builder.go index 30ce6453e0c..c65dee56b39 100644 --- a/cmd/execution_builder.go +++ b/cmd/execution_builder.go @@ -27,9 +27,10 @@ import ( storageCommands "github.com/onflow/flow-go/admin/commands/storage" uploaderCommands "github.com/onflow/flow-go/admin/commands/uploader" "github.com/onflow/flow-go/consensus" + "github.com/onflow/flow-go/consensus/hotstuff" "github.com/onflow/flow-go/consensus/hotstuff/committees" 
"github.com/onflow/flow-go/consensus/hotstuff/notifications/pubsub" - hotsignature "github.com/onflow/flow-go/consensus/hotstuff/signature" + "github.com/onflow/flow-go/consensus/hotstuff/signature" "github.com/onflow/flow-go/consensus/hotstuff/verification" recovery "github.com/onflow/flow-go/consensus/recovery/protocol" followereng "github.com/onflow/flow-go/engine/common/follower" @@ -180,6 +181,7 @@ func (e *ExecutionNodeBuilder) LoadComponentsAndModules() { executionDataServiceCollector module.ExecutionDataServiceMetrics executionState state.ExecutionState followerState protocol.MutableState + committee hotstuff.Committee ledgerStorage *ledger.Ledger events *storage.Events serviceEvents *storage.ServiceEvents @@ -681,12 +683,13 @@ func (e *ExecutionNodeBuilder) LoadComponentsAndModules() { // initialize consensus committee's membership state // This committee state is for the HotStuff follower, which follows the MAIN CONSENSUS Committee // Note: node.Me.NodeID() is not part of the consensus committee - committee, err := committees.NewConsensusCommittee(node.State, node.Me.NodeID()) + var err error + committee, err = committees.NewConsensusCommittee(node.State, node.Me.NodeID()) if err != nil { return nil, fmt.Errorf("could not create Committee state for main consensus: %w", err) } - packer := hotsignature.NewConsensusSigDataPacker(committee) + packer := signature.NewConsensusSigDataPacker(committee) // initialize the verifier for the protocol consensus verifier := verification.NewCombinedVerifier(committee, packer) @@ -788,9 +791,10 @@ func (e *ExecutionNodeBuilder) LoadComponentsAndModules() { results, txResults, node.RootChainID, + signature.NewBlockSignerDecoder(committee), e.exeConf.apiRatelimits, e.exeConf.apiBurstlimits, - ) + ), nil }) } diff --git a/consensus/hotstuff/signature/block_signer_decoder.go b/consensus/hotstuff/signature/block_signer_decoder.go index 64d917b07d9..a7a23172a2b 100644 --- a/consensus/hotstuff/signature/block_signer_decoder.go 
+++ b/consensus/hotstuff/signature/block_signer_decoder.go @@ -62,26 +62,3 @@ func NewNoopBlockSignerDecoder() *NoopBlockSignerDecoder { func (b *NoopBlockSignerDecoder) DecodeSignerIDs(_ *flow.Header) (flow.IdentifierList, error) { return nil, nil } - -// DecodeSignerIDs decodes the signer indices from the given block header, and finds the signer identifiers from protocol state -// Expected Error returns during normal operations: -// * storage.ErrNotFound if block not found for the given header -// * signature.InvalidSignerIndicesError if `signerIndices` does not encode a valid subset of the consensus committee -// TODO: change `protocol.State` to `Replicas` API once active PaceMaker is merged -func DecodeSignerIDs(committee hotstuff.Committee, header *flow.Header) ([]flow.Identifier, error) { - // root block does not have signer indices - if header.ParentVoterIndices == nil && header.View == 0 { - return []flow.Identifier{}, nil - } - - members, err := committee.Identities(header.ID()) - if err != nil { - return nil, fmt.Errorf("fail to retrieve identities for block %v: %w", header.ID(), err) - } - signerIDs, err := signature.DecodeSignerIndicesToIdentifiers(members.NodeIDs(), header.ParentVoterIndices) - if err != nil { - return nil, fmt.Errorf("could not decode signer indices for block %v: %w", header.ID(), err) - } - - return signerIDs, nil -} diff --git a/engine/execution/rpc/engine.go b/engine/execution/rpc/engine.go index de405e99eeb..36f3dcf8126 100644 --- a/engine/execution/rpc/engine.go +++ b/engine/execution/rpc/engine.go @@ -17,8 +17,6 @@ import ( "google.golang.org/grpc/status" "github.com/onflow/flow-go/consensus/hotstuff" - "github.com/onflow/flow-go/consensus/hotstuff/committees" - "github.com/onflow/flow-go/consensus/hotstuff/signature" "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/engine/common/rpc" "github.com/onflow/flow-go/engine/common/rpc/convert" @@ -57,9 +55,10 @@ func New( exeResults storage.ExecutionResults, txResults 
storage.TransactionResults, chainID flow.ChainID, + sgnIdcsDecoder hotstuff.BlockSignerDecoder, apiRatelimits map[string]int, // the api rate limit (max calls per second) for each of the gRPC API e.g. Ping->100, ExecuteScriptAtBlockID->300 apiBurstLimits map[string]int, // the api burst limit (max calls at the same time) for each of the gRPC API e.g. Ping->50, ExecuteScriptAtBlockID->10 -) (*Engine, error) { +) *Engine { log = log.With().Str("engine", "rpc").Logger() if config.MaxMsgSize == 0 { config.MaxMsgSize = grpcutils.DefaultMaxMsgSize @@ -89,14 +88,6 @@ func New( server := grpc.NewServer(serverOptions...) - // TODO: update to Replicas API once active PaceMaker is merged - // The second parameter (flow.Identifier) is only used by hotstuff internally and also - // going to be removed soon. For now, we set it to zero. - committee, err := committees.NewConsensusCommittee(state, flow.ZeroID) - if err != nil { - return nil, fmt.Errorf("initializing hotstuff.Committee abstraction failed: %w", err) - } - eng := &Engine{ log: log, unit: engine.NewUnit(), @@ -106,7 +97,7 @@ func New( blocks: blocks, headers: headers, state: state, - committee: committee, + sgnIdcsDecoder: sgnIdcsDecoder, events: events, exeResults: exeResults, transactionResults: txResults, @@ -123,7 +114,7 @@ func New( execution.RegisterExecutionAPIServer(eng.server, eng.handler) - return eng, nil + return eng } // Ready returns a ready channel that is closed once the engine has fully @@ -165,7 +156,7 @@ type handler struct { blocks storage.Blocks headers storage.Headers state protocol.State - committee hotstuff.Committee + sgnIdcsDecoder hotstuff.BlockSignerDecoder events storage.Events exeResults storage.ExecutionResults transactionResults storage.TransactionResults @@ -567,7 +558,7 @@ func (h *handler) GetBlockHeaderByID( } func (h *handler) blockHeaderResponse(header *flow.Header) (*execution.BlockHeaderResponse, error) { - signerIDs, err := signature.DecodeSignerIDs(h.committee, header) + 
signerIDs, err := h.sgnIdcsDecoder.DecodeSignerIDs(header) if err != nil { return nil, fmt.Errorf("failed to decode signer indices to Identifiers for block %v: %w", header.ID(), err) } diff --git a/engine/execution/rpc/engine_test.go b/engine/execution/rpc/engine_test.go index adfa3e0ff36..f1331655306 100644 --- a/engine/execution/rpc/engine_test.go +++ b/engine/execution/rpc/engine_test.go @@ -55,13 +55,11 @@ func (suite *Suite) TestExecuteScriptAtBlockID() { // setup dummy request/response ctx := context.Background() - rawId := []byte("dummy ID") - mockIdentifier, err := convert.BlockID(rawId) - suite.Require().NoError(err) + mockIdentifier := unittest.IdentifierFixture() script := []byte("dummy script") arguments := [][]byte(nil) executionReq := execution.ExecuteScriptAtBlockIDRequest{ - BlockId: rawId[:], + BlockId: mockIdentifier[:], Script: script, } scriptExecValue := []byte{9, 10, 11} From 680237c94a06456bfd3e1aa851316584b71cee3c Mon Sep 17 00:00:00 2001 From: Kay-Zee Date: Wed, 13 Jul 2022 09:38:43 -0700 Subject: [PATCH 179/223] Add staging net --- cmd/bootstrap/cmd/block.go | 4 ++-- cmd/bootstrap/cmd/keygen.go | 2 +- cmd/bootstrap/cmd/machine_account.go | 6 +++++- cmd/scaffold.go | 4 ++-- flips/component-interface.md | 2 +- fvm/systemcontracts/system_contracts.go | 12 ++++++------ fvm/systemcontracts/system_contracts_test.go | 6 +++--- k8s/local/flow-collection-node-deployment.yml | 2 +- k8s/local/flow-consensus-node-deployment.yml | 2 +- k8s/local/flow-execution-node-deployment.yml | 2 +- k8s/local/flow-verification-node-deployment.yml | 2 +- k8s/staging/flow-collection-node-deployment.yml | 2 +- k8s/staging/flow-consensus-node-deployment.yml | 2 +- k8s/staging/flow-execution-node-deployment.yml | 2 +- k8s/staging/flow-verification-node-deployment.yml | 2 +- model/flow/address_test.go | 8 ++++---- model/flow/chain.go | 14 +++++++------- module/component/component.go | 2 +- 18 files changed, 40 insertions(+), 36 deletions(-) diff --git 
a/cmd/bootstrap/cmd/block.go b/cmd/bootstrap/cmd/block.go index f9eb263db7d..fe5692d7a59 100644 --- a/cmd/bootstrap/cmd/block.go +++ b/cmd/bootstrap/cmd/block.go @@ -26,8 +26,8 @@ func parseChainID(chainID string) flow.ChainID { return flow.Mainnet case "test": return flow.Testnet - case "canary": - return flow.Canary + case "staging": + return flow.Stagingnet case "bench": return flow.Benchnet case "local": diff --git a/cmd/bootstrap/cmd/keygen.go b/cmd/bootstrap/cmd/keygen.go index a13f21b1630..13a042d273f 100644 --- a/cmd/bootstrap/cmd/keygen.go +++ b/cmd/bootstrap/cmd/keygen.go @@ -104,7 +104,7 @@ func init() { // optional parameters, used for generating machine account files keygenCmd.Flags().BoolVar(&flagDefaultMachineAccount, "machine-account", false, "whether or not to generate a default (same as networking key) machine account key file") - keygenCmd.Flags().StringVar(&flagRootChain, "root-chain", "local", "chain ID for the root block (can be 'main', 'test', 'canary', 'bench', or 'local'") + keygenCmd.Flags().StringVar(&flagRootChain, "root-chain", "local", "chain ID for the root block (can be 'main', 'test', 'staging', 'bench', or 'local'") } // isEmptyDir returns True if the directory contains children diff --git a/cmd/bootstrap/cmd/machine_account.go b/cmd/bootstrap/cmd/machine_account.go index c80e1f7c9a3..0128f86a72c 100644 --- a/cmd/bootstrap/cmd/machine_account.go +++ b/cmd/bootstrap/cmd/machine_account.go @@ -121,7 +121,11 @@ func validateMachineAccountAddress(addressStr string) error { return nil } if flow.Testnet.Chain().IsValid(address) { - log.Warn().Msgf("Machine account address (%s) is **TESTNET/CANARY** address - ensure this is desired before continuing", address) + log.Warn().Msgf("Machine account address (%s) is **TESTNET** address - ensure this is desired before continuing", address) + return nil + } + if flow.Stagingnet.Chain().IsValid(address) { + log.Warn().Msgf("Machine account address (%s) is **STAGINGNET** address - ensure this is 
desired before continuing", address) return nil } if flow.Localnet.Chain().IsValid(address) { diff --git a/cmd/scaffold.go b/cmd/scaffold.go index 3df6ac88b3e..a3278fed3ed 100644 --- a/cmd/scaffold.go +++ b/cmd/scaffold.go @@ -858,12 +858,12 @@ func (fnb *FlowNodeBuilder) initFvmOptions() { fvm.WithBlocks(blockFinder), fvm.WithAccountStorageLimit(true), } - if fnb.RootChainID == flow.Testnet || fnb.RootChainID == flow.Canary || fnb.RootChainID == flow.Mainnet { + if fnb.RootChainID == flow.Testnet || fnb.RootChainID == flow.Stagingnet || fnb.RootChainID == flow.Mainnet { vmOpts = append(vmOpts, fvm.WithTransactionFeesEnabled(true), ) } - if fnb.RootChainID == flow.Testnet || fnb.RootChainID == flow.Canary || fnb.RootChainID == flow.Localnet || fnb.RootChainID == flow.Benchnet { + if fnb.RootChainID == flow.Testnet || fnb.RootChainID == flow.Stagingnet || fnb.RootChainID == flow.Localnet || fnb.RootChainID == flow.Benchnet { vmOpts = append(vmOpts, fvm.WithContractDeploymentRestricted(false), ) diff --git a/flips/component-interface.md b/flips/component-interface.md index 1fb4044a39c..ef7c97100a5 100644 --- a/flips/component-interface.md +++ b/flips/component-interface.md @@ -160,7 +160,7 @@ A component will now be started by passing a `SignalerContext` to its `Start` me // It is meant to inspect the error, determining its type and seeing if e.g. a restart or some other measure is suitable, // and then return an ErrorHandlingResult indicating how RunComponent should proceed. // Before returning, it could also: - // - panic (in canary / benchmark) + // - panic (in stagingnet / benchmark) // - log in various Error channels and / or send telemetry ... 
type OnError = func(err error) ErrorHandlingResult diff --git a/fvm/systemcontracts/system_contracts.go b/fvm/systemcontracts/system_contracts.go index fe7a1b785a7..032dfcc74f2 100644 --- a/fvm/systemcontracts/system_contracts.go +++ b/fvm/systemcontracts/system_contracts.go @@ -177,14 +177,14 @@ func init() { } contractAddressesByChainID[flow.Testnet] = testnet - // Canary test network + // Stagingnet test network // All system contracts are deployed to the service account - canary := map[string]flow.Address{ - ContractNameEpoch: flow.Canary.Chain().ServiceAddress(), - ContractNameClusterQC: flow.Canary.Chain().ServiceAddress(), - ContractNameDKG: flow.Canary.Chain().ServiceAddress(), + stagingnet := map[string]flow.Address{ + ContractNameEpoch: flow.Stagingnet.Chain().ServiceAddress(), + ContractNameClusterQC: flow.Stagingnet.Chain().ServiceAddress(), + ContractNameDKG: flow.Stagingnet.Chain().ServiceAddress(), } - contractAddressesByChainID[flow.Canary] = canary + contractAddressesByChainID[flow.Stagingnet] = stagingnet // Transient test networks // All system contracts are deployed to the service account diff --git a/fvm/systemcontracts/system_contracts_test.go b/fvm/systemcontracts/system_contracts_test.go index 454144c4c6a..63f34bdca3a 100644 --- a/fvm/systemcontracts/system_contracts_test.go +++ b/fvm/systemcontracts/system_contracts_test.go @@ -13,7 +13,7 @@ import ( // TestSystemContract_Address tests that we can retrieve a canonical address // for all accepted chains and contracts. 
func TestSystemContracts(t *testing.T) { - chains := []flow.ChainID{flow.Mainnet, flow.Testnet, flow.Canary, flow.Benchnet, flow.Localnet, flow.Emulator} + chains := []flow.ChainID{flow.Mainnet, flow.Testnet, flow.Stagingnet, flow.Benchnet, flow.Localnet, flow.Emulator} for _, chain := range chains { _, err := SystemContractsForChain(chain) @@ -34,7 +34,7 @@ func TestSystemContract_InvalidChainID(t *testing.T) { // TestServiceEvents tests that we can retrieve service events for all accepted // chains and contracts. func TestServiceEvents(t *testing.T) { - chains := []flow.ChainID{flow.Mainnet, flow.Testnet, flow.Canary, flow.Benchnet, flow.Localnet, flow.Emulator} + chains := []flow.ChainID{flow.Mainnet, flow.Testnet, flow.Stagingnet, flow.Benchnet, flow.Localnet, flow.Emulator} for _, chain := range chains { _, err := ServiceEventsForChain(chain) @@ -46,7 +46,7 @@ func TestServiceEvents(t *testing.T) { // TestServiceEventLookup_Consistency sanity checks consistency of the lookup // method, in case an update to ServiceEvents forgets to update the lookup. 
func TestServiceEventAll_Consistency(t *testing.T) { - chains := []flow.ChainID{flow.Mainnet, flow.Testnet, flow.Canary, flow.Benchnet, flow.Localnet, flow.Emulator} + chains := []flow.ChainID{flow.Mainnet, flow.Testnet, flow.Stagingnet, flow.Benchnet, flow.Localnet, flow.Emulator} fields := reflect.TypeOf(ServiceEvents{}).NumField() for _, chain := range chains { diff --git a/k8s/local/flow-collection-node-deployment.yml b/k8s/local/flow-collection-node-deployment.yml index 574a32c619d..40715c783f6 100644 --- a/k8s/local/flow-collection-node-deployment.yml +++ b/k8s/local/flow-collection-node-deployment.yml @@ -146,7 +146,7 @@ metadata: # Best practice labels: # app: (the non-unique version of metadata.name) # kind: [web|worker] - # env: [staging|production|canary|test|dev] + # env: [staging|production|test|dev] # owner: who to ask about this service # version: the major version of this service (v1/v2/v1beta1) labels: diff --git a/k8s/local/flow-consensus-node-deployment.yml b/k8s/local/flow-consensus-node-deployment.yml index c02f274a0b6..6e607ef15e6 100644 --- a/k8s/local/flow-consensus-node-deployment.yml +++ b/k8s/local/flow-consensus-node-deployment.yml @@ -8,7 +8,7 @@ metadata: # Best practice labels: # app: (the non-unique version of metadata.name) # kind: [web|worker] - # env: [staging|production|canary|test|dev] + # env: [staging|production|test|dev] # owner: who to ask about this service # version: the major version of this service (v1/v2/v1beta1) labels: diff --git a/k8s/local/flow-execution-node-deployment.yml b/k8s/local/flow-execution-node-deployment.yml index 7c0c6b76abe..21936b56f0a 100644 --- a/k8s/local/flow-execution-node-deployment.yml +++ b/k8s/local/flow-execution-node-deployment.yml @@ -8,7 +8,7 @@ metadata: # Best practice labels: # app: (the non-unique version of metadata.name) # kind: [web|worker] - # env: [staging|production|canary|test|dev] + # env: [staging|production|test|dev] # owner: who to ask about this service # version: the 
major version of this service (v1/v2/v1beta1) labels: diff --git a/k8s/local/flow-verification-node-deployment.yml b/k8s/local/flow-verification-node-deployment.yml index 1b64c35f963..c53fad7ea7e 100644 --- a/k8s/local/flow-verification-node-deployment.yml +++ b/k8s/local/flow-verification-node-deployment.yml @@ -8,7 +8,7 @@ metadata: # Best practice labels: # app: (the non-unique version of metadata.name) # kind: [web|worker] - # env: [staging|production|canary|test|dev] + # env: [staging|production|test|dev] # owner: who to ask about this service # version: the major version of this service (v1/v2/v1beta1) labels: diff --git a/k8s/staging/flow-collection-node-deployment.yml b/k8s/staging/flow-collection-node-deployment.yml index ebc918e81f6..7211babe90c 100644 --- a/k8s/staging/flow-collection-node-deployment.yml +++ b/k8s/staging/flow-collection-node-deployment.yml @@ -146,7 +146,7 @@ metadata: # Best practice labels: # app: (the non-unique version of metadata.name) # kind: [web|worker] - # env: [staging|production|canary|test|dev] + # env: [staging|production|test|dev] # owner: who to ask about this service # version: the major version of this service (v1/v2/v1beta1) labels: diff --git a/k8s/staging/flow-consensus-node-deployment.yml b/k8s/staging/flow-consensus-node-deployment.yml index 24bb4126c9c..1bb39a9ece7 100644 --- a/k8s/staging/flow-consensus-node-deployment.yml +++ b/k8s/staging/flow-consensus-node-deployment.yml @@ -8,7 +8,7 @@ metadata: # Best practice labels: # app: (the non-unique version of metadata.name) # kind: [web|worker] - # env: [staging|production|canary|test|dev] + # env: [staging|production|test|dev] # owner: who to ask about this service # version: the major version of this service (v1/v2/v1beta1) labels: diff --git a/k8s/staging/flow-execution-node-deployment.yml b/k8s/staging/flow-execution-node-deployment.yml index d4c0928bfb5..06fc8aa8785 100644 --- a/k8s/staging/flow-execution-node-deployment.yml +++ 
b/k8s/staging/flow-execution-node-deployment.yml @@ -8,7 +8,7 @@ metadata: # Best practice labels: # app: (the non-unique version of metadata.name) # kind: [web|worker] - # env: [staging|production|canary|test|dev] + # env: [staging|production|test|dev] # owner: who to ask about this service # version: the major version of this service (v1/v2/v1beta1) labels: diff --git a/k8s/staging/flow-verification-node-deployment.yml b/k8s/staging/flow-verification-node-deployment.yml index cee818600fd..2bae3231889 100644 --- a/k8s/staging/flow-verification-node-deployment.yml +++ b/k8s/staging/flow-verification-node-deployment.yml @@ -8,7 +8,7 @@ metadata: # Best practice labels: # app: (the non-unique version of metadata.name) # kind: [web|worker] - # env: [staging|production|canary|test|dev] + # env: [staging|production|test|dev] # owner: who to ask about this service # version: the major version of this service (v1/v2/v1beta1) labels: diff --git a/model/flow/address_test.go b/model/flow/address_test.go index 47dbb884e2c..568dcfdae44 100644 --- a/model/flow/address_test.go +++ b/model/flow/address_test.go @@ -98,7 +98,7 @@ func testAddressConstants(t *testing.T) { Mainnet, Testnet, Emulator, - Canary, + Stagingnet, } for _, chainID := range chainIDs { @@ -147,7 +147,7 @@ func testAddressGeneration(t *testing.T) { Mainnet, Testnet, Emulator, - Canary, + Stagingnet, } for _, chainID := range chainIDs { @@ -240,7 +240,7 @@ func testAddressesIntersection(t *testing.T) { Mainnet, Testnet, Emulator, - Canary, + Stagingnet, } for _, chainID := range chainIDs { @@ -309,7 +309,7 @@ func testIndexFromAddress(t *testing.T) { mainnet, testnet, emulator, - canary, + stagingnet, } for _, chain := range chains { diff --git a/model/flow/chain.go b/model/flow/chain.go index 91851729e58..431e9a36f8b 100644 --- a/model/flow/chain.go +++ b/model/flow/chain.go @@ -21,8 +21,8 @@ const ( // Testnet is the chain ID for the testnet chain. 
Testnet ChainID = "flow-testnet" - // Canary is the chain ID for internal canary chain. - Canary ChainID = "flow-canary" + // Stagingnet is the chain ID for internal stagingnet chain. + Stagingnet ChainID = "flow-stagingnet" // Transient test networks @@ -52,7 +52,7 @@ func (c ChainID) getChainCodeWord() uint64 { return 0 case Testnet: return invalidCodeTestNetwork - case Canary: + case Stagingnet: return invalidCodeCanaryNetwork case Emulator, Localnet, Benchnet, BftTestnet: return invalidCodeTransientNetwork @@ -175,9 +175,9 @@ var bftTestNet = &addressedChain{ }, } -var canary = &addressedChain{ +var stagingnet = &addressedChain{ chainImpl: &linearCodeImpl{ - chainID: Canary, + chainID: Stagingnet, }, } @@ -210,8 +210,8 @@ func (c ChainID) Chain() Chain { return mainnet case Testnet: return testnet - case Canary: - return canary + case Stagingnet: + return stagingnet case Benchnet: return benchnet case Localnet: diff --git a/module/component/component.go b/module/component/component.go index f296a7a25ec..95b646aac73 100644 --- a/module/component/component.go +++ b/module/component/component.go @@ -30,7 +30,7 @@ type ComponentFactory func() (Component, error) // It is meant to inspect the error, determining its type and seeing if e.g. a restart or some other measure is suitable, // and then return an ErrorHandlingResult indicating how RunComponent should proceed. // Before returning, it could also: -// - panic (in canary / benchmark) +// - panic (in stagingnet / benchmark) // - log in various Error channels and / or send telemetry ... 
type OnError = func(error) ErrorHandlingResult From 499b6f9fd53ea9952378e638cf5e9f6f4e64658b Mon Sep 17 00:00:00 2001 From: Alexander Hentschel Date: Wed, 13 Jul 2022 14:57:21 -0700 Subject: [PATCH 180/223] Apply suggestions from code review Co-authored-by: Jordan Schalm --- access/handler.go | 2 +- consensus/hotstuff/committee.go | 2 ++ engine/access/rpc/engine_builder.go | 2 +- engine/execution/rpc/engine.go | 2 +- state/errors.go | 2 +- 5 files changed, 6 insertions(+), 4 deletions(-) diff --git a/access/handler.go b/access/handler.go index 0393ac42369..4a4b2e1292a 100644 --- a/access/handler.go +++ b/access/handler.go @@ -18,7 +18,7 @@ import ( type Handler struct { api API chain flow.Chain - sgnIdcsDecoder hotstuff.BlockSignerDecoder + signerDecoder hotstuff.BlockSignerDecoder } // HandlerOption is used to hand over optional constructor parameters diff --git a/consensus/hotstuff/committee.go b/consensus/hotstuff/committee.go index 0027894ec29..1cb445aa3cb 100644 --- a/consensus/hotstuff/committee.go +++ b/consensus/hotstuff/committee.go @@ -49,6 +49,8 @@ type Committee interface { DKG(blockID flow.Identifier) (DKG, error) } +// BlockSignerDecoder defines how to convert the SignerIndices field within a particular +// block header to the identifiers of the nodes which signed the block. type BlockSignerDecoder interface { // DecodeSignerIDs decodes the signer indices from the given block header into full node IDs. 
// Expected Error returns during normal operations: diff --git a/engine/access/rpc/engine_builder.go b/engine/access/rpc/engine_builder.go index f172cf1ccf3..a2e15b96ea2 100644 --- a/engine/access/rpc/engine_builder.go +++ b/engine/access/rpc/engine_builder.go @@ -24,7 +24,7 @@ type RPCEngineBuilder struct { *Engine router *apiproxy.FlowAccessAPIRouter // this is set through `WithRouting`; or nil if not explicitly specified - sgnIdcsDecoder hotstuff.BlockSignerDecoder + signerDecoder hotstuff.BlockSignerDecoder } // WithRouting specifies that the given router should be used as primary access API. diff --git a/engine/execution/rpc/engine.go b/engine/execution/rpc/engine.go index 36f3dcf8126..75e8cac3100 100644 --- a/engine/execution/rpc/engine.go +++ b/engine/execution/rpc/engine.go @@ -156,7 +156,7 @@ type handler struct { blocks storage.Blocks headers storage.Headers state protocol.State - sgnIdcsDecoder hotstuff.BlockSignerDecoder + signerDecoder hotstuff.BlockSignerDecoder events storage.Events exeResults storage.ExecutionResults transactionResults storage.TransactionResults diff --git a/state/errors.go b/state/errors.go index b74c740f4f8..238557192d1 100644 --- a/state/errors.go +++ b/state/errors.go @@ -94,7 +94,7 @@ func IsNoValidChildBlockError(err error) bool { // UnknownBlockError is a sentinel error indicating that a certain block // has not been ingested yet. 
type UnknownBlockError struct { - err error + blockID flow.Identifier } func NewUnknownBlockErrorf(msg string, args ...interface{}) error { From 56b544659ba1645126047c4a4df9c15c0b406949 Mon Sep 17 00:00:00 2001 From: lolpuddle Date: Wed, 13 Jul 2022 18:16:30 -0400 Subject: [PATCH 181/223] fix suggestions --- engine/access/rpc/backend/backend_accounts.go | 2 +- engine/access/rpc/backend/backend_events.go | 2 +- engine/access/rpc/backend/backend_scripts.go | 2 +- engine/access/rpc/backend/backend_test.go | 6 +- .../rpc/backend/backend_transactions.go | 8 +- .../access/rpc/backend/connection_factory.go | 73 ++++++++++--------- .../rpc/backend/mock/connection_factory.go | 8 +- 7 files changed, 51 insertions(+), 50 deletions(-) diff --git a/engine/access/rpc/backend/backend_accounts.go b/engine/access/rpc/backend/backend_accounts.go index ba6cced5108..3c3ac5b080b 100644 --- a/engine/access/rpc/backend/backend_accounts.go +++ b/engine/access/rpc/backend/backend_accounts.go @@ -160,7 +160,7 @@ func (b *backendAccounts) tryGetAccount(ctx context.Context, execNode *flow.Iden resp, err := execRPCClient.GetAccountAtBlockID(ctx, &req) if err != nil { if status.Code(err) == codes.Unavailable { - b.connFactory.RefreshExecutionAPIClient(execNode.Address) + b.connFactory.InvalidateExecutionAPIClient(execNode.Address) } return nil, err } diff --git a/engine/access/rpc/backend/backend_events.go b/engine/access/rpc/backend/backend_events.go index 44e64581e32..cc047606b3a 100644 --- a/engine/access/rpc/backend/backend_events.go +++ b/engine/access/rpc/backend/backend_events.go @@ -214,7 +214,7 @@ func (b *backendEvents) tryGetEvents(ctx context.Context, resp, err := execRPCClient.GetEventsForBlockIDs(ctx, &req) if err != nil { if status.Code(err) == codes.Unavailable { - b.connFactory.RefreshExecutionAPIClient(execNode.Address) + b.connFactory.InvalidateExecutionAPIClient(execNode.Address) } return nil, err } diff --git a/engine/access/rpc/backend/backend_scripts.go 
b/engine/access/rpc/backend/backend_scripts.go index ca2f981b85b..ba0c72ed726 100644 --- a/engine/access/rpc/backend/backend_scripts.go +++ b/engine/access/rpc/backend/backend_scripts.go @@ -172,7 +172,7 @@ func (b *backendScripts) tryExecuteScript(ctx context.Context, execNode *flow.Id execResp, err := execRPCClient.ExecuteScriptAtBlockID(ctx, &req) if err != nil { if status.Code(err) == codes.Unavailable { - b.connFactory.RefreshExecutionAPIClient(execNode.Address) + b.connFactory.InvalidateExecutionAPIClient(execNode.Address) } return nil, status.Errorf(status.Code(err), "failed to execute the script on the execution node %s: %v", execNode.String(), err) } diff --git a/engine/access/rpc/backend/backend_test.go b/engine/access/rpc/backend/backend_test.go index 953d2f25ca8..6a3778b7465 100644 --- a/engine/access/rpc/backend/backend_test.go +++ b/engine/access/rpc/backend/backend_test.go @@ -767,7 +767,7 @@ func (suite *Suite) TestTransactionStatusTransition() { // create a mock connection factory connFactory := new(backendmock.ConnectionFactory) connFactory.On("GetExecutionAPIClient", mock.Anything).Return(suite.execClient, nil) - connFactory.On("RefreshExecutionAPIClient", mock.Anything) + connFactory.On("InvalidateExecutionAPIClient", mock.Anything) exeEventReq := execproto.GetTransactionResultRequest{ BlockId: blockID[:], @@ -2162,7 +2162,7 @@ func (suite *Suite) TestExecuteScriptOnExecutionNode() { connFactory := new(backendmock.ConnectionFactory) connFactory.On("GetExecutionAPIClient", mock.Anything). 
Return(suite.execClient, nil) - connFactory.On("RefreshExecutionAPIClient", mock.Anything) + connFactory.On("InvalidateExecutionAPIClient", mock.Anything) // create the handler with the mock backend := New( @@ -2263,7 +2263,7 @@ func (suite *Suite) setupConnectionFactory() ConnectionFactory { // create a mock connection factory connFactory := new(backendmock.ConnectionFactory) connFactory.On("GetExecutionAPIClient", mock.Anything).Return(suite.execClient, nil) - connFactory.On("RefreshExecutionAPIClient", mock.Anything) + connFactory.On("InvalidateExecutionAPIClient", mock.Anything) return connFactory } diff --git a/engine/access/rpc/backend/backend_transactions.go b/engine/access/rpc/backend/backend_transactions.go index 9635a0bd204..6c7c7ef5e61 100644 --- a/engine/access/rpc/backend/backend_transactions.go +++ b/engine/access/rpc/backend/backend_transactions.go @@ -152,7 +152,7 @@ func (b *backendTransactions) sendTransactionToCollector(ctx context.Context, err = b.grpcTxSend(ctx, collectionRPC, tx) if err != nil { if status.Code(err) == codes.Unavailable { - b.connFactory.RefreshAccessAPIClient(collectionNodeAddr) + b.connFactory.InvalidateAccessAPIClient(collectionNodeAddr) } return fmt.Errorf("failed to send transaction to collection node at %s: %v", collectionNodeAddr, err) } @@ -711,7 +711,7 @@ func (b *backendTransactions) tryGetTransactionResult( resp, err := execRPCClient.GetTransactionResult(ctx, &req) if err != nil { if status.Code(err) == codes.Unavailable { - b.connFactory.RefreshExecutionAPIClient(execNode.Address) + b.connFactory.InvalidateExecutionAPIClient(execNode.Address) } return nil, err } @@ -767,7 +767,7 @@ func (b *backendTransactions) tryGetTransactionResultsByBlockID( resp, err := execRPCClient.GetTransactionResultsByBlockID(ctx, &req) if err != nil { if status.Code(err) == codes.Unavailable { - b.connFactory.RefreshExecutionAPIClient(execNode.Address) + b.connFactory.InvalidateExecutionAPIClient(execNode.Address) } return nil, err } @@ 
-824,7 +824,7 @@ func (b *backendTransactions) tryGetTransactionResultByIndex( resp, err := execRPCClient.GetTransactionResultByIndex(ctx, &req) if err != nil { if status.Code(err) == codes.Unavailable { - b.connFactory.RefreshExecutionAPIClient(execNode.Address) + b.connFactory.InvalidateExecutionAPIClient(execNode.Address) } return nil, err } diff --git a/engine/access/rpc/backend/connection_factory.go b/engine/access/rpc/backend/connection_factory.go index 29e0cecff44..2d0ec572e38 100644 --- a/engine/access/rpc/backend/connection_factory.go +++ b/engine/access/rpc/backend/connection_factory.go @@ -12,6 +12,7 @@ import ( "github.com/onflow/flow/protobuf/go/flow/execution" "google.golang.org/grpc" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/keepalive" "github.com/onflow/flow-go/module" @@ -24,9 +25,9 @@ const defaultClientTimeout = 3 * time.Second // ConnectionFactory is used to create an access api client type ConnectionFactory interface { GetAccessAPIClient(address string) (access.AccessAPIClient, error) - RefreshAccessAPIClient(address string) + InvalidateAccessAPIClient(address string) GetExecutionAPIClient(address string) (execution.ExecutionAPIClient, error) - RefreshExecutionAPIClient(address string) + InvalidateExecutionAPIClient(address string) } type ProxyConnectionFactory struct { @@ -77,7 +78,7 @@ func (cf *ConnectionFactoryImpl) createConnection(address string, timeout time.D conn, err := grpc.Dial( address, grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(grpcutils.DefaultMaxMsgSize)), - grpc.WithInsecure(), //nolint:staticcheck + grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithKeepaliveParams(keepaliveParams), WithClientUnaryInterceptor(timeout)) if err != nil { @@ -88,21 +89,28 @@ func (cf *ConnectionFactoryImpl) createConnection(address string, timeout time.D func (cf *ConnectionFactoryImpl) retrieveConnection(grpcAddress string, timeout time.Duration) 
(*grpc.ClientConn, error) { var conn *grpc.ClientConn - var mutex *sync.Mutex + clientMutex := new(sync.Mutex) + store := ConnectionCacheStore{ + ClientConn: nil, + mutex: clientMutex, + } + if prev, ok, _ := cf.ConnectionsCache.PeekOrAdd(grpcAddress, store); ok { + clientMutex = prev.(ConnectionCacheStore).mutex + } + // we lock this mutex to prevent a scenario where the connection is not good, which will result in + // re-establishing the connection for this address. if the mutex is not locked, we may attempt to re-establish + // the connection multiple times which would result in cache thrashing. + clientMutex.Lock() + defer clientMutex.Unlock() + + // get again to update the LRU cache if res, ok := cf.ConnectionsCache.Get(grpcAddress); ok { conn = res.(ConnectionCacheStore).ClientConn - mutex = res.(ConnectionCacheStore).mutex - - // we lock this mutex to prevent a scenario where the connection is not good, which will result in - // re-establishing the connection for this address. if the mutex is not locked, we may attempt to re-establish - // the connection multiple times which would result in cache thrashing. - mutex.Lock() - defer mutex.Unlock() if cf.AccessMetrics != nil { cf.AccessMetrics.ConnectionFromPoolRetrieved() } } - if conn == nil || conn.GetState() != connectivity.Ready { + if conn == nil || conn.GetState() == connectivity.Shutdown { // this lock prevents a memory leak where a race condition may occur if 2 requests to a new connection at the // same address occur. 
the second add would overwrite the first without closing the connection cf.lock.Lock() @@ -110,7 +118,7 @@ func (cf *ConnectionFactoryImpl) retrieveConnection(grpcAddress string, timeout // Check if connection was created/refreshed by another thread if res, ok := cf.ConnectionsCache.Get(grpcAddress); ok { conn = res.(ConnectionCacheStore).ClientConn - if conn.GetState() == connectivity.Ready { + if conn != nil && conn.GetState() != connectivity.Shutdown { return conn, nil } } @@ -120,7 +128,7 @@ func (cf *ConnectionFactoryImpl) retrieveConnection(grpcAddress string, timeout conn.Close() } var err error - conn, err = cf.addConnection(grpcAddress, timeout, mutex) + conn, err = cf.addConnection(grpcAddress, timeout, clientMutex) if err != nil { return nil, err } @@ -136,10 +144,7 @@ func (cf *ConnectionFactoryImpl) addConnection(grpcAddress string, timeout time. store := ConnectionCacheStore{ ClientConn: conn, - mutex: new(sync.Mutex), - } - if mutex != nil { - store.mutex = mutex + mutex: mutex, } cf.ConnectionsCache.Add(grpcAddress, store) @@ -165,17 +170,8 @@ func (cf *ConnectionFactoryImpl) GetAccessAPIClient(address string) (access.Acce return accessAPIClient, nil } -func (cf *ConnectionFactoryImpl) RefreshAccessAPIClient(address string) { - grpcAddress, _ := getGRPCAddress(address, cf.CollectionGRPCPort) - if res, ok := cf.ConnectionsCache.Get(grpcAddress); ok { - store := res.(ConnectionCacheStore) - store.mutex.Lock() - cf.lock.Lock() - defer store.mutex.Unlock() - defer cf.lock.Unlock() - store.ClientConn.Close() - _, _ = cf.addConnection(grpcAddress, cf.CollectionNodeGRPCTimeout, store.mutex) - } +func (cf *ConnectionFactoryImpl) InvalidateAccessAPIClient(address string) { + cf.invalidateAPIClient(address, cf.CollectionGRPCPort) } func (cf *ConnectionFactoryImpl) GetExecutionAPIClient(address string) (execution.ExecutionAPIClient, error) { @@ -194,16 +190,21 @@ func (cf *ConnectionFactoryImpl) GetExecutionAPIClient(address string) (executio return 
executionAPIClient, nil } -func (cf *ConnectionFactoryImpl) RefreshExecutionAPIClient(address string) { - grpcAddress, _ := getGRPCAddress(address, cf.CollectionGRPCPort) +func (cf *ConnectionFactoryImpl) InvalidateExecutionAPIClient(address string) { + cf.invalidateAPIClient(address, cf.ExecutionGRPCPort) +} + +func (cf *ConnectionFactoryImpl) invalidateAPIClient(address string, port uint) { + grpcAddress, _ := getGRPCAddress(address, port) if res, ok := cf.ConnectionsCache.Get(grpcAddress); ok { store := res.(ConnectionCacheStore) store.mutex.Lock() - cf.lock.Lock() - defer store.mutex.Unlock() - defer cf.lock.Unlock() - store.ClientConn.Close() - _, _ = cf.addConnection(grpcAddress, cf.CollectionNodeGRPCTimeout, store.mutex) + if store.ClientConn != nil { + conn := store.ClientConn + conn.Close() + store.ClientConn = nil + } + store.mutex.Unlock() } } diff --git a/engine/access/rpc/backend/mock/connection_factory.go b/engine/access/rpc/backend/mock/connection_factory.go index 39dc25f49f9..b053853e991 100644 --- a/engine/access/rpc/backend/mock/connection_factory.go +++ b/engine/access/rpc/backend/mock/connection_factory.go @@ -61,13 +61,13 @@ func (_m *ConnectionFactory) GetExecutionAPIClient(address string) (execution.Ex return r0, r1 } -// RefreshAccessAPIClient provides a mock function with given fields: address -func (_m *ConnectionFactory) RefreshAccessAPIClient(address string) { +// InvalidateAccessAPIClient provides a mock function with given fields: address +func (_m *ConnectionFactory) InvalidateAccessAPIClient(address string) { _m.Called(address) } -// RefreshExecutionAPIClient provides a mock function with given fields: address -func (_m *ConnectionFactory) RefreshExecutionAPIClient(address string) { +// InvalidateExecutionAPIClient provides a mock function with given fields: address +func (_m *ConnectionFactory) InvalidateExecutionAPIClient(address string) { _m.Called(address) } From f68461b0f0e380269b683c534bed772d828c9180 Mon Sep 17 00:00:00 2001 
From: Alexander Hentschel Date: Wed, 13 Jul 2022 15:17:32 -0700 Subject: [PATCH 182/223] addressing reviewer comments part 2 --- access/handler.go | 20 ++++----- consensus/hotstuff/committee.go | 4 +- .../signature/block_signer_decoder.go | 2 + engine/access/rpc/engine_builder.go | 14 +++--- engine/execution/rpc/engine.go | 44 +++++++++---------- 5 files changed, 44 insertions(+), 40 deletions(-) diff --git a/access/handler.go b/access/handler.go index 4a4b2e1292a..2379f0c1aa8 100644 --- a/access/handler.go +++ b/access/handler.go @@ -16,9 +16,9 @@ import ( ) type Handler struct { - api API - chain flow.Chain - signerDecoder hotstuff.BlockSignerDecoder + api API + chain flow.Chain + signerIndicesDecoder hotstuff.BlockSignerDecoder } // HandlerOption is used to hand over optional constructor parameters @@ -26,9 +26,9 @@ type HandlerOption func(*Handler) func NewHandler(api API, chain flow.Chain, options ...HandlerOption) *Handler { h := &Handler{ - api: api, - chain: chain, - sgnIdcsDecoder: &signature.NoopBlockSignerDecoder{}, + api: api, + chain: chain, + signerIndicesDecoder: &signature.NoopBlockSignerDecoder{}, } for _, opt := range options { opt(h) @@ -490,7 +490,7 @@ func (h *Handler) GetExecutionResultForBlockID(ctx context.Context, req *access. 
} func (h *Handler) blockResponse(block *flow.Block, fullResponse bool) (*access.BlockResponse, error) { - signerIDs, err := h.sgnIdcsDecoder.DecodeSignerIDs(block.Header) + signerIDs, err := h.signerIndicesDecoder.DecodeSignerIDs(block.Header) if err != nil { return nil, err } @@ -510,7 +510,7 @@ func (h *Handler) blockResponse(block *flow.Block, fullResponse bool) (*access.B } func (h *Handler) blockHeaderResponse(header *flow.Header) (*access.BlockHeaderResponse, error) { - signerIDs, err := h.sgnIdcsDecoder.DecodeSignerIDs(header) + signerIDs, err := h.signerIndicesDecoder.DecodeSignerIDs(header) if err != nil { return nil, err } @@ -563,8 +563,8 @@ func blockEventsToMessage(block flow.BlockEvents) (*access.EventsResponse_Result // WithBlockSignerDecoder configures the Handler to decode signer indices // via the provided hotstuff.BlockSignerDecoder -func WithBlockSignerDecoder(sgnIdcsDecoder hotstuff.BlockSignerDecoder) func(*Handler) { +func WithBlockSignerDecoder(signerIndicesDecoder hotstuff.BlockSignerDecoder) func(*Handler) { return func(handler *Handler) { - handler.sgnIdcsDecoder = sgnIdcsDecoder + handler.signerIndicesDecoder = signerIndicesDecoder } } diff --git a/consensus/hotstuff/committee.go b/consensus/hotstuff/committee.go index 1cb445aa3cb..be48e70798f 100644 --- a/consensus/hotstuff/committee.go +++ b/consensus/hotstuff/committee.go @@ -22,6 +22,7 @@ type Committee interface { // * contains nodes that are allowed to sign the specified block (legitimate participants with NON-ZERO WEIGHT) // * is ordered in the canonical order // * contains no duplicates. + // TODO: document possible error returns -> https://github.com/dapperlabs/flow-go/issues/6327 Identities(blockID flow.Identifier) (flow.IdentityList, error) // Identity returns the full Identity for specified HotStuff participant. 
@@ -50,11 +51,12 @@ type Committee interface { } // BlockSignerDecoder defines how to convert the SignerIndices field within a particular -// block header to the identifiers of the nodes which signed the block. +// block header to the identifiers of the nodes which signed the block. type BlockSignerDecoder interface { // DecodeSignerIDs decodes the signer indices from the given block header into full node IDs. // Expected Error returns during normal operations: // * state.UnknownBlockError if block has not been ingested yet + // TODO: this sentinel could be changed to `ErrViewForUnknownEpoch` once we merge the active pacemaker // * signature.InvalidSignerIndicesError if signer indices included in the header do // not encode a valid subset of the consensus committee DecodeSignerIDs(header *flow.Header) (flow.IdentifierList, error) diff --git a/consensus/hotstuff/signature/block_signer_decoder.go b/consensus/hotstuff/signature/block_signer_decoder.go index a7a23172a2b..5c0d19dec72 100644 --- a/consensus/hotstuff/signature/block_signer_decoder.go +++ b/consensus/hotstuff/signature/block_signer_decoder.go @@ -38,6 +38,8 @@ func (b *BlockSignerDecoder) DecodeSignerIDs(header *flow.Header) (flow.Identifi id := header.ID() members, err := b.Identities(id) if err != nil { + // TODO: this potentially needs to be updated when we implement and document proper error handling for + // `hotstuff.Committee` and underlying code (such as `protocol.Snapshot`) if errors.Is(err, storage.ErrNotFound) { return nil, state.NewUnknownBlockErrorf("block %v has not been processed yet: %w", id, err) } diff --git a/engine/access/rpc/engine_builder.go b/engine/access/rpc/engine_builder.go index a2e15b96ea2..43f20673b08 100644 --- a/engine/access/rpc/engine_builder.go +++ b/engine/access/rpc/engine_builder.go @@ -15,16 +15,16 @@ import ( // NewRPCEngineBuilder helps to build a new RPC engine. 
func NewRPCEngineBuilder(engine *Engine) *RPCEngineBuilder { return &RPCEngineBuilder{ - Engine: engine, - sgnIdcsDecoder: signature.NewNoopBlockSignerDecoder(), + Engine: engine, + signerIndicesDecoder: signature.NewNoopBlockSignerDecoder(), } } type RPCEngineBuilder struct { *Engine - router *apiproxy.FlowAccessAPIRouter // this is set through `WithRouting`; or nil if not explicitly specified - signerDecoder hotstuff.BlockSignerDecoder + router *apiproxy.FlowAccessAPIRouter // this is set through `WithRouting`; or nil if not explicitly specified + signerIndicesDecoder hotstuff.BlockSignerDecoder } // WithRouting specifies that the given router should be used as primary access API. @@ -37,8 +37,8 @@ func (builder *RPCEngineBuilder) WithRouting(router *apiproxy.FlowAccessAPIRoute // WithBlockSignerDecoder specifies that signer indices in block headers should be translated // to full node IDs with the given decoder. // Returns self-reference for chaining. -func (builder *RPCEngineBuilder) WithBlockSignerDecoder(sgnIdcsDecoder hotstuff.BlockSignerDecoder) *RPCEngineBuilder { - builder.sgnIdcsDecoder = sgnIdcsDecoder +func (builder *RPCEngineBuilder) WithBlockSignerDecoder(signerIndicesDecoder hotstuff.BlockSignerDecoder) *RPCEngineBuilder { + builder.signerIndicesDecoder = signerIndicesDecoder return builder } @@ -68,7 +68,7 @@ func (builder *RPCEngineBuilder) WithMetrics() *RPCEngineBuilder { } func (builder *RPCEngineBuilder) Build() *Engine { - var localAPIServer accessproto.AccessAPIServer = access.NewHandler(builder.backend, builder.chain, access.WithBlockSignerDecoder(builder.sgnIdcsDecoder)) + var localAPIServer accessproto.AccessAPIServer = access.NewHandler(builder.backend, builder.chain, access.WithBlockSignerDecoder(builder.signerIndicesDecoder)) if builder.router != nil { builder.router.SetLocalAPI(localAPIServer) diff --git a/engine/execution/rpc/engine.go b/engine/execution/rpc/engine.go index 75e8cac3100..ecc27371a53 100644 --- 
a/engine/execution/rpc/engine.go +++ b/engine/execution/rpc/engine.go @@ -55,7 +55,7 @@ func New( exeResults storage.ExecutionResults, txResults storage.TransactionResults, chainID flow.ChainID, - sgnIdcsDecoder hotstuff.BlockSignerDecoder, + signerIndicesDecoder hotstuff.BlockSignerDecoder, apiRatelimits map[string]int, // the api rate limit (max calls per second) for each of the gRPC API e.g. Ping->100, ExecuteScriptAtBlockID->300 apiBurstLimits map[string]int, // the api burst limit (max calls at the same time) for each of the gRPC API e.g. Ping->50, ExecuteScriptAtBlockID->10 ) *Engine { @@ -92,16 +92,16 @@ func New( log: log, unit: engine.NewUnit(), handler: &handler{ - engine: e, - chain: chainID, - blocks: blocks, - headers: headers, - state: state, - sgnIdcsDecoder: sgnIdcsDecoder, - events: events, - exeResults: exeResults, - transactionResults: txResults, - log: log, + engine: e, + chain: chainID, + blocks: blocks, + headers: headers, + state: state, + signerIndicesDecoder: signerIndicesDecoder, + events: events, + exeResults: exeResults, + transactionResults: txResults, + log: log, }, server: server, config: config, @@ -151,16 +151,16 @@ func (e *Engine) serve() { // handler implements a subset of the Observation API. 
type handler struct { - engine ingestion.IngestRPC - chain flow.ChainID - blocks storage.Blocks - headers storage.Headers - state protocol.State - signerDecoder hotstuff.BlockSignerDecoder - events storage.Events - exeResults storage.ExecutionResults - transactionResults storage.TransactionResults - log zerolog.Logger + engine ingestion.IngestRPC + chain flow.ChainID + blocks storage.Blocks + headers storage.Headers + state protocol.State + signerIndicesDecoder hotstuff.BlockSignerDecoder + events storage.Events + exeResults storage.ExecutionResults + transactionResults storage.TransactionResults + log zerolog.Logger } var _ execution.ExecutionAPIServer = &handler{} @@ -558,7 +558,7 @@ func (h *handler) GetBlockHeaderByID( } func (h *handler) blockHeaderResponse(header *flow.Header) (*execution.BlockHeaderResponse, error) { - signerIDs, err := h.sgnIdcsDecoder.DecodeSignerIDs(header) + signerIDs, err := h.signerIndicesDecoder.DecodeSignerIDs(header) if err != nil { return nil, fmt.Errorf("failed to decode signer indices to Identifiers for block %v: %w", header.ID(), err) } From e977ef375a0d1cbd58687c7d1e3fe58ef6ceacd8 Mon Sep 17 00:00:00 2001 From: Alexander Hentschel Date: Wed, 13 Jul 2022 15:36:26 -0700 Subject: [PATCH 183/223] extended `UnknownBlockError` --- .../hotstuff/signature/block_signer_decoder.go | 2 +- state/errors.go | 16 ++++++++++++++-- 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/consensus/hotstuff/signature/block_signer_decoder.go b/consensus/hotstuff/signature/block_signer_decoder.go index 5c0d19dec72..65d59cb3b45 100644 --- a/consensus/hotstuff/signature/block_signer_decoder.go +++ b/consensus/hotstuff/signature/block_signer_decoder.go @@ -41,7 +41,7 @@ func (b *BlockSignerDecoder) DecodeSignerIDs(header *flow.Header) (flow.Identifi // TODO: this potentially needs to be updated when we implement and document proper error handling for // `hotstuff.Committee` and underlying code (such as `protocol.Snapshot`) if errors.Is(err, 
storage.ErrNotFound) { - return nil, state.NewUnknownBlockErrorf("block %v has not been processed yet: %w", id, err) + return nil, state.WrapAsUnknownBlockError(id, err) } return nil, fmt.Errorf("fail to retrieve identities for block %v: %w", id, err) } diff --git a/state/errors.go b/state/errors.go index 238557192d1..f9d6aacada0 100644 --- a/state/errors.go +++ b/state/errors.go @@ -3,6 +3,8 @@ package state import ( "errors" "fmt" + + "github.com/onflow/flow-go/model/flow" ) // InvalidExtensionError is an error for invalid extension of the state @@ -95,11 +97,21 @@ func IsNoValidChildBlockError(err error) bool { // has not been ingested yet. type UnknownBlockError struct { blockID flow.Identifier + err error } -func NewUnknownBlockErrorf(msg string, args ...interface{}) error { +// WrapAsUnknownBlockError wraps a given error as UnknownBlockError +func WrapAsUnknownBlockError(blockID flow.Identifier, err error) error { return UnknownBlockError{ - err: fmt.Errorf(msg, args...), + blockID: blockID, + err: fmt.Errorf("block %v has not been processed yet: %w", blockID, err), + } +} + +func NewUnknownBlockError(blockID flow.Identifier) error { + return UnknownBlockError{ + blockID: blockID, + err: fmt.Errorf("block %v has not been processed yet: %w", blockID), } } From d66f021ae7636e74059051cfa5751838ae89787b Mon Sep 17 00:00:00 2001 From: Alexander Hentschel Date: Wed, 13 Jul 2022 15:41:38 -0700 Subject: [PATCH 184/223] linted code --- state/errors.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/state/errors.go b/state/errors.go index f9d6aacada0..e0b1c7205d4 100644 --- a/state/errors.go +++ b/state/errors.go @@ -111,7 +111,7 @@ func WrapAsUnknownBlockError(blockID flow.Identifier, err error) error { func NewUnknownBlockError(blockID flow.Identifier) error { return UnknownBlockError{ blockID: blockID, - err: fmt.Errorf("block %v has not been processed yet: %w", blockID), + err: fmt.Errorf("block %v has not been processed yet", blockID), } } From 
c38da8c27452217800b275a52faf7b8c2e255acd Mon Sep 17 00:00:00 2001 From: lolpuddle Date: Wed, 13 Jul 2022 19:31:15 -0400 Subject: [PATCH 185/223] remove global lock --- engine/access/rpc/backend/connection_factory.go | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/engine/access/rpc/backend/connection_factory.go b/engine/access/rpc/backend/connection_factory.go index 2d0ec572e38..3fb46c5e922 100644 --- a/engine/access/rpc/backend/connection_factory.go +++ b/engine/access/rpc/backend/connection_factory.go @@ -50,7 +50,6 @@ type ConnectionFactoryImpl struct { ExecutionNodeGRPCTimeout time.Duration ConnectionsCache *lru.Cache CacheSize uint - lock sync.Mutex AccessMetrics module.AccessMetrics } @@ -111,18 +110,6 @@ func (cf *ConnectionFactoryImpl) retrieveConnection(grpcAddress string, timeout } } if conn == nil || conn.GetState() == connectivity.Shutdown { - // this lock prevents a memory leak where a race condition may occur if 2 requests to a new connection at the - // same address occur. 
the second add would overwrite the first without closing the connection - cf.lock.Lock() - defer cf.lock.Unlock() - // Check if connection was created/refreshed by another thread - if res, ok := cf.ConnectionsCache.Get(grpcAddress); ok { - conn = res.(ConnectionCacheStore).ClientConn - if conn != nil && conn.GetState() != connectivity.Shutdown { - return conn, nil - } - } - // updates to the cache don't trigger evictions; this line closes connections before re-establishing new ones if conn != nil { conn.Close() From bc0daba607f4d176a32faff9b4a15ece8ec0946e Mon Sep 17 00:00:00 2001 From: Kay-Zee Date: Wed, 13 Jul 2022 17:56:57 -0700 Subject: [PATCH 186/223] Fix error codes from canary to staging --- engine/common/rpc/convert/convert.go | 2 +- model/flow/address.go | 4 ++-- model/flow/chain.go | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/engine/common/rpc/convert/convert.go b/engine/common/rpc/convert/convert.go index d8dfa090d06..3fbcfca9a20 100644 --- a/engine/common/rpc/convert/convert.go +++ b/engine/common/rpc/convert/convert.go @@ -19,7 +19,7 @@ var ErrEmptyMessage = errors.New("protobuf message is empty") var ValidChainIds = map[string]bool{ flow.Mainnet.String(): true, flow.Testnet.String(): true, - flow.Canary.String(): true, + flow.Stagingnet.String(): true, flow.Benchnet.String(): true, flow.Localnet.String(): true, flow.Emulator.String(): true, diff --git a/model/flow/address.go b/model/flow/address.go index 19737e6bf19..1c5809a0c10 100644 --- a/model/flow/address.go +++ b/model/flow/address.go @@ -258,8 +258,8 @@ const invalidCodeTestNetwork = uint64(0x6834ba37b3980209) // invalidCodeTransientNetwork is the invalid codeword used for transient test networks. const invalidCodeTransientNetwork = uint64(0x1cb159857af02018) -// invalidCodeCanaryNetwork is the invalid codeword used for Canary network. 
-const invalidCodeCanaryNetwork = uint64(0x1035ce4eff92ae01) +// invalidCodeStagingNetwork is the invalid codeword used for Staging network. +const invalidCodeStagingNetwork = uint64(0x1035ce4eff92ae01) // encodeWord encodes a word into a code word. // In Flow, the word is the account index while the code word diff --git a/model/flow/chain.go b/model/flow/chain.go index 431e9a36f8b..9831f165619 100644 --- a/model/flow/chain.go +++ b/model/flow/chain.go @@ -53,7 +53,7 @@ func (c ChainID) getChainCodeWord() uint64 { case Testnet: return invalidCodeTestNetwork case Stagingnet: - return invalidCodeCanaryNetwork + return invalidCodeStagingNetwork case Emulator, Localnet, Benchnet, BftTestnet: return invalidCodeTransientNetwork default: From 51f71e526032fa7543d6e401d01b29bd89891dcf Mon Sep 17 00:00:00 2001 From: lolpuddle Date: Thu, 14 Jul 2022 03:14:36 -0400 Subject: [PATCH 187/223] increase cache size + better locking --- engine/access/rpc/backend/backend.go | 2 +- .../access/rpc/backend/connection_factory.go | 51 +++++++++---------- .../rpc/backend/connection_factory_test.go | 24 ++++----- engine/access/rpc/engine.go | 4 +- 4 files changed, 41 insertions(+), 40 deletions(-) diff --git a/engine/access/rpc/backend/backend.go b/engine/access/rpc/backend/backend.go index 469a91d8685..250d9fc8683 100644 --- a/engine/access/rpc/backend/backend.go +++ b/engine/access/rpc/backend/backend.go @@ -43,7 +43,7 @@ const DefaultSnapshotHistoryLimit = 50 const DefaultLoggedScriptsCacheSize = 1_000_000 // DefaultConnectionPoolSize is the default size for the connection pool to collection and execution nodes -const DefaultConnectionPoolSize = 10 +const DefaultConnectionPoolSize = 50 var preferredENIdentifiers flow.IdentifierList var fixedENIdentifiers flow.IdentifierList diff --git a/engine/access/rpc/backend/connection_factory.go b/engine/access/rpc/backend/connection_factory.go index 3fb46c5e922..33ecf9b0332 100644 --- a/engine/access/rpc/backend/connection_factory.go +++ 
b/engine/access/rpc/backend/connection_factory.go @@ -19,8 +19,8 @@ import ( "github.com/onflow/flow-go/utils/grpcutils" ) -// the default timeout used when making a GRPC request to a collection node or an execution node -const defaultClientTimeout = 3 * time.Second +// DefaultClientTimeout is used when making a GRPC request to a collection node or an execution node +const DefaultClientTimeout = 3 * time.Second // ConnectionFactory is used to create an access api client type ConnectionFactory interface { @@ -51,6 +51,7 @@ type ConnectionFactoryImpl struct { ConnectionsCache *lru.Cache CacheSize uint AccessMetrics module.AccessMetrics + mutex sync.Mutex } type ConnectionCacheStore struct { @@ -62,7 +63,7 @@ type ConnectionCacheStore struct { func (cf *ConnectionFactoryImpl) createConnection(address string, timeout time.Duration) (*grpc.ClientConn, error) { if timeout == 0 { - timeout = defaultClientTimeout + timeout = DefaultClientTimeout } keepaliveParams := keepalive.ClientParameters{ @@ -88,34 +89,36 @@ func (cf *ConnectionFactoryImpl) createConnection(address string, timeout time.D func (cf *ConnectionFactoryImpl) retrieveConnection(grpcAddress string, timeout time.Duration) (*grpc.ClientConn, error) { var conn *grpc.ClientConn - clientMutex := new(sync.Mutex) - store := ConnectionCacheStore{ - ClientConn: nil, - mutex: clientMutex, - } - if prev, ok, _ := cf.ConnectionsCache.PeekOrAdd(grpcAddress, store); ok { - clientMutex = prev.(ConnectionCacheStore).mutex - } - // we lock this mutex to prevent a scenario where the connection is not good, which will result in - // re-establishing the connection for this address. if the mutex is not locked, we may attempt to re-establish - // the connection multiple times which would result in cache thrashing. 
- clientMutex.Lock() - defer clientMutex.Unlock() + var clientMutex *sync.Mutex + var store *ConnectionCacheStore - // get again to update the LRU cache + cf.mutex.Lock() if res, ok := cf.ConnectionsCache.Get(grpcAddress); ok { - conn = res.(ConnectionCacheStore).ClientConn + store = res.(*ConnectionCacheStore) + clientMutex = store.mutex + conn = store.ClientConn if cf.AccessMetrics != nil { cf.AccessMetrics.ConnectionFromPoolRetrieved() } + } else { + clientMutex = new(sync.Mutex) + store = &ConnectionCacheStore{ + ClientConn: nil, + mutex: clientMutex, + } + cf.ConnectionsCache.Add(grpcAddress, store) } + clientMutex.Lock() + defer clientMutex.Unlock() + cf.mutex.Unlock() + if conn == nil || conn.GetState() == connectivity.Shutdown { // updates to the cache don't trigger evictions; this line closes connections before re-establishing new ones if conn != nil { conn.Close() } var err error - conn, err = cf.addConnection(grpcAddress, timeout, clientMutex) + conn, err = cf.addConnection(grpcAddress, timeout, store) if err != nil { return nil, err } @@ -123,18 +126,14 @@ func (cf *ConnectionFactoryImpl) retrieveConnection(grpcAddress string, timeout return conn, nil } -func (cf *ConnectionFactoryImpl) addConnection(grpcAddress string, timeout time.Duration, mutex *sync.Mutex) (*grpc.ClientConn, error) { +func (cf *ConnectionFactoryImpl) addConnection(grpcAddress string, timeout time.Duration, store *ConnectionCacheStore) (*grpc.ClientConn, error) { conn, err := cf.createConnection(grpcAddress, timeout) if err != nil { return nil, err } - store := ConnectionCacheStore{ - ClientConn: conn, - mutex: mutex, - } + store.ClientConn = conn - cf.ConnectionsCache.Add(grpcAddress, store) if cf.AccessMetrics != nil { cf.AccessMetrics.TotalConnectionsInPool(uint(cf.ConnectionsCache.Len()), cf.CacheSize) } @@ -184,7 +183,7 @@ func (cf *ConnectionFactoryImpl) InvalidateExecutionAPIClient(address string) { func (cf *ConnectionFactoryImpl) invalidateAPIClient(address string, port 
uint) { grpcAddress, _ := getGRPCAddress(address, port) if res, ok := cf.ConnectionsCache.Get(grpcAddress); ok { - store := res.(ConnectionCacheStore) + store := res.(*ConnectionCacheStore) store.mutex.Lock() if store.ClientConn != nil { conn := store.ClientConn diff --git a/engine/access/rpc/backend/connection_factory_test.go b/engine/access/rpc/backend/connection_factory_test.go index 0c3268c745b..22e7a6f64c0 100644 --- a/engine/access/rpc/backend/connection_factory_test.go +++ b/engine/access/rpc/backend/connection_factory_test.go @@ -38,7 +38,7 @@ func TestProxyAccessAPI(t *testing.T) { connectionFactory.CollectionGRPCPort = cn.port // set the connection pool cache size cache, _ := lru.NewWithEvict(5, func(_, evictedValue interface{}) { - evictedValue.(ConnectionCacheStore).ClientConn.Close() + evictedValue.(*ConnectionCacheStore).ClientConn.Close() }) connectionFactory.ConnectionsCache = cache // set metrics reporting @@ -76,7 +76,7 @@ func TestProxyExecutionAPI(t *testing.T) { connectionFactory.ExecutionGRPCPort = en.port // set the connection pool cache size cache, _ := lru.NewWithEvict(5, func(_, evictedValue interface{}) { - evictedValue.(ConnectionCacheStore).ClientConn.Close() + evictedValue.(*ConnectionCacheStore).ClientConn.Close() }) connectionFactory.ConnectionsCache = cache // set metrics reporting @@ -114,7 +114,7 @@ func TestProxyAccessAPIConnectionReuse(t *testing.T) { connectionFactory.CollectionGRPCPort = cn.port // set the connection pool cache size cache, _ := lru.NewWithEvict(5, func(_, evictedValue interface{}) { - evictedValue.(ConnectionCacheStore).ClientConn.Close() + evictedValue.(*ConnectionCacheStore).ClientConn.Close() }) connectionFactory.ConnectionsCache = cache // set metrics reporting @@ -133,7 +133,7 @@ func TestProxyAccessAPIConnectionReuse(t *testing.T) { var conn *grpc.ClientConn res, ok := connectionFactory.ConnectionsCache.Get(proxyConnectionFactory.targetAddress) assert.True(t, ok) - conn = 
res.(ConnectionCacheStore).ClientConn + conn = res.(*ConnectionCacheStore).ClientConn // check if api client can be rebuilt with retrieved connection accessAPIClient := access.NewAccessAPIClient(conn) @@ -159,7 +159,7 @@ func TestProxyExecutionAPIConnectionReuse(t *testing.T) { connectionFactory.ExecutionGRPCPort = en.port // set the connection pool cache size cache, _ := lru.NewWithEvict(5, func(_, evictedValue interface{}) { - evictedValue.(ConnectionCacheStore).ClientConn.Close() + evictedValue.(*ConnectionCacheStore).ClientConn.Close() }) connectionFactory.ConnectionsCache = cache // set metrics reporting @@ -178,7 +178,7 @@ func TestProxyExecutionAPIConnectionReuse(t *testing.T) { var conn *grpc.ClientConn res, ok := connectionFactory.ConnectionsCache.Get(proxyConnectionFactory.targetAddress) assert.True(t, ok) - conn = res.(ConnectionCacheStore).ClientConn + conn = res.(*ConnectionCacheStore).ClientConn // check if api client can be rebuilt with retrieved connection executionAPIClient := execution.NewExecutionAPIClient(conn) @@ -211,7 +211,7 @@ func TestExecutionNodeClientTimeout(t *testing.T) { connectionFactory.ExecutionNodeGRPCTimeout = timeout // set the connection pool cache size cache, _ := lru.NewWithEvict(5, func(_, evictedValue interface{}) { - evictedValue.(ConnectionCacheStore).ClientConn.Close() + evictedValue.(*ConnectionCacheStore).ClientConn.Close() }) connectionFactory.ConnectionsCache = cache // set metrics reporting @@ -252,7 +252,7 @@ func TestCollectionNodeClientTimeout(t *testing.T) { connectionFactory.CollectionNodeGRPCTimeout = timeout // set the connection pool cache size cache, _ := lru.NewWithEvict(5, func(_, evictedValue interface{}) { - evictedValue.(ConnectionCacheStore).ClientConn.Close() + evictedValue.(*ConnectionCacheStore).ClientConn.Close() }) connectionFactory.ConnectionsCache = cache // set metrics reporting @@ -293,7 +293,7 @@ func TestConnectionPoolFull(t *testing.T) { connectionFactory.CollectionGRPCPort = cn1.port // 
set the connection pool cache size cache, _ := lru.NewWithEvict(2, func(_, evictedValue interface{}) { - evictedValue.(ConnectionCacheStore).ClientConn.Close() + evictedValue.(*ConnectionCacheStore).ClientConn.Close() }) connectionFactory.ConnectionsCache = cache // set metrics reporting @@ -359,7 +359,7 @@ func TestConnectionPoolStale(t *testing.T) { // set the connection pool cache size cache, _ := lru.NewWithEvict(5, func(_, evictedValue interface{}) { - evictedValue.(ConnectionCacheStore).ClientConn.Close() + evictedValue.(*ConnectionCacheStore).ClientConn.Close() }) connectionFactory.ConnectionsCache = cache // set metrics reporting @@ -377,7 +377,7 @@ func TestConnectionPoolStale(t *testing.T) { // close connection to simulate something "going wrong" with our stored connection res, _ := connectionFactory.ConnectionsCache.Get(proxyConnectionFactory.targetAddress) - res.(ConnectionCacheStore).ClientConn.Close() + res.(*ConnectionCacheStore).ClientConn.Close() ctx := context.Background() // make the call to the collection node (should fail, connection closed) @@ -391,7 +391,7 @@ func TestConnectionPoolStale(t *testing.T) { var conn *grpc.ClientConn res, ok := connectionFactory.ConnectionsCache.Get(proxyConnectionFactory.targetAddress) assert.True(t, ok) - conn = res.(ConnectionCacheStore).ClientConn + conn = res.(*ConnectionCacheStore).ClientConn // check if api client can be rebuilt with retrieved connection accessAPIClient := access.NewAccessAPIClient(conn) diff --git a/engine/access/rpc/engine.go b/engine/access/rpc/engine.go index 2a22f6d03a1..7a3732230b0 100644 --- a/engine/access/rpc/engine.go +++ b/engine/access/rpc/engine.go @@ -137,7 +137,9 @@ func NewBuilder(log zerolog.Logger, cacheSize = backend.DefaultConnectionPoolSize } cache, err := lru.NewWithEvict(int(cacheSize), func(_, evictedValue interface{}) { - evictedValue.(backend.ConnectionCacheStore).ClientConn.Close() + // allow time for any existing requests to finish before closing the connection + 
time.Sleep(backend.DefaultClientTimeout) + evictedValue.(*backend.ConnectionCacheStore).ClientConn.Close() }) if err != nil { return nil, fmt.Errorf("could not initialize connection pool cache: %w", err) From 3bc654fb3e844361d7f965e90487ef951876bdcf Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Thu, 14 Jul 2022 08:39:55 -0400 Subject: [PATCH 188/223] rename IsAuthorized -> EnsureAuthorized --- network/message/authorization.go | 4 ++-- network/validator/pubsub/authorized_sender_validator.go | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/network/message/authorization.go b/network/message/authorization.go index d60a52ce9b8..cd1fbbfb061 100644 --- a/network/message/authorization.go +++ b/network/message/authorization.go @@ -21,12 +21,12 @@ type MsgAuthConfig struct { Config map[channels.Channel]flow.RoleList } -// IsAuthorized checks if the specified role is authorized to send the message on the provided channel and +// EnsureAuthorized checks if the specified role is authorized to send the message on the provided channel and // asserts that the message is authorized to be sent on the channel. 
// Expected error returns during normal operations: // * ErrUnauthorizedMessageOnChannel: the channel is not included in the message's list of authorized channels // * ErrUnauthorizedRole: the role is not included in the message's list of authorized roles for the provided channel -func (m MsgAuthConfig) IsAuthorized(role flow.Role, channel channels.Channel) error { +func (m MsgAuthConfig) EnsureAuthorized(role flow.Role, channel channels.Channel) error { authorizedRoles, ok := m.Config[channel] if !ok { return ErrUnauthorizedMessageOnChannel diff --git a/network/validator/pubsub/authorized_sender_validator.go b/network/validator/pubsub/authorized_sender_validator.go index 1fd19c2d580..1043fb3d611 100644 --- a/network/validator/pubsub/authorized_sender_validator.go +++ b/network/validator/pubsub/authorized_sender_validator.go @@ -114,7 +114,7 @@ func isAuthorizedSender(identity *flow.Identity, channel channels.Channel, msg i channel = channels.Channel(prefix) } - if err := conf.IsAuthorized(identity.Role, channel); err != nil { + if err := conf.EnsureAuthorized(identity.Role, channel); err != nil { return conf.Name, err } From 791c6b9bc217050cd9925f12a24bf7fb35f96529 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Thu, 14 Jul 2022 08:44:48 -0400 Subject: [PATCH 189/223] update entity response configuration - add provide collections channel used to respond to entity request for collections - remove ProvideApprovalsByChunk --- network/message/authorization.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/network/message/authorization.go b/network/message/authorization.go index cd1fbbfb061..59dfa3d020e 100644 --- a/network/message/authorization.go +++ b/network/message/authorization.go @@ -241,8 +241,8 @@ func initializeMessageAuthConfigsMap() { return new(messages.EntityResponse) }, Config: map[channels.Channel]flow.RoleList{ - channels.ProvideApprovalsByChunk: {flow.RoleVerification}, channels.ProvideReceiptsByBlockID: {flow.RoleExecution}, + 
channels.ProvideCollections: {flow.RoleCollection}, }, } From 18f74004d97bf1aceaf1e5931b3f41ba070898f6 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Thu, 14 Jul 2022 08:47:55 -0400 Subject: [PATCH 190/223] remove Transaction from GetMessageAuthConfig switch statement --- network/message/authorization.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/network/message/authorization.go b/network/message/authorization.go index 59dfa3d020e..747ffd7bf8b 100644 --- a/network/message/authorization.go +++ b/network/message/authorization.go @@ -307,8 +307,6 @@ func GetMessageAuthConfig(v interface{}) (MsgAuthConfig, error) { return authorizationConfigs[CollectionGuarantee], nil case *flow.TransactionBody: return authorizationConfigs[TransactionBody], nil - case *flow.Transaction: - return authorizationConfigs[Transaction], nil // core messages for execution & verification case *flow.ExecutionReceipt: From a0e47046e1f386435c4ce4a6bc16ff1ee2ce32f9 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Thu, 14 Jul 2022 08:49:00 -0400 Subject: [PATCH 191/223] remove execution state synchronization messages from GetMessageAuthConfig switch --- network/message/authorization.go | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/network/message/authorization.go b/network/message/authorization.go index 747ffd7bf8b..f8018091e53 100644 --- a/network/message/authorization.go +++ b/network/message/authorization.go @@ -313,13 +313,7 @@ func GetMessageAuthConfig(v interface{}) (MsgAuthConfig, error) { return authorizationConfigs[ExecutionReceipt], nil case *flow.ResultApproval: return authorizationConfigs[ResultApproval], nil - - // execution state synchronization - case *messages.ExecutionStateSyncRequest: - return authorizationConfigs[ExecutionStateSyncRequest], nil - case *messages.ExecutionStateDelta: - return authorizationConfigs[ExecutionStateDelta], nil - + // data exchange for execution of blocks case *messages.ChunkDataRequest: return 
authorizationConfigs[ChunkDataRequest], nil From 91adefe75c9492912fc743612d73d90cf463257e Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Thu, 14 Jul 2022 08:49:55 -0400 Subject: [PATCH 192/223] specifying slice length to avoid unnecessary allocation and GC overhead --- network/message/authorization.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/network/message/authorization.go b/network/message/authorization.go index f8018091e53..0d63df26738 100644 --- a/network/message/authorization.go +++ b/network/message/authorization.go @@ -313,7 +313,7 @@ func GetMessageAuthConfig(v interface{}) (MsgAuthConfig, error) { return authorizationConfigs[ExecutionReceipt], nil case *flow.ResultApproval: return authorizationConfigs[ResultApproval], nil - + // data exchange for execution of blocks case *messages.ChunkDataRequest: return authorizationConfigs[ChunkDataRequest], nil @@ -347,7 +347,7 @@ func GetMessageAuthConfig(v interface{}) (MsgAuthConfig, error) { // GetAllMessageAuthConfigs returns all the configured message auth configurations. 
func GetAllMessageAuthConfigs() []MsgAuthConfig { - configs := make([]MsgAuthConfig, 0) + configs := make([]MsgAuthConfig, len(authorizationConfigs)) for _, config := range authorizationConfigs { configs = append(configs, config) } From 017ee461ac1a7c938fdd16e4ece3566a522cac41 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Thu, 14 Jul 2022 09:04:37 -0400 Subject: [PATCH 193/223] remove Transaction const string --- network/message/init.go | 1 - 1 file changed, 1 deletion(-) diff --git a/network/message/init.go b/network/message/init.go index e69886cb47e..f8d9e585e4c 100644 --- a/network/message/init.go +++ b/network/message/init.go @@ -19,7 +19,6 @@ const ( ClusterBlockVote = "ClusterBlockVote" ClusterBlockResponse = "ClusterBlockResponse" CollectionGuarantee = "CollectionGuarantee" - Transaction = "Transaction" TransactionBody = "TransactionBody" ExecutionReceipt = "ExecutionReceipt" ResultApproval = "ResultApproval" From 6d62de313106e6cf81b067b0f8ad756786306cf7 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Thu, 14 Jul 2022 09:09:50 -0400 Subject: [PATCH 194/223] remove execution state sync messages const strings --- network/message/init.go | 46 ++++++++++++++++++++--------------------- 1 file changed, 22 insertions(+), 24 deletions(-) diff --git a/network/message/init.go b/network/message/init.go index f8d9e585e4c..a0bf4aee47c 100644 --- a/network/message/init.go +++ b/network/message/init.go @@ -8,28 +8,26 @@ func init() { // string constants for all message types sent on the network const ( - BlockProposal = "BlockProposal" - BlockVote = "BlockVote" - SyncRequest = "SyncRequest" - SyncResponse = "SyncResponse" - RangeRequest = "RangeRequest" - BatchRequest = "BatchRequest" - BlockResponse = "BlockResponse" - ClusterBlockProposal = "ClusterBlockProposal" - ClusterBlockVote = "ClusterBlockVote" - ClusterBlockResponse = "ClusterBlockResponse" - CollectionGuarantee = "CollectionGuarantee" - TransactionBody = "TransactionBody" - ExecutionReceipt = 
"ExecutionReceipt" - ResultApproval = "ResultApproval" - ExecutionStateSyncRequest = "ExecutionStateSyncRequest" - ExecutionStateDelta = "ExecutionStateDelta" - ChunkDataRequest = "ChunkDataRequest" - ChunkDataResponse = "ChunkDataResponse" - ApprovalRequest = "ApprovalRequest" - ApprovalResponse = "ApprovalResponse" - EntityRequest = "EntityRequest" - EntityResponse = "EntityResponse" - TestMessage = "TestMessage" - DKGMessage = "DKGMessage" + BlockProposal = "BlockProposal" + BlockVote = "BlockVote" + SyncRequest = "SyncRequest" + SyncResponse = "SyncResponse" + RangeRequest = "RangeRequest" + BatchRequest = "BatchRequest" + BlockResponse = "BlockResponse" + ClusterBlockProposal = "ClusterBlockProposal" + ClusterBlockVote = "ClusterBlockVote" + ClusterBlockResponse = "ClusterBlockResponse" + CollectionGuarantee = "CollectionGuarantee" + TransactionBody = "TransactionBody" + ExecutionReceipt = "ExecutionReceipt" + ResultApproval = "ResultApproval" + ChunkDataRequest = "ChunkDataRequest" + ChunkDataResponse = "ChunkDataResponse" + ApprovalRequest = "ApprovalRequest" + ApprovalResponse = "ApprovalResponse" + EntityRequest = "EntityRequest" + EntityResponse = "EntityResponse" + TestMessage = "TestMessage" + DKGMessage = "DKGMessage" ) From 26780f32b70bdd111602a7cc1753921d43d4a983 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Thu, 14 Jul 2022 09:10:23 -0400 Subject: [PATCH 195/223] remove append from GetAllMessageAuthConfigs for loop --- network/message/authorization.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/network/message/authorization.go b/network/message/authorization.go index 0d63df26738..8646e1f630e 100644 --- a/network/message/authorization.go +++ b/network/message/authorization.go @@ -348,8 +348,11 @@ func GetMessageAuthConfig(v interface{}) (MsgAuthConfig, error) { // GetAllMessageAuthConfigs returns all the configured message auth configurations. 
func GetAllMessageAuthConfigs() []MsgAuthConfig { configs := make([]MsgAuthConfig, len(authorizationConfigs)) + + i := 0 for _, config := range authorizationConfigs { - configs = append(configs, config) + configs[i] = config + i++ } return configs From 7d8cba10a001ede139f7165adef31a69889fd9e5 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Thu, 14 Jul 2022 10:13:04 -0500 Subject: [PATCH 196/223] Add comments for returned errors --- ledger/complete/wal/checkpointer.go | 2 ++ model/flow/ledger.go | 5 ++++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/ledger/complete/wal/checkpointer.go b/ledger/complete/wal/checkpointer.go index fb512172f3f..34c13f3c55c 100644 --- a/ledger/complete/wal/checkpointer.go +++ b/ledger/complete/wal/checkpointer.go @@ -783,6 +783,8 @@ func readCheckpointV5(f *os.File) ([]*trie.MTrie, error) { } // ReadLastTrieRootHashFromCheckpoint returns last trie's root hash from checkpoint file f. +// All returned errors indicate that the given checkpoint file is either corrupted or +// incompatible. As the function is side-effect free, all failures are simply a no-op. func ReadLastTrieRootHashFromCheckpoint(f *os.File) (hash.Hash, error) { // read checkpoint version diff --git a/model/flow/ledger.go b/model/flow/ledger.go index 5610db17d24..bb005fdeb5e 100644 --- a/model/flow/ledger.go +++ b/model/flow/ledger.go @@ -108,8 +108,11 @@ type StateCommitment hash.Hash // although it can represent a valid state commitment. var DummyStateCommitment = StateCommitment(hash.DummyHash) -// ToStateCommitment converts a byte slice into a StateComitment. +// ToStateCommitment converts a byte slice into a StateCommitment. // It returns an error if the slice has an invalid length. +// The returned error indicates that the given byte slice is not a +// valid root hash of an execution state. As the function is +// side-effect free, all failures are simply a no-op.
func ToStateCommitment(stateBytes []byte) (StateCommitment, error) { var state StateCommitment if len(stateBytes) != len(state) { From a6c1732f6853ae02f7ec09ff0160ff34e164e78c Mon Sep 17 00:00:00 2001 From: Patrick Lee Date: Thu, 14 Jul 2022 09:31:54 -0700 Subject: [PATCH 197/223] change unittest fixture to use *flow.Header instead of flow.Header Make test code more consistent with regular code --- cmd/bootstrap/run/block.go | 8 +- .../cmd/rollback_executed_height_test.go | 24 ++--- consensus/follower_test.go | 2 +- .../hotstuff/eventloop/event_loop_test.go | 10 +- consensus/hotstuff/model/proposal.go | 4 +- .../verification/combined_signer_v2_test.go | 2 +- .../verification/combined_signer_v3_test.go | 4 +- .../verification/staking_signer_test.go | 2 +- .../voteaggregator/vote_aggregator_test.go | 2 +- .../combined_vote_processor_v2_test.go | 2 +- consensus/recovery/recover_test.go | 4 +- engine/access/access_test.go | 12 +-- engine/access/rest/accounts_test.go | 4 +- engine/access/rest/events_test.go | 2 +- engine/access/rpc/backend/backend_test.go | 12 +-- engine/collection/epochmgr/engine_test.go | 4 +- .../collection/synchronization/engine_test.go | 2 +- engine/common/follower/engine_test.go | 2 +- engine/common/synchronization/engine_test.go | 6 +- .../approvals/approval_collector_test.go | 2 +- .../assignment_collector_statemachine_test.go | 6 +- .../assignment_collector_tree_test.go | 18 ++-- .../caching_assignment_collector_test.go | 4 +- engine/consensus/approvals/testutil.go | 20 ++-- .../verifying_assignment_collector_test.go | 16 +-- engine/consensus/compliance/engine_test.go | 8 +- engine/consensus/dkg/reactor_engine_test.go | 4 +- engine/consensus/ingestion/core_test.go | 8 +- engine/consensus/matching/engine_test.go | 6 +- engine/consensus/sealing/core_test.go | 98 +++++++++---------- engine/consensus/sealing/engine_test.go | 10 +- engine/execution/computation/manager_test.go | 16 +-- engine/execution/ingestion/engine_test.go | 24 ++--- 
engine/verification/assigner/engine_test.go | 2 +- engine/verification/utils/unittest/fixture.go | 2 +- engine/verification/utils/unittest/helper.go | 2 +- fvm/blocks_test.go | 64 ++++++------ fvm/fvm_blockcontext_test.go | 4 +- fvm/transactionInvoker_test.go | 2 +- model/flow/header_test.go | 11 +-- module/buffer/backend_test.go | 4 +- module/builder/collection/builder.go | 6 +- module/builder/consensus/builder.go | 9 -- module/builder/consensus/builder_test.go | 2 +- module/chunks/chunkVerifier_test.go | 4 +- module/finalizer/collection/finalizer_test.go | 2 +- module/finalizer/consensus/finalizer_test.go | 14 +-- module/jobqueue/jobs_test.go | 4 +- .../execution_data_cid_cache_test.go | 16 +-- .../jobs/execution_data_reader_test.go | 2 +- module/synchronization/core_rapid_test.go | 36 +++---- module/synchronization/core_test.go | 20 ++-- network/p2p/topic_validator_test.go | 12 +-- state/fork/traversal_test.go | 22 ++--- state/protocol/events/gadgets/heights_test.go | 2 +- state/protocol/events/gadgets/views_test.go | 2 +- storage/badger/operation/headers_test.go | 6 +- utils/unittest/fixtures.go | 34 +++---- utils/unittest/incorporated_results_seals.go | 2 +- 59 files changed, 312 insertions(+), 322 deletions(-) diff --git a/cmd/bootstrap/run/block.go b/cmd/bootstrap/run/block.go index be9908dfa1b..d5a4a10a38d 100644 --- a/cmd/bootstrap/run/block.go +++ b/cmd/bootstrap/run/block.go @@ -8,11 +8,11 @@ import ( func GenerateRootBlock(chainID flow.ChainID, parentID flow.Identifier, height uint64, timestamp time.Time) *flow.Block { - payload := flow.Payload{ + payload := &flow.Payload{ Guarantees: nil, Seals: nil, } - header := flow.Header{ + header := &flow.Header{ ChainID: chainID, ParentID: parentID, Height: height, @@ -26,7 +26,7 @@ func GenerateRootBlock(chainID flow.ChainID, parentID flow.Identifier, height ui } return &flow.Block{ - Header: &header, - Payload: &payload, + Header: header, + Payload: payload, } } diff --git 
a/cmd/util/cmd/rollback-executed-height/cmd/rollback_executed_height_test.go b/cmd/util/cmd/rollback-executed-height/cmd/rollback_executed_height_test.go index 3acddfefb29..48afa78ad23 100644 --- a/cmd/util/cmd/rollback-executed-height/cmd/rollback_executed_height_test.go +++ b/cmd/util/cmd/rollback-executed-height/cmd/rollback_executed_height_test.go @@ -24,7 +24,7 @@ func TestReExecuteBlock(t *testing.T) { // bootstrap to init highest executed height bootstrapper := bootstrap.NewBootstrapper(unittest.Logger()) genesis := unittest.BlockHeaderFixture() - err := bootstrapper.BootstrapExecutionDatabase(db, unittest.StateCommitmentFixture(), &genesis) + err := bootstrapper.BootstrapExecutionDatabase(db, unittest.StateCommitmentFixture(), genesis) require.NoError(t, err) // create all modules @@ -42,7 +42,7 @@ func TestReExecuteBlock(t *testing.T) { transactions := bstorage.NewTransactions(metrics, db) collections := bstorage.NewCollections(db, transactions) - err = headers.Store(&genesis) + err = headers.Store(genesis) require.NoError(t, err) // create execution state module @@ -64,7 +64,7 @@ func TestReExecuteBlock(t *testing.T) { require.NotNil(t, es) // prepare data - header := unittest.BlockHeaderWithParentFixture(&genesis) // make sure the height is higher than genesis + header := unittest.BlockHeaderWithParentFixture(genesis) // make sure the height is higher than genesis executionReceipt := unittest.ExecutionReceiptFixture() executionReceipt.ExecutionResult.BlockID = header.ID() cdp := make([]*flow.ChunkDataPack, 0, len(executionReceipt.ExecutionResult.Chunks)) @@ -78,13 +78,13 @@ func TestReExecuteBlock(t *testing.T) { se := unittest.BlockEventsFixture(header, 8) tes := unittest.TransactionResultsFixture(4) - err = headers.Store(&header) + err = headers.Store(header) require.NoError(t, err) // save execution results err = es.SaveExecutionResults( context.Background(), - &header, + header, endState, cdp, executionReceipt, @@ -113,7 +113,7 @@ func 
TestReExecuteBlock(t *testing.T) { // re execute result err = es.SaveExecutionResults( context.Background(), - &header, + header, endState, cdp, executionReceipt, @@ -134,7 +134,7 @@ func TestReExecuteBlockWithDifferentResult(t *testing.T) { // bootstrap to init highest executed height bootstrapper := bootstrap.NewBootstrapper(unittest.Logger()) genesis := unittest.BlockHeaderFixture() - err := bootstrapper.BootstrapExecutionDatabase(db, unittest.StateCommitmentFixture(), &genesis) + err := bootstrapper.BootstrapExecutionDatabase(db, unittest.StateCommitmentFixture(), genesis) require.NoError(t, err) // create all modules @@ -152,7 +152,7 @@ func TestReExecuteBlockWithDifferentResult(t *testing.T) { transactions := bstorage.NewTransactions(metrics, db) collections := bstorage.NewCollections(db, transactions) - err = headers.Store(&genesis) + err = headers.Store(genesis) require.NoError(t, err) // create execution state module @@ -174,7 +174,7 @@ func TestReExecuteBlockWithDifferentResult(t *testing.T) { require.NotNil(t, es) // prepare data - header := unittest.BlockHeaderWithParentFixture(&genesis) // make sure the height is higher than genesis + header := unittest.BlockHeaderWithParentFixture(genesis) // make sure the height is higher than genesis executionReceipt := unittest.ExecutionReceiptFixture() executionReceipt.ExecutionResult.BlockID = header.ID() cdp := make([]*flow.ChunkDataPack, 0, len(executionReceipt.ExecutionResult.Chunks)) @@ -188,13 +188,13 @@ func TestReExecuteBlockWithDifferentResult(t *testing.T) { se := unittest.BlockEventsFixture(header, 8) tes := unittest.TransactionResultsFixture(4) - err = headers.Store(&header) + err = headers.Store(header) require.NoError(t, err) // save execution results err = es.SaveExecutionResults( context.Background(), - &header, + header, endState, cdp, executionReceipt, @@ -232,7 +232,7 @@ func TestReExecuteBlockWithDifferentResult(t *testing.T) { // re execute result err = es.SaveExecutionResults( 
context.Background(), - &header, + header, endState2, cdp2, executionReceipt2, diff --git a/consensus/follower_test.go b/consensus/follower_test.go index 70cae98c718..48b0c5c28f7 100644 --- a/consensus/follower_test.go +++ b/consensus/follower_test.go @@ -342,5 +342,5 @@ func (mc *MockConsensus) extendBlock(blockView uint64, parent *flow.Header) *flo nextBlock.ProposerID = mc.identities[int(blockView)%len(mc.identities)].NodeID signerIndices, _ := signature.EncodeSignersToIndices(mc.identities.NodeIDs(), mc.identities.NodeIDs()) nextBlock.ParentVoterIndices = signerIndices - return &nextBlock + return nextBlock } diff --git a/consensus/hotstuff/eventloop/event_loop_test.go b/consensus/hotstuff/eventloop/event_loop_test.go index 464655ce595..db37ccaf5d3 100644 --- a/consensus/hotstuff/eventloop/event_loop_test.go +++ b/consensus/hotstuff/eventloop/event_loop_test.go @@ -73,12 +73,12 @@ func (s *EventLoopTestSuite) TestReadyDone() { // Test_SubmitQC tests that submitted proposal is eventually sent to event handler for processing func (s *EventLoopTestSuite) Test_SubmitProposal() { proposal := unittest.BlockHeaderFixture() - expectedProposal := model.ProposalFromFlow(&proposal, proposal.View-1) + expectedProposal := model.ProposalFromFlow(proposal, proposal.View-1) processed := atomic.NewBool(false) s.eh.On("OnReceiveProposal", expectedProposal).Run(func(args mock.Arguments) { processed.Store(true) }).Return(nil).Once() - s.eventLoop.SubmitProposal(&proposal, proposal.View-1) + s.eventLoop.SubmitProposal(proposal, proposal.View-1) require.Eventually(s.T(), processed.Load, time.Millisecond*100, time.Millisecond*10) s.eh.AssertExpectations(s.T()) } @@ -136,7 +136,7 @@ func TestEventLoop_Timeout(t *testing.T) { defer wg.Done() for !processed.Load() { proposal := unittest.BlockHeaderFixture() - eventLoop.SubmitProposal(&proposal, proposal.View-1) + eventLoop.SubmitProposal(proposal, proposal.View-1) } }() @@ -177,8 +177,8 @@ func TestReadyDoneWithStartTime(t *testing.T) { 
unittest.RequireCloseBefore(t, eventLoop.Ready(), 100*time.Millisecond, "event loop not started") parentBlock := unittest.BlockHeaderFixture() - block := unittest.BlockHeaderWithParentFixture(&parentBlock) - eventLoop.SubmitProposal(&block, parentBlock.View) + block := unittest.BlockHeaderWithParentFixture(parentBlock) + eventLoop.SubmitProposal(block, parentBlock.View) unittest.RequireCloseBefore(t, done, startTimeDuration+100*time.Millisecond, "proposal wasn't received") cancel() diff --git a/consensus/hotstuff/model/proposal.go b/consensus/hotstuff/model/proposal.go index d0f31290d15..ba0a342df0b 100644 --- a/consensus/hotstuff/model/proposal.go +++ b/consensus/hotstuff/model/proposal.go @@ -39,7 +39,7 @@ func ProposalFromFlow(header *flow.Header, parentView uint64) *Proposal { func ProposalToFlow(proposal *Proposal) *flow.Header { block := proposal.Block - header := flow.Header{ + header := &flow.Header{ ParentID: block.QC.BlockID, PayloadHash: block.PayloadHash, Timestamp: block.Timestamp, @@ -50,5 +50,5 @@ func ProposalToFlow(proposal *Proposal) *flow.Header { ProposerSigData: proposal.SigData, } - return &header + return header } diff --git a/consensus/hotstuff/verification/combined_signer_v2_test.go b/consensus/hotstuff/verification/combined_signer_v2_test.go index 914f89b3ecb..f084aec684c 100644 --- a/consensus/hotstuff/verification/combined_signer_v2_test.go +++ b/consensus/hotstuff/verification/combined_signer_v2_test.go @@ -194,7 +194,7 @@ func Test_VerifyQC_EmptySigners(t *testing.T) { verifier := NewCombinedVerifier(committee, packer) header := unittest.BlockHeaderFixture() - block := model.BlockFromFlow(&header, header.View-1) + block := model.BlockFromFlow(header, header.View-1) sigData := unittest.QCSigDataFixture() err := verifier.VerifyQC([]*flow.Identity{}, sigData, block) diff --git a/consensus/hotstuff/verification/combined_signer_v3_test.go b/consensus/hotstuff/verification/combined_signer_v3_test.go index b3afacc2b4e..614c4673840 100644 --- 
a/consensus/hotstuff/verification/combined_signer_v3_test.go +++ b/consensus/hotstuff/verification/combined_signer_v3_test.go @@ -155,7 +155,7 @@ func TestCombinedSignWithNoDKGKeyV3(t *testing.T) { // Test_VerifyQC checks that a QC where either signer list is empty is rejected as invalid func Test_VerifyQCV3(t *testing.T) { header := unittest.BlockHeaderFixture() - block := model.BlockFromFlow(&header, header.View-1) + block := model.BlockFromFlow(header, header.View-1) msg := MakeVoteMessage(block.View, block.BlockID) // generate some BLS key as a stub of the random beacon group key and use it to generate a reconstructed beacon sig @@ -268,7 +268,7 @@ func Test_VerifyQC_EmptySignersV3(t *testing.T) { verifier := NewCombinedVerifier(committee, packer) header := unittest.BlockHeaderFixture() - block := model.BlockFromFlow(&header, header.View-1) + block := model.BlockFromFlow(header, header.View-1) sigData := unittest.QCSigDataFixture() err := verifier.VerifyQC([]*flow.Identity{}, sigData, block) diff --git a/consensus/hotstuff/verification/staking_signer_test.go b/consensus/hotstuff/verification/staking_signer_test.go index f9b083ca105..9122d0f067c 100644 --- a/consensus/hotstuff/verification/staking_signer_test.go +++ b/consensus/hotstuff/verification/staking_signer_test.go @@ -110,7 +110,7 @@ func TestStakingSigner_CreateVote(t *testing.T) { // TestStakingSigner_VerifyQC checks that a QC without any signers is rejected right away without calling into any sub-components func TestStakingSigner_VerifyQC(t *testing.T) { header := unittest.BlockHeaderFixture() - block := model.BlockFromFlow(&header, header.View-1) + block := model.BlockFromFlow(header, header.View-1) sigData := unittest.RandomBytes(127) verifier := NewStakingVerifier() diff --git a/consensus/hotstuff/voteaggregator/vote_aggregator_test.go b/consensus/hotstuff/voteaggregator/vote_aggregator_test.go index ca6cb40d7f3..bb670288357 100644 --- a/consensus/hotstuff/voteaggregator/vote_aggregator_test.go +++ 
b/consensus/hotstuff/voteaggregator/vote_aggregator_test.go @@ -72,7 +72,7 @@ func (s *VoteAggregatorTestSuite) TearDownTest() { func (s *VoteAggregatorTestSuite) TestOnFinalizedBlock() { finalizedBlock := unittest.BlockHeaderFixture(unittest.HeaderWithView(100)) s.collectors.On("PruneUpToView", finalizedBlock.View).Once() - s.aggregator.OnFinalizedBlock(model.BlockFromFlow(&finalizedBlock, finalizedBlock.View-1)) + s.aggregator.OnFinalizedBlock(model.BlockFromFlow(finalizedBlock, finalizedBlock.View-1)) require.Eventually(s.T(), func() bool { return s.collectors.AssertCalled(s.T(), "PruneUpToView", finalizedBlock.View) diff --git a/consensus/hotstuff/votecollector/combined_vote_processor_v2_test.go b/consensus/hotstuff/votecollector/combined_vote_processor_v2_test.go index 953a9e8d309..c4142ec2bdf 100644 --- a/consensus/hotstuff/votecollector/combined_vote_processor_v2_test.go +++ b/consensus/hotstuff/votecollector/combined_vote_processor_v2_test.go @@ -934,7 +934,7 @@ func TestReadRandomSourceFromPackedQCV2(t *testing.T) { // making a mock block header := unittest.BlockHeaderFixture() - block := model.BlockFromFlow(&header, header.View-1) + block := model.BlockFromFlow(header, header.View-1) // create a packer committee := &mockhotstuff.Committee{} diff --git a/consensus/recovery/recover_test.go b/consensus/recovery/recover_test.go index 795badbadfb..1afe56f3986 100644 --- a/consensus/recovery/recover_test.go +++ b/consensus/recovery/recover_test.go @@ -14,7 +14,7 @@ import ( func TestRecover(t *testing.T) { finalized := unittest.BlockHeaderFixture() - blocks := unittest.ChainFixtureFrom(100, &finalized) + blocks := unittest.ChainFixtureFrom(100, finalized) pending := make([]*flow.Header, 0) for _, b := range blocks { @@ -47,7 +47,7 @@ func TestRecover(t *testing.T) { return nil }) - err := Recover(unittest.Logger(), &finalized, pending, validator, onProposal) + err := Recover(unittest.Logger(), finalized, pending, validator, onProposal) require.NoError(t, err) 
// only pending blocks are valid diff --git a/engine/access/access_test.go b/engine/access/access_test.go index 401f5880028..503f1a38675 100644 --- a/engine/access/access_test.go +++ b/engine/access/access_test.go @@ -80,7 +80,7 @@ func (suite *Suite) SetupTest() { suite.snapshot.On("Epochs").Return(suite.epochQuery).Maybe() header := unittest.BlockHeaderFixture() params := new(protocol.Params) - params.On("Root").Return(&header, nil) + params.On("Root").Return(header, nil) suite.state.On("Params").Return(params).Maybe() suite.collClient = new(accessmock.AccessAPIClient) @@ -152,12 +152,12 @@ func (suite *Suite) TestSendAndGetTransaction() { refSnapshot. On("Head"). - Return(&referenceBlock, nil). + Return(referenceBlock, nil). Twice() suite.snapshot. On("Head"). - Return(&referenceBlock, nil). + Return(referenceBlock, nil). Once() expected := convert.TransactionToMessage(transaction.TransactionBody) @@ -210,12 +210,12 @@ func (suite *Suite) TestSendExpiredTransaction() { refSnapshot. On("Head"). - Return(&referenceBlock, nil). + Return(referenceBlock, nil). Twice() suite.snapshot. On("Head"). - Return(&latestBlock, nil). + Return(latestBlock, nil). Once() req := &accessproto.SendTransactionRequest{ @@ -243,7 +243,7 @@ func (suite *Suite) TestSendTransactionToRandomCollectionNode() { // setup the state and snapshot mock expectations suite.state.On("AtBlockID", referenceBlock.ID()).Return(suite.snapshot, nil) - suite.snapshot.On("Head").Return(&referenceBlock, nil) + suite.snapshot.On("Head").Return(referenceBlock, nil) // create storage metrics := metrics.NewNoopCollector() diff --git a/engine/access/rest/accounts_test.go b/engine/access/rest/accounts_test.go index f05324373ad..a5d5725757c 100644 --- a/engine/access/rest/accounts_test.go +++ b/engine/access/rest/accounts_test.go @@ -45,7 +45,7 @@ func TestGetAccount(t *testing.T) { backend.Mock. On("GetLatestBlockHeader", mocktestify.Anything, true). - Return(&block, nil) + Return(block, nil) backend.Mock. 
On("GetAccountAtBlockHeight", mocktestify.Anything, account.Address, height). @@ -66,7 +66,7 @@ func TestGetAccount(t *testing.T) { req := getAccountRequest(t, account, finalHeightQueryParam, expandableFieldKeys, expandableFieldContracts) backend.Mock. On("GetLatestBlockHeader", mocktestify.Anything, false). - Return(&block, nil) + Return(block, nil) backend.Mock. On("GetAccountAtBlockHeight", mocktestify.Anything, account.Address, height). Return(account, nil) diff --git a/engine/access/rest/events_test.go b/engine/access/rest/events_test.go index c06f9b4dc30..da556c3142f 100644 --- a/engine/access/rest/events_test.go +++ b/engine/access/rest/events_test.go @@ -180,7 +180,7 @@ func generateEventsMocks(backend *mock.API, n int) []flow.BlockEvents { backend.Mock. On("GetLatestBlockHeader", mocks.Anything, true). - Return(&latestBlock, nil) + Return(latestBlock, nil) return events } diff --git a/engine/access/rpc/backend/backend_test.go b/engine/access/rpc/backend/backend_test.go index 193752ed940..c04221f5f7b 100644 --- a/engine/access/rpc/backend/backend_test.go +++ b/engine/access/rpc/backend/backend_test.go @@ -64,7 +64,7 @@ func (suite *Suite) SetupTest() { suite.snapshot = new(protocol.Snapshot) header := unittest.BlockHeaderFixture() params := new(protocol.Params) - params.On("Root").Return(&header, nil) + params.On("Root").Return(header, nil) suite.state.On("Params").Return(params).Maybe() suite.blocks = new(storagemock.Blocks) suite.headers = new(storagemock.Headers) @@ -118,7 +118,7 @@ func (suite *Suite) TestGetLatestFinalizedBlockHeader() { // setup the mocks block := unittest.BlockHeaderFixture() suite.state.On("Final").Return(suite.snapshot, nil).Maybe() - suite.snapshot.On("Head").Return(&block, nil).Once() + suite.snapshot.On("Head").Return(block, nil).Once() backend := New( suite.state, @@ -484,7 +484,7 @@ func (suite *Suite) TestGetLatestSealedBlockHeader() { suite.state.On("Sealed").Return(suite.snapshot, nil).Maybe() block := 
unittest.BlockHeaderFixture() - suite.snapshot.On("Head").Return(&block, nil).Once() + suite.snapshot.On("Head").Return(block, nil).Once() backend := New( suite.state, @@ -666,7 +666,7 @@ func (suite *Suite) TestGetTransactionResultByIndex() { func (suite *Suite) TestGetTransactionResultsByBlockID() { head := unittest.BlockHeaderFixture() suite.state.On("Sealed").Return(suite.snapshot, nil).Maybe() - suite.snapshot.On("Head").Return(&head, nil) + suite.snapshot.On("Head").Return(head, nil) ctx := context.Background() block := unittest.BlockFixture() @@ -1548,7 +1548,7 @@ func (suite *Suite) TestGetEventsForHeightRange() { rootHeader := unittest.BlockHeaderFixture() params := new(protocol.Params) - params.On("Root").Return(&rootHeader, nil) + params.On("Root").Return(rootHeader, nil) state.On("Params").Return(params).Maybe() // mock snapshot to return head backend @@ -1579,7 +1579,7 @@ func (suite *Suite) TestGetEventsForHeightRange() { setupHeadHeight := func(height uint64) { header := unittest.BlockHeaderFixture() // create a mock header header.Height = height // set the header height - head = &header + head = header } setupStorage := func(min uint64, max uint64) ([]*flow.Header, []*flow.ExecutionReceipt, flow.IdentityList) { diff --git a/engine/collection/epochmgr/engine_test.go b/engine/collection/epochmgr/engine_test.go index 6ad8ece47c5..009b67320d4 100644 --- a/engine/collection/epochmgr/engine_test.go +++ b/engine/collection/epochmgr/engine_test.go @@ -261,7 +261,7 @@ func (suite *Suite) TestRespondToPhaseChange() { first := unittest.BlockHeaderFixture() suite.state.On("AtBlockID", first.ID()).Return(suite.snap) - suite.engine.EpochSetupPhaseStarted(0, &first) + suite.engine.EpochSetupPhaseStarted(0, first) unittest.AssertClosesBefore(suite.T(), called, time.Second) suite.voter.AssertExpectations(suite.T()) @@ -291,7 +291,7 @@ func (suite *Suite) TestRespondToEpochTransition() { // mock the epoch transition suite.TransitionEpoch() // notify the engine of the 
epoch transition - suite.engine.EpochTransition(suite.counter, &first) + suite.engine.EpochTransition(suite.counter, first) unittest.AssertClosesBefore(suite.T(), done, time.Second) suite.Assert().NotNil(expiryCallback) diff --git a/engine/collection/synchronization/engine_test.go b/engine/collection/synchronization/engine_test.go index f3fc1a6a31d..ab02e11f72b 100644 --- a/engine/collection/synchronization/engine_test.go +++ b/engine/collection/synchronization/engine_test.go @@ -63,7 +63,7 @@ func (ss *SyncSuite) SetupTest() { // generate a header for the final state header := unittest.BlockHeaderFixture() - ss.head = &header + ss.head = header // create maps to enable block returns ss.heights = make(map[uint64]*clustermodel.Block) diff --git a/engine/common/follower/engine_test.go b/engine/common/follower/engine_test.go index edf0fe3617a..f921f6e9d70 100644 --- a/engine/common/follower/engine_test.go +++ b/engine/common/follower/engine_test.go @@ -155,7 +155,7 @@ func (suite *Suite) TestHandleProposalSkipProposalThreshold() { // mock latest finalized state final := unittest.BlockHeaderFixture() - suite.snapshot.On("Head").Return(&final, nil) + suite.snapshot.On("Head").Return(final, nil) originID := unittest.IdentifierFixture() block := unittest.BlockFixture() diff --git a/engine/common/synchronization/engine_test.go b/engine/common/synchronization/engine_test.go index 22bfa9faf6b..8305b810120 100644 --- a/engine/common/synchronization/engine_test.go +++ b/engine/common/synchronization/engine_test.go @@ -70,7 +70,7 @@ func (ss *SyncSuite) SetupTest() { // generate a header for the final state header := unittest.BlockHeaderFixture() - ss.head = &header + ss.head = header // create maps to enable block returns ss.heights = make(map[uint64]*flow.Block) @@ -518,13 +518,13 @@ func (ss *SyncSuite) TestProcessingMultipleItems() { func (ss *SyncSuite) TestOnFinalizedBlock() { finalizedBlock := unittest.BlockHeaderWithParentFixture(ss.head) // change head - ss.head = 
&finalizedBlock + ss.head = finalizedBlock err := ss.e.finalizedHeader.updateHeader() require.NoError(ss.T(), err) actualHeader := ss.e.finalizedHeader.Get() require.ElementsMatch(ss.T(), ss.e.participantsProvider.Identifiers(), ss.participants[1:].NodeIDs()) - require.Equal(ss.T(), actualHeader, &finalizedBlock) + require.Equal(ss.T(), actualHeader, finalizedBlock) } // TestProcessUnsupportedMessageType tests that Process and ProcessLocal correctly handle a case where invalid message type diff --git a/engine/consensus/approvals/approval_collector_test.go b/engine/consensus/approvals/approval_collector_test.go index 6812027bb05..e3d035b8a92 100644 --- a/engine/consensus/approvals/approval_collector_test.go +++ b/engine/consensus/approvals/approval_collector_test.go @@ -34,7 +34,7 @@ func (s *ApprovalCollectorTestSuite) SetupTest() { s.sealsPL = &mempool.IncorporatedResultSeals{} var err error - s.collector, err = NewApprovalCollector(unittest.Logger(), s.IncorporatedResult, &s.IncorporatedBlock, &s.Block, s.ChunksAssignment, s.sealsPL, uint(len(s.AuthorizedVerifiers))) + s.collector, err = NewApprovalCollector(unittest.Logger(), s.IncorporatedResult, s.IncorporatedBlock, s.Block, s.ChunksAssignment, s.sealsPL, uint(len(s.AuthorizedVerifiers))) require.NoError(s.T(), err) } diff --git a/engine/consensus/approvals/assignment_collector_statemachine_test.go b/engine/consensus/approvals/assignment_collector_statemachine_test.go index 885d5bd8471..65ed3210fea 100644 --- a/engine/consensus/approvals/assignment_collector_statemachine_test.go +++ b/engine/consensus/approvals/assignment_collector_statemachine_test.go @@ -39,7 +39,7 @@ func (s *AssignmentCollectorStateMachineTestSuite) SetupTest() { approvalConduit: s.Conduit, requestTracker: s.RequestTracker, requiredApprovalsForSealConstruction: 5, - executedBlock: &s.Block, + executedBlock: s.Block, result: s.IncorporatedResult.Result, resultID: s.IncorporatedResult.Result.ID(), }) @@ -54,8 +54,8 @@ func (s 
*AssignmentCollectorStateMachineTestSuite) TestChangeProcessingStatus_Ca s.PublicKey.On("Verify", mock.Anything, mock.Anything, mock.Anything).Return(true, nil) for i := range results { - block := unittest.BlockHeaderWithParentFixture(&s.Block) - s.Blocks[block.ID()] = &block + block := unittest.BlockHeaderWithParentFixture(s.Block) + s.Blocks[block.ID()] = block result := unittest.IncorporatedResult.Fixture( unittest.IncorporatedResult.WithIncorporatedBlockID(block.ID()), unittest.IncorporatedResult.WithResult(s.IncorporatedResult.Result), diff --git a/engine/consensus/approvals/assignment_collector_tree_test.go b/engine/consensus/approvals/assignment_collector_tree_test.go index 043a8412d4f..b539584c593 100644 --- a/engine/consensus/approvals/assignment_collector_tree_test.go +++ b/engine/consensus/approvals/assignment_collector_tree_test.go @@ -52,7 +52,7 @@ func (s *AssignmentCollectorTreeSuite) SetupTest() { } s.mockedCollectors = make(map[flow.Identifier]*mockedCollectorWrapper) - s.collectorTree = approvals.NewAssignmentCollectorTree(&s.ParentBlock, s.Headers, s.factoryMethod) + s.collectorTree = approvals.NewAssignmentCollectorTree(s.ParentBlock, s.Headers, s.factoryMethod) s.prepareMockedCollector(s.IncorporatedResult.Result) } @@ -96,7 +96,7 @@ func requireStateTransition(wrapper *mockedCollectorWrapper, oldState, newState func (s *AssignmentCollectorTreeSuite) TestGetSize_ConcurrentAccess() { numberOfWorkers := 10 batchSize := 10 - chain := unittest.ChainFixtureFrom(numberOfWorkers*batchSize, &s.IncorporatedBlock) + chain := unittest.ChainFixtureFrom(numberOfWorkers*batchSize, s.IncorporatedBlock) result0 := unittest.ExecutionResultFixture() receipts := unittest.ReceiptChainFor(chain, result0) for _, block := range chain { @@ -144,7 +144,7 @@ func (s *AssignmentCollectorTreeSuite) TestGetCollector() { // TestGetCollectorsByInterval tests that GetCollectorsByInterval returns a slice // with the AssignmentCollectors from the requested interval func (s 
*AssignmentCollectorTreeSuite) TestGetCollectorsByInterval() { - chain := unittest.ChainFixtureFrom(10, &s.ParentBlock) + chain := unittest.ChainFixtureFrom(10, s.ParentBlock) receipts := unittest.ReceiptChainFor(chain, s.IncorporatedResult.Result) for _, block := range chain { s.Blocks[block.ID()] = block.Header @@ -225,7 +225,7 @@ func (s *AssignmentCollectorTreeSuite) TestGetOrCreateCollector_CollectorParentI // TestGetOrCreateCollector_AddingSealedCollector tests a case when we are trying to add collector which is already sealed. // Leveled forest doesn't accept vertexes lower than the lowest height. func (s *AssignmentCollectorTreeSuite) TestGetOrCreateCollector_AddingSealedCollector() { - block := unittest.BlockWithParentFixture(&s.ParentBlock) + block := unittest.BlockWithParentFixture(s.ParentBlock) s.Blocks[block.ID()] = block.Header result := unittest.ExecutionResultFixture(unittest.WithBlock(block)) s.prepareMockedCollector(result) @@ -234,8 +234,8 @@ func (s *AssignmentCollectorTreeSuite) TestGetOrCreateCollector_AddingSealedColl prevSealedBlock := block.Header for i := 0; i < 5; i++ { sealedBlock := unittest.BlockHeaderWithParentFixture(prevSealedBlock) - s.MarkFinalized(&sealedBlock) - _ = s.collectorTree.FinalizeForkAtLevel(&sealedBlock, &sealedBlock) + s.MarkFinalized(sealedBlock) + _ = s.collectorTree.FinalizeForkAtLevel(sealedBlock, sealedBlock) } // now adding a collector which is lower than sealed height should result in error @@ -268,7 +268,7 @@ func (s *AssignmentCollectorTreeSuite) TestFinalizeForkAtLevel_ProcessableAfterS unittest.WithExecutionResultBlockID(s.IncorporatedBlock.ID())) s.prepareMockedCollector(firstResult) for i := 0; i < len(forks); i++ { - fork := unittest.ChainFixtureFrom(3, &s.IncorporatedBlock) + fork := unittest.ChainFixtureFrom(3, s.IncorporatedBlock) forks[i] = fork prevResult := firstResult // create execution results for all blocks except last one, since it won't be valid by definition @@ -302,7 +302,7 @@ func (s 
*AssignmentCollectorTreeSuite) TestFinalizeForkAtLevel_ProcessableAfterS finalized := forks[0][0].Header - s.MarkFinalized(&s.IncorporatedBlock) + s.MarkFinalized(s.IncorporatedBlock) s.MarkFinalized(finalized) // at this point collectors for forks[0] should be processable and for forks[1] not @@ -322,7 +322,7 @@ func (s *AssignmentCollectorTreeSuite) TestFinalizeForkAtLevel_ProcessableAfterS } // A becomes sealed, B becomes finalized - err := s.collectorTree.FinalizeForkAtLevel(finalized, &s.Block) + err := s.collectorTree.FinalizeForkAtLevel(finalized, s.Block) require.NoError(s.T(), err) for forkIndex := range forks { diff --git a/engine/consensus/approvals/caching_assignment_collector_test.go b/engine/consensus/approvals/caching_assignment_collector_test.go index 947c54b26d9..2a27faeb018 100644 --- a/engine/consensus/approvals/caching_assignment_collector_test.go +++ b/engine/consensus/approvals/caching_assignment_collector_test.go @@ -16,7 +16,7 @@ import ( type CachingAssignmentCollectorTestSuite struct { suite.Suite - executedBlock flow.Header + executedBlock *flow.Header result *flow.ExecutionResult collector *CachingAssignmentCollector } @@ -31,7 +31,7 @@ func (s *CachingAssignmentCollectorTestSuite) SetupTest() { result.BlockID = s.executedBlock.ID() }) s.collector = NewCachingAssignmentCollector(AssignmentCollectorBase{ - executedBlock: &s.executedBlock, + executedBlock: s.executedBlock, result: s.result, resultID: s.result.ID(), }) diff --git a/engine/consensus/approvals/testutil.go b/engine/consensus/approvals/testutil.go index 27affe5d6fd..df5e98fa36b 100644 --- a/engine/consensus/approvals/testutil.go +++ b/engine/consensus/approvals/testutil.go @@ -25,9 +25,9 @@ import ( type BaseApprovalsTestSuite struct { suite.Suite - ParentBlock flow.Header // parent of sealing candidate - Block flow.Header // candidate for sealing - IncorporatedBlock flow.Header // block that incorporated result + ParentBlock *flow.Header // parent of sealing candidate + Block 
*flow.Header // candidate for sealing + IncorporatedBlock *flow.Header // block that incorporated result VerID flow.Identifier // for convenience, node id of first verifier Chunks flow.ChunkList // list of chunks of execution result ChunksAssignment *chunks.Assignment @@ -39,7 +39,7 @@ type BaseApprovalsTestSuite struct { func (s *BaseApprovalsTestSuite) SetupTest() { s.ParentBlock = unittest.BlockHeaderFixture() - s.Block = unittest.BlockHeaderWithParentFixture(&s.ParentBlock) + s.Block = unittest.BlockHeaderWithParentFixture(s.ParentBlock) verifiers := make(flow.IdentifierList, 0) s.AuthorizedVerifiers = make(map[flow.Identifier]*flow.Identity) s.ChunksAssignment = chunks.NewAssignment() @@ -67,7 +67,7 @@ func (s *BaseApprovalsTestSuite) SetupTest() { result.BlockID = s.Block.ID() result.Chunks = s.Chunks - s.IncorporatedBlock = unittest.BlockHeaderWithParentFixture(&s.Block) + s.IncorporatedBlock = unittest.BlockHeaderWithParentFixture(s.Block) // compose incorporated result s.IncorporatedResult = unittest.IncorporatedResult.Fixture( @@ -106,14 +106,14 @@ func (s *BaseAssignmentCollectorTestSuite) SetupTest() { s.RequestTracker = NewRequestTracker(s.Headers, 1, 3) s.FinalizedAtHeight = make(map[uint64]*flow.Header) - s.FinalizedAtHeight[s.ParentBlock.Height] = &s.ParentBlock - s.FinalizedAtHeight[s.Block.Height] = &s.Block + s.FinalizedAtHeight[s.ParentBlock.Height] = s.ParentBlock + s.FinalizedAtHeight[s.Block.Height] = s.Block // setup blocks cache for protocol state s.Blocks = make(map[flow.Identifier]*flow.Header) - s.Blocks[s.ParentBlock.ID()] = &s.ParentBlock - s.Blocks[s.Block.ID()] = &s.Block - s.Blocks[s.IncorporatedBlock.ID()] = &s.IncorporatedBlock + s.Blocks[s.ParentBlock.ID()] = s.ParentBlock + s.Blocks[s.Block.ID()] = s.Block + s.Blocks[s.IncorporatedBlock.ID()] = s.IncorporatedBlock s.Snapshots = make(map[flow.Identifier]*protocol.Snapshot) // setup identities for each block diff --git 
a/engine/consensus/approvals/verifying_assignment_collector_test.go b/engine/consensus/approvals/verifying_assignment_collector_test.go index ca8fdc03cfd..ee101e03d45 100644 --- a/engine/consensus/approvals/verifying_assignment_collector_test.go +++ b/engine/consensus/approvals/verifying_assignment_collector_test.go @@ -127,8 +127,8 @@ func (s *AssignmentCollectorTestSuite) TestProcessIncorporatedResult_ReusingCach } } - incorporatedBlock := unittest.BlockHeaderWithParentFixture(&s.Block) - s.Blocks[incorporatedBlock.ID()] = &incorporatedBlock + incorporatedBlock := unittest.BlockHeaderWithParentFixture(s.Block) + s.Blocks[incorporatedBlock.ID()] = incorporatedBlock // at this point we have proposed a seal, let's construct new incorporated result with same assignment // but different incorporated block ID resulting in new seal. @@ -217,7 +217,7 @@ func (s *AssignmentCollectorTestSuite) TestProcessIncorporatedResult() { s.Run("invalid-verifier-identities", func() { // delete identities for Result.BlockID delete(s.IdentitiesCache, s.IncorporatedResult.Result.BlockID) - s.Snapshots[s.IncorporatedResult.Result.BlockID] = unittest.StateSnapshotForKnownBlock(&s.Block, nil) + s.Snapshots[s.IncorporatedResult.Result.BlockID] = unittest.StateSnapshotForKnownBlock(s.Block, nil) collector, err := newVerifyingAssignmentCollector(unittest.Logger(), s.WorkerPool, s.IncorporatedResult.Result, s.State, s.Headers, s.Assigner, s.SealsPL, s.SigHasher, s.Conduit, s.RequestTracker, 1) require.Error(s.T(), err) @@ -237,7 +237,7 @@ func (s *AssignmentCollectorTestSuite) TestProcessIncorporatedResult_InvalidIden state.On("AtBlockID", mock.Anything).Return( func(blockID flow.Identifier) realproto.Snapshot { return unittest.StateSnapshotForKnownBlock( - &s.Block, + s.Block, map[flow.Identifier]*flow.Identity{identity.NodeID: identity}, ) }, @@ -257,7 +257,7 @@ func (s *AssignmentCollectorTestSuite) TestProcessIncorporatedResult_InvalidIden state.On("AtBlockID", mock.Anything).Return( 
func(blockID flow.Identifier) realproto.Snapshot { return unittest.StateSnapshotForKnownBlock( - &s.Block, + s.Block, map[flow.Identifier]*flow.Identity{identity.NodeID: identity}, ) }, @@ -276,7 +276,7 @@ func (s *AssignmentCollectorTestSuite) TestProcessIncorporatedResult_InvalidIden state.On("AtBlockID", mock.Anything).Return( func(blockID flow.Identifier) realproto.Snapshot { return unittest.StateSnapshotForKnownBlock( - &s.Block, + s.Block, map[flow.Identifier]*flow.Identity{identity.NodeID: identity}, ) }, @@ -323,8 +323,8 @@ func (s *AssignmentCollectorTestSuite) TestRequestMissingApprovals() { incorporatedBlock.Height = lastHeight lastHeight++ - s.Blocks[incorporatedBlock.ID()] = &incorporatedBlock - incorporatedBlocks = append(incorporatedBlocks, &incorporatedBlock) + s.Blocks[incorporatedBlock.ID()] = incorporatedBlock + incorporatedBlocks = append(incorporatedBlocks, incorporatedBlock) } incorporatedResults := make([]*flow.IncorporatedResult, 0, len(incorporatedBlocks)) diff --git a/engine/consensus/compliance/engine_test.go b/engine/consensus/compliance/engine_test.go index 54ccfbbf338..010d2bbbe85 100644 --- a/engine/consensus/compliance/engine_test.go +++ b/engine/consensus/compliance/engine_test.go @@ -90,10 +90,10 @@ func (cs *ComplianceSuite) TestBroadcastProposalWithDelay() { parent := unittest.BlockHeaderFixture() parent.ChainID = "test" parent.Height = 10 - cs.headerDB[parent.ID()] = &parent + cs.headerDB[parent.ID()] = parent // create a block with the parent and store the payload with correct ID - block := unittest.BlockWithParentFixture(&parent) + block := unittest.BlockWithParentFixture(parent) block.Header.ProposerID = cs.myID cs.payloadDB[block.ID()] = block.Payload @@ -217,12 +217,12 @@ func (cs *ComplianceSuite) TestProcessUnsupportedMessageType() { // Tests the whole processing pipeline. 
func (cs *ComplianceSuite) TestOnFinalizedBlock() { finalizedBlock := unittest.BlockHeaderFixture() - cs.head = &finalizedBlock + cs.head = finalizedBlock *cs.pending = modulemock.PendingBlockBuffer{} cs.pending.On("PruneByView", finalizedBlock.View).Return(nil).Once() cs.pending.On("Size").Return(uint(0)).Once() - cs.engine.OnFinalizedBlock(model.BlockFromFlow(&finalizedBlock, finalizedBlock.View-1)) + cs.engine.OnFinalizedBlock(model.BlockFromFlow(finalizedBlock, finalizedBlock.View-1)) require.Eventually(cs.T(), func() bool { diff --git a/engine/consensus/dkg/reactor_engine_test.go b/engine/consensus/dkg/reactor_engine_test.go index 874dd858e59..fcd214ea307 100644 --- a/engine/consensus/dkg/reactor_engine_test.go +++ b/engine/consensus/dkg/reactor_engine_test.go @@ -106,7 +106,7 @@ func (suite *ReactorEngineSuite_SetupPhase) SetupTest() { suite.blocksByView = make(map[uint64]*flow.Header) for view := suite.dkgStartView; view <= suite.dkgPhase3FinalView; view += dkg.DefaultPollStep { header := unittest.BlockHeaderFixture(unittest.HeaderWithView(view)) - suite.blocksByView[view] = &header + suite.blocksByView[view] = header } suite.firstBlock = suite.blocksByView[100] @@ -345,7 +345,7 @@ func (suite *ReactorEngineSuite_CommittedPhase) SetupTest() { epochQuery.Add(nextEpoch) firstBlock := unittest.BlockHeaderFixture(unittest.HeaderWithView(100)) - suite.firstBlock = &firstBlock + suite.firstBlock = firstBlock suite.snap = new(protocol.Snapshot) suite.snap.On("Epochs").Return(epochQuery) diff --git a/engine/consensus/ingestion/core_test.go b/engine/consensus/ingestion/core_test.go index 85b5bbbb536..7ca7737052e 100644 --- a/engine/consensus/ingestion/core_test.go +++ b/engine/consensus/ingestion/core_test.go @@ -87,7 +87,7 @@ func (suite *IngestionCoreSuite) SetupTest() { // returning everything correctly, using the created header // as head of the protocol state state.On("Final").Return(final) - final.On("Head").Return(&head, nil) + final.On("Head").Return(head, 
nil) final.On("Identity", mock.Anything).Return( func(nodeID flow.Identifier) *flow.Identity { identity, _ := suite.finalIdentities.ByNodeID(nodeID) @@ -128,14 +128,14 @@ func (suite *IngestionCoreSuite) SetupTest() { ) // we need to return the head as it's also used as reference block - headers.On("ByBlockID", head.ID()).Return(&head, nil) + headers.On("ByBlockID", head.ID()).Return(head, nil) // only used for metrics, nobody cares pool.On("Size").Return(uint(0)) ingest := NewCore(unittest.Logger(), tracer, metrics, state, headers, pool) - suite.head = &head + suite.head = head suite.final = final suite.ref = ref suite.headers = headers @@ -219,7 +219,7 @@ func (suite *IngestionCoreSuite) TestOnGuaranteeExpired() { // create an alternative block header := unittest.BlockHeaderFixture() header.Height = suite.head.Height - flow.DefaultTransactionExpiry - 1 - suite.headers.On("ByBlockID", header.ID()).Return(&header, nil) + suite.headers.On("ByBlockID", header.ID()).Return(header, nil) // create a guarantee signed by the collection node and referencing the // current head of the protocol state diff --git a/engine/consensus/matching/engine_test.go b/engine/consensus/matching/engine_test.go index 7f00062c5a6..accc1021018 100644 --- a/engine/consensus/matching/engine_test.go +++ b/engine/consensus/matching/engine_test.go @@ -65,9 +65,9 @@ func (s *MatchingEngineSuite) SetupTest() { func (s *MatchingEngineSuite) TestOnFinalizedBlock() { finalizedBlock := unittest.BlockHeaderFixture() - s.state.On("Final").Return(unittest.StateSnapshotForKnownBlock(&finalizedBlock, nil)) + s.state.On("Final").Return(unittest.StateSnapshotForKnownBlock(finalizedBlock, nil)) s.core.On("OnBlockFinalization").Return(nil).Once() - s.engine.OnFinalizedBlock(model.BlockFromFlow(&finalizedBlock, finalizedBlock.View-1)) + s.engine.OnFinalizedBlock(model.BlockFromFlow(finalizedBlock, finalizedBlock.View-1)) // matching engine has at least 100ms ticks for processing events time.Sleep(1 * time.Second) 
@@ -93,7 +93,7 @@ func (s *MatchingEngineSuite) TestOnBlockIncorporated() { } s.index.On("ByBlockID", incorporatedBlockID).Return(index, nil) - s.engine.OnBlockIncorporated(model.BlockFromFlow(&incorporatedBlock, incorporatedBlock.View-1)) + s.engine.OnBlockIncorporated(model.BlockFromFlow(incorporatedBlock, incorporatedBlock.View-1)) // matching engine has at least 100ms ticks for processing events time.Sleep(1 * time.Second) diff --git a/engine/consensus/sealing/core_test.go b/engine/consensus/sealing/core_test.go index 67c54d0b70e..16660916f7d 100644 --- a/engine/consensus/sealing/core_test.go +++ b/engine/consensus/sealing/core_test.go @@ -58,7 +58,7 @@ func (s *ApprovalProcessingCoreTestSuite) SetupTest() { s.rootHeader = unittest.GenesisFixture().Header params := new(mockstate.Params) - s.State.On("Sealed").Return(unittest.StateSnapshotForKnownBlock(&s.ParentBlock, nil)).Maybe() + s.State.On("Sealed").Return(unittest.StateSnapshotForKnownBlock(s.ParentBlock, nil)).Maybe() s.State.On("Params").Return(params) params.On("Root").Return( func() *flow.Header { return s.rootHeader }, @@ -84,7 +84,7 @@ func (s *ApprovalProcessingCoreTestSuite) TestOnBlockFinalized_RejectOutdatedApp err := s.core.processApproval(approval) require.NoError(s.T(), err) - seal := unittest.Seal.Fixture(unittest.Seal.WithBlock(&s.Block)) + seal := unittest.Seal.Fixture(unittest.Seal.WithBlock(s.Block)) s.sealsDB.On("HighestInFork", mock.Anything).Return(seal, nil).Once() err = s.core.ProcessFinalizedBlock(s.Block.ID()) @@ -98,7 +98,7 @@ func (s *ApprovalProcessingCoreTestSuite) TestOnBlockFinalized_RejectOutdatedApp // TestOnBlockFinalized_RejectOutdatedExecutionResult tests that incorporated result will be rejected as outdated // if the block which is targeted by execution result is already sealed. 
func (s *ApprovalProcessingCoreTestSuite) TestOnBlockFinalized_RejectOutdatedExecutionResult() { - seal := unittest.Seal.Fixture(unittest.Seal.WithBlock(&s.Block)) + seal := unittest.Seal.Fixture(unittest.Seal.WithBlock(s.Block)) s.sealsDB.On("HighestInFork", mock.Anything).Return(seal, nil).Once() err := s.core.ProcessFinalizedBlock(s.Block.ID()) @@ -131,11 +131,11 @@ func (s *ApprovalProcessingCoreTestSuite) TestOnBlockFinalized_RejectUnverifiabl // <- B_2 // B_1 is finalized rendering B_2 as orphan, submitting IR[ER[A], B_1] is a success, submitting IR[ER[A], B_2] is an outdated incorporated result func (s *ApprovalProcessingCoreTestSuite) TestOnBlockFinalized_RejectOrphanIncorporatedResults() { - blockB1 := unittest.BlockHeaderWithParentFixture(&s.Block) - blockB2 := unittest.BlockHeaderWithParentFixture(&s.Block) + blockB1 := unittest.BlockHeaderWithParentFixture(s.Block) + blockB2 := unittest.BlockHeaderWithParentFixture(s.Block) - s.Blocks[blockB1.ID()] = &blockB1 - s.Blocks[blockB2.ID()] = &blockB2 + s.Blocks[blockB1.ID()] = blockB1 + s.Blocks[blockB2.ID()] = blockB2 IR1 := unittest.IncorporatedResult.Fixture( unittest.IncorporatedResult.WithIncorporatedBlockID(blockB1.ID()), @@ -145,9 +145,9 @@ func (s *ApprovalProcessingCoreTestSuite) TestOnBlockFinalized_RejectOrphanIncor unittest.IncorporatedResult.WithIncorporatedBlockID(blockB2.ID()), unittest.IncorporatedResult.WithResult(s.IncorporatedResult.Result)) - s.MarkFinalized(&blockB1) + s.MarkFinalized(blockB1) - seal := unittest.Seal.Fixture(unittest.Seal.WithBlock(&s.ParentBlock)) + seal := unittest.Seal.Fixture(unittest.Seal.WithBlock(s.ParentBlock)) s.sealsDB.On("HighestInFork", mock.Anything).Return(seal, nil).Once() // blockB1 becomes finalized @@ -163,17 +163,17 @@ func (s *ApprovalProcessingCoreTestSuite) TestOnBlockFinalized_RejectOrphanIncor } func (s *ApprovalProcessingCoreTestSuite) TestOnBlockFinalized_RejectOldFinalizedBlock() { - blockB1 := unittest.BlockHeaderWithParentFixture(&s.Block) - 
blockB2 := unittest.BlockHeaderWithParentFixture(&blockB1) + blockB1 := unittest.BlockHeaderWithParentFixture(s.Block) + blockB2 := unittest.BlockHeaderWithParentFixture(blockB1) - s.Blocks[blockB1.ID()] = &blockB1 - s.Blocks[blockB2.ID()] = &blockB2 + s.Blocks[blockB1.ID()] = blockB1 + s.Blocks[blockB2.ID()] = blockB2 - seal := unittest.Seal.Fixture(unittest.Seal.WithBlock(&s.Block)) + seal := unittest.Seal.Fixture(unittest.Seal.WithBlock(s.Block)) // should only call it once s.sealsDB.On("HighestInFork", mock.Anything).Return(seal, nil).Once() - s.MarkFinalized(&blockB1) - s.MarkFinalized(&blockB2) + s.MarkFinalized(blockB1) + s.MarkFinalized(blockB2) // blockB1 becomes finalized err := s.core.ProcessFinalizedBlock(blockB2.ID()) @@ -190,8 +190,8 @@ func (s *ApprovalProcessingCoreTestSuite) TestProcessFinalizedBlock_CollectorsCl numResults := uint(10) for i := uint(0); i < numResults; i++ { // all results incorporated in different blocks - incorporatedBlock := unittest.BlockHeaderWithParentFixture(&s.IncorporatedBlock) - s.Blocks[incorporatedBlock.ID()] = &incorporatedBlock + incorporatedBlock := unittest.BlockHeaderWithParentFixture(s.IncorporatedBlock) + s.Blocks[incorporatedBlock.ID()] = incorporatedBlock // create different incorporated results for same block ID result := unittest.ExecutionResultFixture() result.BlockID = blockID @@ -204,15 +204,15 @@ func (s *ApprovalProcessingCoreTestSuite) TestProcessFinalizedBlock_CollectorsCl } require.Equal(s.T(), uint64(numResults), s.core.collectorTree.GetSize()) - candidate := unittest.BlockHeaderWithParentFixture(&s.Block) - s.Blocks[candidate.ID()] = &candidate + candidate := unittest.BlockHeaderWithParentFixture(s.Block) + s.Blocks[candidate.ID()] = candidate // candidate becomes new sealed and finalized block, it means that // we will need to cleanup our tree till new height, removing all outdated collectors - seal := unittest.Seal.Fixture(unittest.Seal.WithBlock(&candidate)) + seal := 
unittest.Seal.Fixture(unittest.Seal.WithBlock(candidate)) s.sealsDB.On("HighestInFork", mock.Anything).Return(seal, nil).Once() - s.MarkFinalized(&candidate) + s.MarkFinalized(candidate) err := s.core.ProcessFinalizedBlock(candidate.ID()) require.NoError(s.T(), err) require.Equal(s.T(), uint64(0), s.core.collectorTree.GetSize()) @@ -338,22 +338,22 @@ func (s *ApprovalProcessingCoreTestSuite) TestOnBlockFinalized_EmergencySealing( }, ).Return(true, nil).Once() - seal := unittest.Seal.Fixture(unittest.Seal.WithBlock(&s.ParentBlock)) + seal := unittest.Seal.Fixture(unittest.Seal.WithBlock(s.ParentBlock)) s.sealsDB.On("HighestInFork", mock.Anything).Return(seal, nil).Times(approvals.DefaultEmergencySealingThresholdForFinalization) - s.State.On("Sealed").Return(unittest.StateSnapshotForKnownBlock(&s.ParentBlock, nil)) + s.State.On("Sealed").Return(unittest.StateSnapshotForKnownBlock(s.ParentBlock, nil)) err = s.core.ProcessIncorporatedResult(s.IncorporatedResult) require.NoError(s.T(), err) - lastFinalizedBlock := &s.IncorporatedBlock + lastFinalizedBlock := s.IncorporatedBlock s.MarkFinalized(lastFinalizedBlock) for i := 0; i < approvals.DefaultEmergencySealingThresholdForFinalization; i++ { finalizedBlock := unittest.BlockHeaderWithParentFixture(lastFinalizedBlock) - s.Blocks[finalizedBlock.ID()] = &finalizedBlock - s.MarkFinalized(&finalizedBlock) + s.Blocks[finalizedBlock.ID()] = finalizedBlock + s.MarkFinalized(finalizedBlock) err := s.core.ProcessFinalizedBlock(finalizedBlock.ID()) require.NoError(s.T(), err) - lastFinalizedBlock = &finalizedBlock + lastFinalizedBlock = finalizedBlock } s.SealsPL.AssertExpectations(s.T()) @@ -369,7 +369,7 @@ func (s *ApprovalProcessingCoreTestSuite) TestOnBlockFinalized_ProcessingOrphanA forkResults := make([][]*flow.ExecutionResult, len(forks)) for forkIndex := range forks { - forks[forkIndex] = unittest.ChainFixtureFrom(forkIndex+2, &s.ParentBlock) + forks[forkIndex] = unittest.ChainFixtureFrom(forkIndex+2, s.ParentBlock) fork 
:= forks[forkIndex] previousResult := s.IncorporatedResult.Result @@ -398,7 +398,7 @@ func (s *ApprovalProcessingCoreTestSuite) TestOnBlockFinalized_ProcessingOrphanA } // same block sealed - seal := unittest.Seal.Fixture(unittest.Seal.WithBlock(&s.ParentBlock)) + seal := unittest.Seal.Fixture(unittest.Seal.WithBlock(s.ParentBlock)) s.sealsDB.On("HighestInFork", mock.Anything).Return(seal, nil).Once() // block B_1 becomes finalized @@ -437,7 +437,7 @@ func (s *ApprovalProcessingCoreTestSuite) TestOnBlockFinalized_ExtendingUnproces forks := make([][]*flow.Block, 2) for forkIndex := range forks { - forks[forkIndex] = unittest.ChainFixtureFrom(forkIndex+3, &s.Block) + forks[forkIndex] = unittest.ChainFixtureFrom(forkIndex+3, s.Block) fork := forks[forkIndex] for _, block := range fork { s.Blocks[block.ID()] = block.Header @@ -448,7 +448,7 @@ func (s *ApprovalProcessingCoreTestSuite) TestOnBlockFinalized_ExtendingUnproces finalized := forks[1][0].Header s.MarkFinalized(finalized) - seal := unittest.Seal.Fixture(unittest.Seal.WithBlock(&s.ParentBlock)) + seal := unittest.Seal.Fixture(unittest.Seal.WithBlock(s.ParentBlock)) s.sealsDB.On("HighestInFork", mock.Anything).Return(seal, nil).Once() // finalize block B @@ -487,21 +487,21 @@ func (s *ApprovalProcessingCoreTestSuite) TestOnBlockFinalized_ExtendingUnproces // TestOnBlockFinalized_ExtendingSealedResult tests if assignment collector tree accepts collector which extends sealed result func (s *ApprovalProcessingCoreTestSuite) TestOnBlockFinalized_ExtendingSealedResult() { - seal := unittest.Seal.Fixture(unittest.Seal.WithBlock(&s.Block)) + seal := unittest.Seal.Fixture(unittest.Seal.WithBlock(s.Block)) s.sealsDB.On("HighestInFork", mock.Anything).Return(seal, nil).Once() - unsealedBlock := unittest.BlockHeaderWithParentFixture(&s.Block) - s.Blocks[unsealedBlock.ID()] = &unsealedBlock + unsealedBlock := unittest.BlockHeaderWithParentFixture(s.Block) + s.Blocks[unsealedBlock.ID()] = unsealedBlock 
s.IdentitiesCache[unsealedBlock.ID()] = s.AuthorizedVerifiers result := unittest.ExecutionResultFixture(unittest.WithPreviousResult(*s.IncorporatedResult.Result)) result.BlockID = unsealedBlock.ID() - s.MarkFinalized(&unsealedBlock) + s.MarkFinalized(unsealedBlock) err := s.core.ProcessFinalizedBlock(unsealedBlock.ID()) require.NoError(s.T(), err) - incorporatedBlock := unittest.BlockHeaderWithParentFixture(&unsealedBlock) - s.Blocks[incorporatedBlock.ID()] = &incorporatedBlock + incorporatedBlock := unittest.BlockHeaderWithParentFixture(unsealedBlock) + s.Blocks[incorporatedBlock.ID()] = incorporatedBlock s.IdentitiesCache[incorporatedBlock.ID()] = s.AuthorizedVerifiers IR := unittest.IncorporatedResult.Fixture( unittest.IncorporatedResult.WithIncorporatedBlockID(incorporatedBlock.ID()), @@ -526,7 +526,7 @@ func (s *ApprovalProcessingCoreTestSuite) TestRequestPendingApprovals() { // create blocks unsealedFinalizedBlocks := make([]flow.Block, 0, n) - parentBlock := &s.ParentBlock + parentBlock := s.ParentBlock for i := 0; i < n; i++ { block := unittest.BlockWithParentFixture(parentBlock) s.Blocks[block.ID()] = block.Header @@ -603,10 +603,10 @@ func (s *ApprovalProcessingCoreTestSuite) TestRequestPendingApprovals() { } // sealed block doesn't change - seal := unittest.Seal.Fixture(unittest.Seal.WithBlock(&s.ParentBlock)) + seal := unittest.Seal.Fixture(unittest.Seal.WithBlock(s.ParentBlock)) s.sealsDB.On("HighestInFork", mock.Anything).Return(seal, nil) - s.State.On("Sealed").Return(unittest.StateSnapshotForKnownBlock(&s.ParentBlock, nil)) + s.State.On("Sealed").Return(unittest.StateSnapshotForKnownBlock(s.ParentBlock, nil)) // start delivering finalization events lastProcessedIndex := 0 @@ -679,7 +679,7 @@ func (s *ApprovalProcessingCoreTestSuite) TestRepopulateAssignmentCollectorTree( s.sealsDB.On("HighestInFork", s.IncorporatedBlock.ID()).Return( unittest.Seal.Fixture( - unittest.Seal.WithBlock(&s.ParentBlock)), nil) + unittest.Seal.WithBlock(s.ParentBlock)), 
nil) // the incorporated block contains the result for the sealing candidate block incorporatedBlockPayload := unittest.PayloadFixture( @@ -697,7 +697,7 @@ func (s *ApprovalProcessingCoreTestSuite) TestRepopulateAssignmentCollectorTree( // two forks for i := 0; i < 2; i++ { - fork := unittest.ChainFixtureFrom(i+3, &s.IncorporatedBlock) + fork := unittest.ChainFixtureFrom(i+3, s.IncorporatedBlock) prevResult := s.IncorporatedResult.Result // create execution results for all blocks except last one, since it won't be valid by definition for blockIndex, block := range fork { @@ -736,7 +736,7 @@ func (s *ApprovalProcessingCoreTestSuite) TestRepopulateAssignmentCollectorTree( } // ValidDescendants has to return all valid descendants from finalized block - finalSnapShot := unittest.StateSnapshotForKnownBlock(&s.IncorporatedBlock, nil) + finalSnapShot := unittest.StateSnapshotForKnownBlock(s.IncorporatedBlock, nil) finalSnapShot.On("ValidDescendants").Return(blockChildren, nil) s.State.On("Final").Return(finalSnapShot) @@ -772,12 +772,12 @@ func (s *ApprovalProcessingCoreTestSuite) TestRepopulateAssignmentCollectorTree_ payloads := &storage.Payloads{} // setup mocks - s.rootHeader = &s.IncorporatedBlock + s.rootHeader = s.IncorporatedBlock expectedResults := []*flow.IncorporatedResult{s.IncorporatedResult} s.sealsDB.On("HighestInFork", s.IncorporatedBlock.ID()).Return( unittest.Seal.Fixture( - unittest.Seal.WithBlock(&s.ParentBlock)), nil) + unittest.Seal.WithBlock(s.ParentBlock)), nil) // the incorporated block contains the result for the sealing candidate block incorporatedBlockPayload := unittest.PayloadFixture( @@ -808,13 +808,13 @@ func (s *ApprovalProcessingCoreTestSuite) TestRepopulateAssignmentCollectorTree_ finalSnapShot.On("SealingSegment").Return( &flow.SealingSegment{ Blocks: []*flow.Block{{ - Header: &s.Block, + Header: s.Block, Payload: &candidatePayload, }, { - Header: &s.ParentBlock, + Header: s.ParentBlock, Payload: &flow.Payload{}, }, { - Header: 
&s.IncorporatedBlock, + Header: s.IncorporatedBlock, Payload: &incorporatedBlockPayload, }}, }, nil) diff --git a/engine/consensus/sealing/engine_test.go b/engine/consensus/sealing/engine_test.go index 35ade14f14d..05b36a19221 100644 --- a/engine/consensus/sealing/engine_test.go +++ b/engine/consensus/sealing/engine_test.go @@ -87,9 +87,9 @@ func (s *SealingEngineSuite) TestOnFinalizedBlock() { finalizedBlock := unittest.BlockHeaderFixture() finalizedBlockID := finalizedBlock.ID() - s.state.On("Final").Return(unittest.StateSnapshotForKnownBlock(&finalizedBlock, nil)) + s.state.On("Final").Return(unittest.StateSnapshotForKnownBlock(finalizedBlock, nil)) s.core.On("ProcessFinalizedBlock", finalizedBlockID).Return(nil).Once() - s.engine.OnFinalizedBlock(model.BlockFromFlow(&finalizedBlock, finalizedBlock.View-1)) + s.engine.OnFinalizedBlock(model.BlockFromFlow(finalizedBlock, finalizedBlock.View-1)) // matching engine has at least 100ms ticks for processing events time.Sleep(1 * time.Second) @@ -101,7 +101,7 @@ func (s *SealingEngineSuite) TestOnFinalizedBlock() { // Tests the whole processing pipeline. 
func (s *SealingEngineSuite) TestOnBlockIncorporated() { parentBlock := unittest.BlockHeaderFixture() - incorporatedBlock := unittest.BlockHeaderWithParentFixture(&parentBlock) + incorporatedBlock := unittest.BlockHeaderWithParentFixture(parentBlock) incorporatedBlockID := incorporatedBlock.ID() // setup payload fixture payload := unittest.PayloadFixture(unittest.WithAllTheFixins) @@ -118,10 +118,10 @@ func (s *SealingEngineSuite) TestOnBlockIncorporated() { // setup headers storage headers := &mockstorage.Headers{} - headers.On("ByBlockID", incorporatedBlockID).Return(&incorporatedBlock, nil).Once() + headers.On("ByBlockID", incorporatedBlockID).Return(incorporatedBlock, nil).Once() s.engine.headers = headers - s.engine.OnBlockIncorporated(model.BlockFromFlow(&incorporatedBlock, incorporatedBlock.View-1)) + s.engine.OnBlockIncorporated(model.BlockFromFlow(incorporatedBlock, incorporatedBlock.View-1)) // matching engine has at least 100ms ticks for processing events time.Sleep(1 * time.Second) diff --git a/engine/execution/computation/manager_test.go b/engine/execution/computation/manager_test.go index 58565b7b9a3..84531128834 100644 --- a/engine/execution/computation/manager_test.go +++ b/engine/execution/computation/manager_test.go @@ -246,7 +246,7 @@ func TestExecuteScript(t *testing.T) { require.NoError(t, err) header := unittest.BlockHeaderFixture() - _, err = engine.ExecuteScript(context.Background(), script, nil, &header, scriptView) + _, err = engine.ExecuteScript(context.Background(), script, nil, header, scriptView) require.NoError(t, err) } @@ -300,7 +300,7 @@ func TestExecuteScript_BalanceScriptFailsIfViewIsEmpty(t *testing.T) { require.NoError(t, err) header := unittest.BlockHeaderFixture() - _, err = engine.ExecuteScript(context.Background(), script, nil, &header, scriptView) + _, err = engine.ExecuteScript(context.Background(), script, nil, header, scriptView) require.ErrorContains(t, err, "error getting register") } @@ -334,7 +334,7 @@ func 
TestExecuteScripPanicsAreHandled(t *testing.T) { edCache) require.NoError(t, err) - _, err = manager.ExecuteScript(context.Background(), []byte("whatever"), nil, &header, noopView()) + _, err = manager.ExecuteScript(context.Background(), []byte("whatever"), nil, header, noopView()) require.Error(t, err) @@ -371,7 +371,7 @@ func TestExecuteScript_LongScriptsAreLogged(t *testing.T) { edCache) require.NoError(t, err) - _, err = manager.ExecuteScript(context.Background(), []byte("whatever"), nil, &header, noopView()) + _, err = manager.ExecuteScript(context.Background(), []byte("whatever"), nil, header, noopView()) require.NoError(t, err) @@ -408,7 +408,7 @@ func TestExecuteScript_ShortScriptsAreNotLogged(t *testing.T) { edCache) require.NoError(t, err) - _, err = manager.ExecuteScript(context.Background(), []byte("whatever"), nil, &header, noopView()) + _, err = manager.ExecuteScript(context.Background(), []byte("whatever"), nil, header, noopView()) require.NoError(t, err) @@ -501,7 +501,7 @@ func TestExecuteScriptTimeout(t *testing.T) { `) header := unittest.BlockHeaderFixture() - value, err := manager.ExecuteScript(context.Background(), script, nil, &header, noopView()) + value, err := manager.ExecuteScript(context.Background(), script, nil, header, noopView()) require.Error(t, err) require.Nil(t, value) @@ -547,7 +547,7 @@ func TestExecuteScriptCancelled(t *testing.T) { wg.Add(1) go func() { header := unittest.BlockHeaderFixture() - value, err = manager.ExecuteScript(reqCtx, script, nil, &header, noopView()) + value, err = manager.ExecuteScript(reqCtx, script, nil, header, noopView()) wg.Done() }() cancel() @@ -603,7 +603,7 @@ func TestScriptStorageMutationsDiscarded(t *testing.T) { header := unittest.BlockHeaderFixture() scriptView := view.NewChild() - _, err = manager.ExecuteScript(context.Background(), script, [][]byte{jsoncdc.MustEncode(address)}, &header, scriptView) + _, err = manager.ExecuteScript(context.Background(), script, 
[][]byte{jsoncdc.MustEncode(address)}, header, scriptView) require.NoError(t, err) diff --git a/engine/execution/ingestion/engine_test.go b/engine/execution/ingestion/engine_test.go index 4ac4406e65a..a6ac167e824 100644 --- a/engine/execution/ingestion/engine_test.go +++ b/engine/execution/ingestion/engine_test.go @@ -414,7 +414,7 @@ func TestExecuteOneBlock(t *testing.T) { // A <- B blockA := unittest.BlockHeaderFixture() - blockB := unittest.ExecutableBlockFixtureWithParent(nil, &blockA) + blockB := unittest.ExecutableBlockFixtureWithParent(nil, blockA) blockB.StartState = unittest.StateCommitmentPointerFixture() ctx.mockHasWeightAtBlockID(blockA.ID(), true) @@ -429,7 +429,7 @@ func TestExecuteOneBlock(t *testing.T) { ctx.mockStateCommitsWithMap(commits) ctx.state.On("Sealed").Return(ctx.snapshot) - ctx.snapshot.On("Head").Return(&blockA, nil) + ctx.snapshot.On("Head").Return(blockA, nil) ctx.assertSuccessfulBlockComputation(commits, func(blockID flow.Identifier, commit flow.StateCommitment) { wg.Done() @@ -475,7 +475,7 @@ func Test_OnlyHeadOfTheQueueIsExecuted(t *testing.T) { }) // last executed block - it will be re-queued regardless of state commit - blockB := unittest.ExecutableBlockFixtureWithParent(nil, &blockA) + blockB := unittest.ExecutableBlockFixtureWithParent(nil, blockA) blockB.StartState = unittest.StateCommitmentPointerFixture() // finalized block - it can be executed in parallel, as blockB has been executed @@ -518,7 +518,7 @@ func Test_OnlyHeadOfTheQueueIsExecuted(t *testing.T) { ctx.executionState.On("StateCommitmentByBlockID", mock.Anything, mock.Anything).Return(nil, storageerr.ErrNotFound) ctx.state.On("Sealed").Return(ctx.snapshot) - ctx.snapshot.On("Head").Return(&blockA, nil) + ctx.snapshot.On("Head").Return(blockA, nil) wgB := sync.WaitGroup{} wgB.Add(1) @@ -610,7 +610,7 @@ func TestBlocksArentExecutedMultipleTimes_multipleBlockEnqueue(t *testing.T) { // A <- B <- C blockA := unittest.BlockHeaderFixture() - blockB := 
unittest.ExecutableBlockFixtureWithParent(nil, &blockA) + blockB := unittest.ExecutableBlockFixtureWithParent(nil, blockA) blockB.StartState = unittest.StateCommitmentPointerFixture() //blockCstartState := unittest.StateCommitmentFixture() @@ -632,7 +632,7 @@ func TestBlocksArentExecutedMultipleTimes_multipleBlockEnqueue(t *testing.T) { ctx.mockStateCommitsWithMap(commits) ctx.state.On("Sealed").Return(ctx.snapshot) - ctx.snapshot.On("Head").Return(&blockA, nil) + ctx.snapshot.On("Head").Return(blockA, nil) // wait finishing execution until all the blocks are sent to execution wgPut := sync.WaitGroup{} @@ -715,7 +715,7 @@ func TestBlocksArentExecutedMultipleTimes_collectionArrival(t *testing.T) { // A (0 collection) <- B (0 collection) <- C (0 collection) <- D (1 collection) blockA := unittest.BlockHeaderFixture() - blockB := unittest.ExecutableBlockFixtureWithParent(nil, &blockA) + blockB := unittest.ExecutableBlockFixtureWithParent(nil, blockA) blockB.StartState = unittest.StateCommitmentPointerFixture() collectionIdentities := ctx.identities.Filter(filter.HasRole(flow.RoleCollection)) @@ -758,7 +758,7 @@ func TestBlocksArentExecutedMultipleTimes_collectionArrival(t *testing.T) { ctx.mockSnapshot(blockD.Block.Header, ctx.identities) ctx.state.On("Sealed").Return(ctx.snapshot) - ctx.snapshot.On("Head").Return(&blockA, nil) + ctx.snapshot.On("Head").Return(blockA, nil) // wait to control parent (block B) execution until we are ready wgB := sync.WaitGroup{} @@ -853,7 +853,7 @@ func TestExecuteBlockInOrder(t *testing.T) { blockSealed := unittest.BlockHeaderFixture() blocks := make(map[string]*entity.ExecutableBlock) - blocks["A"] = unittest.ExecutableBlockFixtureWithParent(nil, &blockSealed) + blocks["A"] = unittest.ExecutableBlockFixtureWithParent(nil, blockSealed) blocks["A"].StartState = unittest.StateCommitmentPointerFixture() blocks["B"] = unittest.ExecutableBlockFixtureWithParent(nil, blocks["A"].Block.Header) @@ -879,7 +879,7 @@ func TestExecuteBlockInOrder(t 
*testing.T) { ctx.mockHasWeightAtBlockID(blocks["A"].ID(), true) ctx.state.On("Sealed").Return(ctx.snapshot) // a receipt for sealed block won't be broadcasted - ctx.snapshot.On("Head").Return(&blockSealed, nil) + ctx.snapshot.On("Head").Return(blockSealed, nil) ctx.mockSnapshot(blocks["A"].Block.Header, unittest.IdentityListFixture(1)) ctx.mockSnapshot(blocks["B"].Block.Header, unittest.IdentityListFixture(1)) ctx.mockSnapshot(blocks["C"].Block.Header, unittest.IdentityListFixture(1)) @@ -1091,7 +1091,7 @@ func TestUnauthorizedNodeDoesNotBroadcastReceipts(t *testing.T) { blockSealed := unittest.BlockHeaderFixture() blocks := make(map[string]*entity.ExecutableBlock) - blocks["A"] = unittest.ExecutableBlockFixtureWithParent(nil, &blockSealed) + blocks["A"] = unittest.ExecutableBlockFixtureWithParent(nil, blockSealed) blocks["A"].StartState = unittest.StateCommitmentPointerFixture() blocks["B"] = unittest.ExecutableBlockFixtureWithParent(nil, blocks["A"].Block.Header) @@ -1120,7 +1120,7 @@ func TestUnauthorizedNodeDoesNotBroadcastReceipts(t *testing.T) { // will be executed. 
ctx.state.On("Sealed").Return(ctx.snapshot) // a receipt for sealed block won't be broadcasted - ctx.snapshot.On("Head").Return(&blockSealed, nil) + ctx.snapshot.On("Head").Return(blockSealed, nil) ctx.mockHasWeightAtBlockID(blocks["A"].ID(), true) identity := *ctx.identity diff --git a/engine/verification/assigner/engine_test.go b/engine/verification/assigner/engine_test.go index 2046cb037cd..d537b21c261 100644 --- a/engine/verification/assigner/engine_test.go +++ b/engine/verification/assigner/engine_test.go @@ -96,7 +96,7 @@ func createContainerBlock(options ...func(result *flow.ExecutionResult, assignme // container block header := unittest.BlockHeaderFixture() block := &flow.Block{ - Header: &header, + Header: header, Payload: &flow.Payload{ Receipts: []*flow.ExecutionReceiptMeta{receipt.Meta()}, Results: []*flow.ExecutionResult{&receipt.ExecutionResult}, diff --git a/engine/verification/utils/unittest/fixture.go b/engine/verification/utils/unittest/fixture.go index 6656e538bbe..c54e7dd5f3d 100644 --- a/engine/verification/utils/unittest/fixture.go +++ b/engine/verification/utils/unittest/fixture.go @@ -439,7 +439,7 @@ func ExecutionReceiptsFromParentBlockFixture(t *testing.T, parent *flow.Header, func ExecutionResultFromParentBlockFixture(t *testing.T, parent *flow.Header, builder *CompleteExecutionReceiptBuilder) (*flow.ExecutionResult, *ExecutionReceiptData) { refBlkHeader := unittest.BlockHeaderWithParentFixture(parent) - return ExecutionResultFixture(t, builder.chunksCount, builder.chain, &refBlkHeader, builder.clusterCommittee) + return ExecutionResultFixture(t, builder.chunksCount, builder.chain, refBlkHeader, builder.clusterCommittee) } // ContainerBlockFixture builds and returns a block that contains input execution receipts. 
diff --git a/engine/verification/utils/unittest/helper.go b/engine/verification/utils/unittest/helper.go index fcb2008fd35..83b130d68c3 100644 --- a/engine/verification/utils/unittest/helper.go +++ b/engine/verification/utils/unittest/helper.go @@ -409,7 +409,7 @@ func MockLastSealedHeight(state *mockprotocol.State, height uint64) { header := unittest.BlockHeaderFixture() header.Height = height state.On("Sealed").Return(snapshot) - snapshot.On("Head").Return(&header, nil) + snapshot.On("Head").Return(header, nil) } func NewVerificationHappyPathTest(t *testing.T, diff --git a/fvm/blocks_test.go b/fvm/blocks_test.go index 0b861570517..cfc88180fe8 100644 --- a/fvm/blocks_test.go +++ b/fvm/blocks_test.go @@ -11,7 +11,7 @@ import ( "github.com/onflow/flow-go/utils/unittest" ) -func doTest(t *testing.T, f func(*testing.T, *storageMock.Headers, Blocks, flow.Header)) func(*testing.T) { +func doTest(t *testing.T, f func(*testing.T, *storageMock.Headers, Blocks, *flow.Header)) func(*testing.T) { return func(t *testing.T) { headers := new(storageMock.Headers) @@ -27,66 +27,66 @@ func doTest(t *testing.T, f func(*testing.T, *storageMock.Headers, Blocks, flow. 
func Test_BlockFinder_ReturnsHeaderIfSameHeight(t *testing.T) { - t.Run("returns header is height is the same", doTest(t, func(t *testing.T, headers *storageMock.Headers, blockFinder Blocks, header flow.Header) { - heightFrom, err := blockFinder.ByHeightFrom(10, &header) + t.Run("returns header is height is the same", doTest(t, func(t *testing.T, headers *storageMock.Headers, blockFinder Blocks, header *flow.Header) { + heightFrom, err := blockFinder.ByHeightFrom(10, header) require.NoError(t, err) - require.Equal(t, header, *heightFrom) + require.Equal(t, *header, *heightFrom) })) - t.Run("nil header defaults to ByHeight", doTest(t, func(t *testing.T, headers *storageMock.Headers, blockFinder Blocks, header flow.Header) { + t.Run("nil header defaults to ByHeight", doTest(t, func(t *testing.T, headers *storageMock.Headers, blockFinder Blocks, header *flow.Header) { - headers.On("ByHeight", uint64(10)).Return(&header, nil) + headers.On("ByHeight", uint64(10)).Return(header, nil) heightFrom, err := blockFinder.ByHeightFrom(10, nil) require.NoError(t, err) - require.Equal(t, header, *heightFrom) + require.Equal(t, *header, *heightFrom) })) - t.Run("follows blocks chain", doTest(t, func(t *testing.T, headers *storageMock.Headers, blockFinder Blocks, header flow.Header) { + t.Run("follows blocks chain", doTest(t, func(t *testing.T, headers *storageMock.Headers, blockFinder Blocks, header *flow.Header) { header0 := unittest.BlockHeaderFixture() header0.Height = 0 - header1 := unittest.BlockHeaderWithParentFixture(&header0) - header2 := unittest.BlockHeaderWithParentFixture(&header1) - header3 := unittest.BlockHeaderWithParentFixture(&header2) - header4 := unittest.BlockHeaderWithParentFixture(&header3) - header5 := unittest.BlockHeaderWithParentFixture(&header4) + header1 := unittest.BlockHeaderWithParentFixture(header0) + header2 := unittest.BlockHeaderWithParentFixture(header1) + header3 := unittest.BlockHeaderWithParentFixture(header2) + header4 := 
unittest.BlockHeaderWithParentFixture(header3) + header5 := unittest.BlockHeaderWithParentFixture(header4) - headers.On("ByBlockID", header4.ID()).Return(&header4, nil) + headers.On("ByBlockID", header4.ID()).Return(header4, nil) headers.On("ByHeight", uint64(4)).Return(nil, storage.ErrNotFound) - headers.On("ByBlockID", header3.ID()).Return(&header3, nil) + headers.On("ByBlockID", header3.ID()).Return(header3, nil) headers.On("ByHeight", uint64(3)).Return(nil, storage.ErrNotFound) - headers.On("ByBlockID", header2.ID()).Return(&header2, nil) + headers.On("ByBlockID", header2.ID()).Return(header2, nil) headers.On("ByHeight", uint64(2)).Return(nil, storage.ErrNotFound) - headers.On("ByBlockID", header1.ID()).Return(&header1, nil) + headers.On("ByBlockID", header1.ID()).Return(header1, nil) headers.On("ByHeight", uint64(1)).Return(nil, storage.ErrNotFound) - headers.On("ByBlockID", header0.ID()).Return(&header0, nil) + headers.On("ByBlockID", header0.ID()).Return(header0, nil) //headers.On("ByHeight", uint64(0)).Return(nil, storage.ErrNotFound) - heightFrom, err := blockFinder.ByHeightFrom(0, &header5) + heightFrom, err := blockFinder.ByHeightFrom(0, header5) require.NoError(t, err) - require.Equal(t, header0, *heightFrom) + require.Equal(t, *header0, *heightFrom) })) - t.Run("skips heights once it get to finalized chain", doTest(t, func(t *testing.T, headers *storageMock.Headers, blockFinder Blocks, header flow.Header) { + t.Run("skips heights once it get to finalized chain", doTest(t, func(t *testing.T, headers *storageMock.Headers, blockFinder Blocks, header *flow.Header) { header0 := unittest.BlockHeaderFixture() header0.Height = 0 - header1 := unittest.BlockHeaderWithParentFixture(&header0) - header2 := unittest.BlockHeaderWithParentFixture(&header1) - header3 := unittest.BlockHeaderWithParentFixture(&header2) - header4 := unittest.BlockHeaderWithParentFixture(&header3) - header5 := unittest.BlockHeaderWithParentFixture(&header4) + header1 := 
unittest.BlockHeaderWithParentFixture(header0) + header2 := unittest.BlockHeaderWithParentFixture(header1) + header3 := unittest.BlockHeaderWithParentFixture(header2) + header4 := unittest.BlockHeaderWithParentFixture(header3) + header5 := unittest.BlockHeaderWithParentFixture(header4) - headers.On("ByBlockID", header4.ID()).Return(&header4, nil) + headers.On("ByBlockID", header4.ID()).Return(header4, nil) headers.On("ByHeight", uint64(4)).Return(nil, storage.ErrNotFound) - headers.On("ByBlockID", header3.ID()).Return(&header3, nil) - headers.On("ByHeight", uint64(3)).Return(&header3, nil) - headers.On("ByHeight", uint64(0)).Return(&header0, nil) + headers.On("ByBlockID", header3.ID()).Return(header3, nil) + headers.On("ByHeight", uint64(3)).Return(header3, nil) + headers.On("ByHeight", uint64(0)).Return(header0, nil) - heightFrom, err := blockFinder.ByHeightFrom(0, &header5) + heightFrom, err := blockFinder.ByHeightFrom(0, header5) require.NoError(t, err) - require.Equal(t, header0, *heightFrom) + require.Equal(t, *header0, *heightFrom) })) } diff --git a/fvm/fvm_blockcontext_test.go b/fvm/fvm_blockcontext_test.go index 85b6944b150..aefc4cb9fcc 100644 --- a/fvm/fvm_blockcontext_test.go +++ b/fvm/fvm_blockcontext_test.go @@ -1551,12 +1551,12 @@ func TestBlockContext_UnsafeRandom(t *testing.T) { chain, vm := createChainAndVm(flow.Mainnet) - header := flow.Header{Height: 42} + header := &flow.Header{Height: 42} ctx := fvm.NewContext( zerolog.Nop(), fvm.WithChain(chain), - fvm.WithBlockHeader(&header), + fvm.WithBlockHeader(header), fvm.WithCadenceLogging(true), ) diff --git a/fvm/transactionInvoker_test.go b/fvm/transactionInvoker_test.go index b5d1deb1990..9a0b7da6bb5 100644 --- a/fvm/transactionInvoker_test.go +++ b/fvm/transactionInvoker_test.go @@ -285,7 +285,7 @@ func TestSafetyCheck(t *testing.T) { view := utils.NewSimpleView() header := unittest.BlockHeaderFixture() - context := fvm.NewContext(log, fvm.WithBlockHeader(&header)) + context := fvm.NewContext(log, 
fvm.WithBlockHeader(header)) sth := state.NewStateHolder(state.NewState(view)) diff --git a/model/flow/header_test.go b/model/flow/header_test.go index e86e862a9d2..4273890c87c 100644 --- a/model/flow/header_test.go +++ b/model/flow/header_test.go @@ -26,7 +26,7 @@ func TestHeaderEncodingJSON(t *testing.T) { require.NoError(t, err) decodedID := decoded.ID() assert.Equal(t, headerID, decodedID) - assert.Equal(t, header, decoded) + assert.Equal(t, *header, decoded) } func TestHeaderFingerprint(t *testing.T) { @@ -45,7 +45,7 @@ func TestHeaderFingerprint(t *testing.T) { ProposerID flow.Identifier } rlp.NewMarshaler().MustUnmarshal(data, &decoded) - decHeader := flow.Header{ + decHeader := &flow.Header{ ChainID: decoded.ChainID, ParentID: decoded.ParentID, Height: decoded.Height, @@ -56,11 +56,10 @@ func TestHeaderFingerprint(t *testing.T) { ParentVoterSigData: decoded.ParentVoterSigData, ProposerID: decoded.ProposerID, ProposerSigData: header.ProposerSigData, // since this field is not encoded/decoded, just set it to the original - // value to pass test } decodedID := decHeader.ID() assert.Equal(t, headerID, decodedID) - assert.Equal(t, header, decHeader) + assert.Equal(t, *header, *decHeader) } func TestHeaderEncodingMsgpack(t *testing.T) { @@ -73,7 +72,7 @@ func TestHeaderEncodingMsgpack(t *testing.T) { require.NoError(t, err) decodedID := decoded.ID() assert.Equal(t, headerID, decodedID) - assert.Equal(t, header, decoded) + assert.Equal(t, *header, decoded) } func TestHeaderEncodingCBOR(t *testing.T) { @@ -86,7 +85,7 @@ func TestHeaderEncodingCBOR(t *testing.T) { require.NoError(t, err) decodedID := decoded.ID() assert.Equal(t, headerID, decodedID) - assert.Equal(t, header, decoded) + assert.Equal(t, *header, decoded) } func TestNonUTCTimestampSameHashAsUTC(t *testing.T) { diff --git a/module/buffer/backend_test.go b/module/buffer/backend_test.go index f2f11ce299e..fa0fd3165b4 100644 --- a/module/buffer/backend_test.go +++ b/module/buffer/backend_test.go @@ -25,13 
+25,13 @@ func (suite *BackendSuite) SetupTest() { func (suite *BackendSuite) Item() *item { parent := unittest.BlockHeaderFixture() - return suite.ItemWithParent(&parent) + return suite.ItemWithParent(parent) } func (suite *BackendSuite) ItemWithParent(parent *flow.Header) *item { header := unittest.BlockHeaderWithParentFixture(parent) return &item{ - header: &header, + header: header, payload: unittest.IdentifierFixture(), originID: unittest.IdentifierFixture(), } diff --git a/module/builder/collection/builder.go b/module/builder/collection/builder.go index 9aded4b8b18..ca7e964e2a1 100644 --- a/module/builder/collection/builder.go +++ b/module/builder/collection/builder.go @@ -298,7 +298,7 @@ func (b *Builder) BuildOn(parentID flow.Identifier, setter func(*flow.Header) er // build the payload from the transactions payload := cluster.PayloadFromTransactions(minRefID, transactions...) - header := flow.Header{ + header := &flow.Header{ ChainID: parent.ChainID, ParentID: parentID, Height: parent.Height + 1, @@ -310,13 +310,13 @@ func (b *Builder) BuildOn(parentID flow.Identifier, setter func(*flow.Header) er } // set fields specific to the consensus algorithm - err = setter(&header) + err = setter(header) if err != nil { return nil, fmt.Errorf("could not set fields to header: %w", err) } proposal = cluster.Block{ - Header: &header, + Header: header, Payload: &payload, } diff --git a/module/builder/consensus/builder.go b/module/builder/consensus/builder.go index d4f15c58699..fb2477a3c1f 100644 --- a/module/builder/consensus/builder.go +++ b/module/builder/consensus/builder.go @@ -627,15 +627,6 @@ func (b *Builder) createProposal(parentID flow.Identifier, Height: parent.Height + 1, Timestamp: timestamp, PayloadHash: payload.Hash(), - - // the following fields should be set by the custom function as needed - // NOTE: we could abstract all of this away into an interface{} field, - // but that would be over the top as we will probably always use hotstuff - View: 0, - 
ParentVoterIndices: nil, - ParentVoterSigData: nil, - ProposerID: flow.ZeroID, - ProposerSigData: nil, } // apply the custom fields setter of the consensus algorithm diff --git a/module/builder/consensus/builder_test.go b/module/builder/consensus/builder_test.go index 7d58121cfac..035243ba3f7 100644 --- a/module/builder/consensus/builder_test.go +++ b/module/builder/consensus/builder_test.go @@ -498,7 +498,7 @@ func (bs *BuilderSuite) TestPayloadGuaranteeReferenceExpired() { // create 4 expired guarantees header := unittest.BlockHeaderFixture() header.Height = bs.headers[bs.finalID].Height - 12 - bs.headers[header.ID()] = &header + bs.headers[header.ID()] = header expired := unittest.CollectionGuaranteesFixture(4, unittest.WithCollRef(header.ID())) // add all guarantees to the pool diff --git a/module/chunks/chunkVerifier_test.go b/module/chunks/chunkVerifier_test.go index a788b69aa24..db40e0a13a6 100644 --- a/module/chunks/chunkVerifier_test.go +++ b/module/chunks/chunkVerifier_test.go @@ -245,7 +245,7 @@ func GetBaselineVerifiableChunk(t *testing.T, script string, system bool) *verif header := unittest.BlockHeaderFixture() header.PayloadHash = payload.Hash() block := flow.Block{ - Header: &header, + Header: header, Payload: &payload, } blockID := block.ID() @@ -354,7 +354,7 @@ func GetBaselineVerifiableChunk(t *testing.T, script string, system bool) *verif verifiableChunkData = verification.VerifiableChunkData{ IsSystemChunk: system, Chunk: &chunk, - Header: &header, + Header: header, Result: &result, ChunkDataPack: &chunkDataPack, EndState: flow.StateCommitment(endState), diff --git a/module/finalizer/collection/finalizer_test.go b/module/finalizer/collection/finalizer_test.go index cd21963856c..36768503bd5 100644 --- a/module/finalizer/collection/finalizer_test.go +++ b/module/finalizer/collection/finalizer_test.go @@ -57,7 +57,7 @@ func TestFinalizer(t *testing.T) { require.NoError(t, err) state, err = cluster.Bootstrap(db, stateRoot) require.NoError(t, err) - 
err = db.Update(operation.InsertHeader(refBlock.ID(), &refBlock)) + err = db.Update(operation.InsertHeader(refBlock.ID(), refBlock)) require.NoError(t, err) } diff --git a/module/finalizer/consensus/finalizer_test.go b/module/finalizer/consensus/finalizer_test.go index 8b683571760..c9d2a24fdc7 100644 --- a/module/finalizer/consensus/finalizer_test.go +++ b/module/finalizer/consensus/finalizer_test.go @@ -49,15 +49,15 @@ func TestMakeFinalValidChain(t *testing.T) { final.Height = uint64(rand.Uint32()) // generate a couple of children that are pending - parent := &final + parent := final var pending []*flow.Header total := 8 for i := 0; i < total; i++ { header := unittest.BlockHeaderFixture() header.Height = parent.Height + 1 header.ParentID = parent.ID() - pending = append(pending, &header) - parent = &header + pending = append(pending, header) + parent = header } // create a mock protocol state to check finalize calls @@ -85,7 +85,7 @@ func TestMakeFinalValidChain(t *testing.T) { require.NoError(t, err) // insert the finalized block header into the DB - err = db.Update(operation.InsertHeader(final.ID(), &final)) + err = db.Update(operation.InsertHeader(final.ID(), final)) require.NoError(t, err) // insert all of the pending blocks into the DB @@ -143,11 +143,11 @@ func TestMakeFinalInvalidHeight(t *testing.T) { require.NoError(t, err) // insert the finalized block header into the DB - err = db.Update(operation.InsertHeader(final.ID(), &final)) + err = db.Update(operation.InsertHeader(final.ID(), final)) require.NoError(t, err) // insert all of the pending header into DB - err = db.Update(operation.InsertHeader(pending.ID(), &pending)) + err = db.Update(operation.InsertHeader(pending.ID(), pending)) require.NoError(t, err) // initialize the finalizer with the dependencies and make the call @@ -195,7 +195,7 @@ func TestMakeFinalDuplicate(t *testing.T) { require.NoError(t, err) // insert the finalized block header into the DB - err = 
db.Update(operation.InsertHeader(final.ID(), &final)) + err = db.Update(operation.InsertHeader(final.ID(), final)) require.NoError(t, err) // initialize the finalizer with the dependencies and make the call diff --git a/module/jobqueue/jobs_test.go b/module/jobqueue/jobs_test.go index b78489a756f..6aaf4caa94a 100644 --- a/module/jobqueue/jobs_test.go +++ b/module/jobqueue/jobs_test.go @@ -46,7 +46,7 @@ func TestBlockJob(t *testing.T) { func TestBlockHeaderJob(t *testing.T) { block := unittest.BlockHeaderFixture() - job := jobqueue.BlockHeaderToJob(&block) + job := jobqueue.BlockHeaderToJob(block) t.Run("job is correct type", func(t *testing.T) { assert.IsType(t, &jobqueue.BlockHeaderJob{}, job, "job is not a block job") @@ -60,7 +60,7 @@ func TestBlockHeaderJob(t *testing.T) { t.Run("job converts to header", func(t *testing.T) { b, err := jobqueue.JobToBlockHeader(job) assert.NoError(t, err, "unexpected error converting notify job to header") - assert.Equal(t, block, *b, "converted header is not the same as the original header") + assert.Equal(t, *block, *b, "converted header is not the same as the original header") }) t.Run("incorrect job type fails to convert to header", func(t *testing.T) { diff --git a/module/state_synchronization/execution_data_cid_cache_test.go b/module/state_synchronization/execution_data_cid_cache_test.go index e0a5aa49a8a..e4d7e3a5481 100644 --- a/module/state_synchronization/execution_data_cid_cache_test.go +++ b/module/state_synchronization/execution_data_cid_cache_test.go @@ -17,7 +17,7 @@ func TestCacheHit(t *testing.T) { header := unittest.BlockHeaderFixture() var blobTree state_synchronization.BlobTree blobTree = append(blobTree, []cid.Cid{unittest.CidFixture(), unittest.CidFixture()}, []cid.Cid{unittest.CidFixture()}) - cache.Insert(&header, blobTree) + cache.Insert(header, blobTree) for height, cids := range blobTree { for index, cid := range cids { @@ -47,10 +47,10 @@ func TestCacheEviction(t *testing.T) { for i := uint(0); i < 
2*size; i++ { header := unittest.BlockHeaderFixture() - headers = append(headers, &header) + headers = append(headers, header) root := unittest.CidFixture() cids = append(cids, root) - cache.Insert(&header, [][]cid.Cid{{root}}) + cache.Insert(header, [][]cid.Cid{{root}}) expectedSize := i + 1 if expectedSize > size { @@ -86,8 +86,8 @@ func TestDuplicateCID(t *testing.T) { header1 := unittest.BlockHeaderFixture() header2 := unittest.BlockHeaderFixture() - cache.Insert(&header1, [][]cid.Cid{{c}}) - cache.Insert(&header2, [][]cid.Cid{{c}}) + cache.Insert(header1, [][]cid.Cid{{c}}) + cache.Insert(header2, [][]cid.Cid{{c}}) assert.Equal(t, cache.BlobRecords(), uint(1)) assert.Equal(t, cache.BlobTreeRecords(), uint(2)) @@ -117,9 +117,9 @@ func TestCacheEvictionWithDuplicateCID(t *testing.T) { var blobTree3 state_synchronization.BlobTree blobTree3 = append(blobTree3, []cid.Cid{unittest.CidFixture()}) - cache.Insert(&header1, blobTree1) - cache.Insert(&header2, blobTree2) - cache.Insert(&header3, blobTree3) + cache.Insert(header1, blobTree1) + cache.Insert(header2, blobTree2) + cache.Insert(header3, blobTree3) assert.Equal(t, cache.BlobRecords(), uint(2)) assert.Equal(t, cache.BlobTreeRecords(), uint(2)) diff --git a/module/state_synchronization/requester/jobs/execution_data_reader_test.go b/module/state_synchronization/requester/jobs/execution_data_reader_test.go index 80ea99a068d..53373a9bf87 100644 --- a/module/state_synchronization/requester/jobs/execution_data_reader_test.go +++ b/module/state_synchronization/requester/jobs/execution_data_reader_test.go @@ -51,7 +51,7 @@ func (suite *ExecutionDataReaderSuite) SetupTest() { suite.executionDataID = unittest.IdentifierFixture() parent := unittest.BlockHeaderFixture(unittest.WithHeaderHeight(1)) - suite.block = unittest.BlockWithParentFixture(&parent) + suite.block = unittest.BlockWithParentFixture(parent) suite.blocksByHeight = map[uint64]*flow.Block{ suite.block.Header.Height: suite.block, } diff --git 
a/module/synchronization/core_rapid_test.go b/module/synchronization/core_rapid_test.go index a8d31adf35d..0134e225bf8 100644 --- a/module/synchronization/core_rapid_test.go +++ b/module/synchronization/core_rapid_test.go @@ -21,8 +21,8 @@ const NUM_BLOCKS int = 100 // This returns a forest of blocks, some of which are in a parent relationship // It should include forks -func populatedBlockStore(t *rapid.T) []flow.Header { - store := []flow.Header{unittest.BlockHeaderFixture()} +func populatedBlockStore(t *rapid.T) []*flow.Header { + store := []*flow.Header{unittest.BlockHeaderFixture()} for i := 1; i < NUM_BLOCKS; i++ { // we sample from the store 2/3 times to get deeper trees b := rapid.OneOf(rapid.Just(unittest.BlockHeaderFixture()), rapid.SampledFrom(store), rapid.SampledFrom(store)).Draw(t, "parent").(flow.Header) @@ -32,7 +32,7 @@ func populatedBlockStore(t *rapid.T) []flow.Header { } type rapidSync struct { - store []flow.Header + store []*flow.Header core *Core idRequests map[flow.Identifier]bool // depth 1 pushdown automaton to track ID requests heightRequests map[uint64]bool // depth 1 pushdown automaton to track height requests @@ -52,7 +52,7 @@ func (r *rapidSync) Init(t *rapid.T) { // RequestByID is an action that requests a block by its ID. 
func (r *rapidSync) RequestByID(t *rapid.T) { - b := rapid.SampledFrom(r.store).Draw(t, "id_request").(flow.Header) + b := rapid.SampledFrom(r.store).Draw(t, "id_request").(*flow.Header) r.core.RequestBlock(b.ID(), b.Height) // Re-queueing by ID should always succeed r.idRequests[b.ID()] = true @@ -62,7 +62,7 @@ func (r *rapidSync) RequestByID(t *rapid.T) { // RequestByHeight is an action that requests a specific height func (r *rapidSync) RequestByHeight(t *rapid.T) { - b := rapid.SampledFrom(r.store).Draw(t, "height_request").(flow.Header) + b := rapid.SampledFrom(r.store).Draw(t, "height_request").(*flow.Header) r.core.RequestHeight(b.Height) // Re-queueing by height should always succeed r.heightRequests[b.Height] = true @@ -71,10 +71,10 @@ func (r *rapidSync) RequestByHeight(t *rapid.T) { // HandleHeight is an action that requests a heights // upon receiving an argument beyond a certain tolerance func (r *rapidSync) HandleHeight(t *rapid.T) { - b := rapid.SampledFrom(r.store).Draw(t, "height_hint_request").(flow.Header) + b := rapid.SampledFrom(r.store).Draw(t, "height_hint_request").(*flow.Header) incr := rapid.IntRange(0, (int)(DefaultConfig().Tolerance)+1).Draw(t, "height increment").(int) requestHeight := b.Height + (uint64)(incr) - r.core.HandleHeight(&b, requestHeight) + r.core.HandleHeight(b, requestHeight) // Re-queueing by height should always succeed if beyond tolerance if (uint)(incr) > DefaultConfig().Tolerance { for h := b.Height + 1; h <= requestHeight; h++ { @@ -85,8 +85,8 @@ func (r *rapidSync) HandleHeight(t *rapid.T) { // HandleByID is an action that provides a block header to the sync engine func (r *rapidSync) HandleByID(t *rapid.T) { - b := rapid.SampledFrom(r.store).Draw(t, "id_handling").(flow.Header) - success := r.core.HandleBlock(&b) + b := rapid.SampledFrom(r.store).Draw(t, "id_handling").(*flow.Header) + success := r.core.HandleBlock(b) assert.True(t, success || r.idRequests[b.ID()] == false) // we decrease the pending requests iff 
we have already requested this block @@ -101,15 +101,15 @@ func (r *rapidSync) HandleByID(t *rapid.T) { // Check runs after every action and verifies that all required invariants hold. func (r *rapidSync) Check(t *rapid.T) { // we collect the received blocks as determined above - var receivedBlocks []flow.Header + var receivedBlocks []*flow.Header // we also collect the pending blocks - var activeBlocks []flow.Header + var activeBlocks []*flow.Header // we check the validity of our pushdown automaton for ID requests and populate activeBlocks / receivedBlocks for id, requested := range r.idRequests { s, foundID := r.core.blockIDs[id] - block, foundBlock := findHeader(r.store, func(h flow.Header) bool { + block, foundBlock := findHeader(r.store, func(h *flow.Header) bool { return h.ID() == id }) require.True(t, foundBlock, "incorrect management of idRequests in the tests: all added IDs are supposed to be from the store") @@ -119,12 +119,12 @@ func (r *rapidSync) Check(t *rapid.T) { assert.True(t, s.WasQueued(), "ID %v was expected to be Queued and is %v", id, s.StatusString()) assert.False(t, s.WasReceived(), "ID %v was expected to be Queued and is %v", id, s.StatusString()) - activeBlocks = append(activeBlocks, *block) + activeBlocks = append(activeBlocks, block) } else { if foundID { // if a block is known with 0 pendings, it's because it was received assert.True(t, s.WasReceived(), "ID %v was expected to be Received and is %v", id, s.StatusString()) - receivedBlocks = append(receivedBlocks, *block) + receivedBlocks = append(receivedBlocks, block) } } } @@ -147,10 +147,10 @@ func (r *rapidSync) Check(t *rapid.T) { // - or because a request for a block at that height made us "forget" the prior height reception (clobberedByID) if ok { wasReceived := s.WasReceived() - _, blockAtHeightWasReceived := findHeader(receivedBlocks, func(header flow.Header) bool { + _, blockAtHeightWasReceived := findHeader(receivedBlocks, func(header *flow.Header) bool { return header.Height 
== h }) - _, clobberedByID := findHeader(activeBlocks, func(header flow.Header) bool { + _, clobberedByID := findHeader(activeBlocks, func(header *flow.Header) bool { return header.Height == h }) heightWasCanceled := wasReceived || blockAtHeightWasReceived || clobberedByID @@ -178,10 +178,10 @@ func TestRapidSync(t *testing.T) { } // utility functions -func findHeader(store []flow.Header, predicate func(flow.Header) bool) (*flow.Header, bool) { +func findHeader(store []*flow.Header, predicate func(*flow.Header) bool) (*flow.Header, bool) { for _, b := range store { if predicate(b) { - return &b, true + return b, true } } return nil, false diff --git a/module/synchronization/core_test.go b/module/synchronization/core_test.go index cbd67e31b01..4721a647b6d 100644 --- a/module/synchronization/core_test.go +++ b/module/synchronization/core_test.go @@ -153,29 +153,29 @@ func (ss *SyncSuite) TestHandleBlock() { requestedByID := unittest.BlockHeaderFixture() ss.core.blockIDs[requestedByID.ID()] = ss.RequestedStatus() received := unittest.BlockHeaderFixture() - ss.core.heights[received.Height] = ss.ReceivedStatus(&received) - ss.core.blockIDs[received.ID()] = ss.ReceivedStatus(&received) + ss.core.heights[received.Height] = ss.ReceivedStatus(received) + ss.core.blockIDs[received.ID()] = ss.ReceivedStatus(received) // should ignore un-requested blocks - shouldProcess := ss.core.HandleBlock(&unrequested) + shouldProcess := ss.core.HandleBlock(unrequested) ss.Assert().False(shouldProcess, "should not process un-requested block") ss.Assert().NotContains(ss.core.heights, unrequested.Height) ss.Assert().NotContains(ss.core.blockIDs, unrequested.ID()) // should mark queued blocks as received, and process them - shouldProcess = ss.core.HandleBlock(&queuedByHeight) + shouldProcess = ss.core.HandleBlock(queuedByHeight) ss.Assert().True(shouldProcess, "should process queued block") ss.Assert().True(ss.core.blockIDs[queuedByHeight.ID()].WasReceived(), "status should be reflected in 
block ID map") ss.Assert().True(ss.core.heights[queuedByHeight.Height].WasReceived(), "status should be reflected in height map") // should mark requested block as received, and process them - shouldProcess = ss.core.HandleBlock(&requestedByID) + shouldProcess = ss.core.HandleBlock(requestedByID) ss.Assert().True(shouldProcess, "should process requested block") ss.Assert().True(ss.core.blockIDs[requestedByID.ID()].WasReceived(), "status should be reflected in block ID map") ss.Assert().True(ss.core.heights[requestedByID.Height].WasReceived(), "status should be reflected in height map") // should leave received blocks, and not process them - shouldProcess = ss.core.HandleBlock(&received) + shouldProcess = ss.core.HandleBlock(received) ss.Assert().False(shouldProcess, "should not process already received block") ss.Assert().True(ss.core.blockIDs[received.ID()].WasReceived(), "status should remain reflected in block ID map") ss.Assert().True(ss.core.heights[received.Height].WasReceived(), "status should remain reflected in height map") @@ -189,15 +189,15 @@ func (ss *SyncSuite) TestHandleHeight() { aboveOutsideTolerance := final.Height + uint64(ss.core.Config.Tolerance+1) // a height lower than finalized should be a no-op - ss.core.HandleHeight(&final, lower) + ss.core.HandleHeight(final, lower) ss.Assert().Len(ss.core.heights, 0) // a height higher than finalized, but within tolerance, should be a no-op - ss.core.HandleHeight(&final, aboveWithinTolerance) + ss.core.HandleHeight(final, aboveWithinTolerance) ss.Assert().Len(ss.core.heights, 0) // a height higher than finalized and outside tolerance should queue missing heights - ss.core.HandleHeight(&final, aboveOutsideTolerance) + ss.core.HandleHeight(final, aboveOutsideTolerance) ss.Assert().Len(ss.core.heights, int(aboveOutsideTolerance-final.Height)) for height := final.Height + 1; height <= aboveOutsideTolerance; height++ { ss.Assert().Contains(ss.core.heights, height) @@ -433,7 +433,7 @@ func (ss *SyncSuite) 
TestPrune() { blockIDsBefore := len(ss.core.blockIDs) // prune the pending requests - ss.core.prune(&final) + ss.core.prune(final) assert.Equal(ss.T(), heightsBefore-len(prunableHeights), len(ss.core.heights)) assert.Equal(ss.T(), blockIDsBefore-len(prunableBlockIDs), len(ss.core.blockIDs)) diff --git a/network/p2p/topic_validator_test.go b/network/p2p/topic_validator_test.go index fffb9a73008..c2cb3409b79 100644 --- a/network/p2p/topic_validator_test.go +++ b/network/p2p/topic_validator_test.go @@ -81,7 +81,7 @@ func TestTopicValidator_Unstaked(t *testing.T) { defer cancel5s() // create a dummy block proposal to publish from our SN node header := unittest.BlockHeaderFixture() - data1 := getMsgFixtureBz(t, &messages.BlockProposal{Header: &header}) + data1 := getMsgFixtureBz(t, &messages.BlockProposal{Header: header}) err = sn2.Publish(timedCtx, topic, data1) require.NoError(t, err) @@ -224,7 +224,7 @@ func TestAuthorizedSenderValidator_Unauthorized(t *testing.T) { defer cancel5s() // create a dummy block proposal to publish from our SN node header := unittest.BlockHeaderFixture() - data1 := getMsgFixtureBz(t, &messages.BlockProposal{Header: &header}) + data1 := getMsgFixtureBz(t, &messages.BlockProposal{Header: header}) // sn2 publishes the block proposal, sn1 and an1 should receive the message because // SN nodes are authorized to send block proposals @@ -243,7 +243,7 @@ func TestAuthorizedSenderValidator_Unauthorized(t *testing.T) { timedCtx, cancel2s := context.WithTimeout(context.Background(), 2*time.Second) defer cancel2s() header = unittest.BlockHeaderFixture() - data2 := getMsgFixtureBz(t, &messages.BlockProposal{Header: &header}) + data2 := getMsgFixtureBz(t, &messages.BlockProposal{Header: header}) // the access node now publishes the block proposal message, AN are not authorized to publish block proposals // the message should be rejected by the topic validator on sn1 @@ -325,7 +325,7 @@ func TestAuthorizedSenderValidator_InvalidMsg(t *testing.T) { defer 
cancel5s() // create a dummy block proposal to publish from our SN node header := unittest.BlockHeaderFixture() - data1 := getMsgFixtureBz(t, &messages.BlockProposal{Header: &header}) + data1 := getMsgFixtureBz(t, &messages.BlockProposal{Header: header}) // sn2 publishes the block proposal on the sync committee channel err = sn2.Publish(timedCtx, topic, data1) @@ -404,7 +404,7 @@ func TestAuthorizedSenderValidator_Ejected(t *testing.T) { defer cancel5s() // create a dummy block proposal to publish from our SN node header := unittest.BlockHeaderFixture() - data1 := getMsgFixtureBz(t, &messages.BlockProposal{Header: &header}) + data1 := getMsgFixtureBz(t, &messages.BlockProposal{Header: header}) // sn2 publishes the block proposal, sn1 and an1 should receive the message because // SN nodes are authorized to send block proposals @@ -424,7 +424,7 @@ func TestAuthorizedSenderValidator_Ejected(t *testing.T) { // "eject" sn2 to ensure messages published by ejected nodes get rejected identity2.Ejected = true header = unittest.BlockHeaderFixture() - data3 := getMsgFixtureBz(t, &messages.BlockProposal{Header: &header}) + data3 := getMsgFixtureBz(t, &messages.BlockProposal{Header: header}) timedCtx, cancel2s := context.WithTimeout(context.Background(), time.Second) defer cancel2s() err = sn2.Publish(timedCtx, topic, data3) diff --git a/state/fork/traversal_test.go b/state/fork/traversal_test.go index f9101a36b34..111981375df 100644 --- a/state/fork/traversal_test.go +++ b/state/fork/traversal_test.go @@ -23,7 +23,7 @@ type TraverseSuite struct { byID map[flow.Identifier]*flow.Header byHeight map[uint64]*flow.Header headers *mockstorage.Headers - genesis flow.Header + genesis *flow.Header } func (s *TraverseSuite) SetupTest() { @@ -46,16 +46,16 @@ func (s *TraverseSuite) SetupTest() { // populate the mocked header storage with genesis and 10 child blocks genesis := unittest.BlockHeaderFixture() genesis.Height = 0 - s.byID[genesis.ID()] = &genesis - s.byHeight[genesis.Height] = 
&genesis + s.byID[genesis.ID()] = genesis + s.byHeight[genesis.Height] = genesis s.genesis = genesis - parent := &genesis + parent := genesis for i := 0; i < 10; i++ { child := unittest.BlockHeaderWithParentFixture(parent) - s.byID[child.ID()] = &child - s.byHeight[child.Height] = &child - parent = &child + s.byID[child.ID()] = child + s.byHeight[child.Height] = child + parent = child } } @@ -506,13 +506,13 @@ func (s *TraverseSuite) TestTraverse_OnDifferentForkThanTerminalBlock() { noopVisitor := func(header *flow.Header) error { return nil } // make other fork - otherForkHead := &s.genesis + otherForkHead := s.genesis otherForkByHeight := make(map[uint64]*flow.Header) for i := 0; i < 10; i++ { child := unittest.BlockHeaderWithParentFixture(otherForkHead) - s.byID[child.ID()] = &child - otherForkByHeight[child.Height] = &child - otherForkHead = &child + s.byID[child.ID()] = child + otherForkByHeight[child.Height] = child + otherForkHead = child } terminalBlockID := otherForkByHeight[2].ID() diff --git a/state/protocol/events/gadgets/heights_test.go b/state/protocol/events/gadgets/heights_test.go index 3185815b76f..e47ba0d3f03 100644 --- a/state/protocol/events/gadgets/heights_test.go +++ b/state/protocol/events/gadgets/heights_test.go @@ -28,7 +28,7 @@ func TestHeights(t *testing.T) { for height := uint64(2); height <= 4; height++ { block := unittest.BlockHeaderFixture() block.Height = height - heights.BlockFinalized(&block) + heights.BlockFinalized(block) } // ensure callbacks were invoked correctly diff --git a/state/protocol/events/gadgets/views_test.go b/state/protocol/events/gadgets/views_test.go index 1520a357795..484531c4b53 100644 --- a/state/protocol/events/gadgets/views_test.go +++ b/state/protocol/events/gadgets/views_test.go @@ -41,7 +41,7 @@ func (m *viewsMachine) BlockFinalized(t *rapid.T) { block := unittest.BlockHeaderFixture() block.View = view - m.views.BlockFinalized(&block) + m.views.BlockFinalized(block) // increase the number of expected 
calls and remove those callbacks from our model for indexedView, nCallbacks := range m.callbacks { diff --git a/storage/badger/operation/headers_test.go b/storage/badger/operation/headers_test.go index fc7e09b8cb0..089ecea3848 100644 --- a/storage/badger/operation/headers_test.go +++ b/storage/badger/operation/headers_test.go @@ -17,7 +17,7 @@ import ( func TestHeaderInsertCheckRetrieve(t *testing.T) { unittest.RunWithBadgerDB(t, func(db *badger.DB) { - expected := flow.Header{ + expected := &flow.Header{ View: 1337, Timestamp: time.Now().UTC(), ParentID: flow.Identifier{0x11}, @@ -29,14 +29,14 @@ func TestHeaderInsertCheckRetrieve(t *testing.T) { } blockID := expected.ID() - err := db.Update(InsertHeader(expected.ID(), &expected)) + err := db.Update(InsertHeader(expected.ID(), expected)) require.Nil(t, err) var actual flow.Header err = db.View(RetrieveHeader(blockID, &actual)) require.Nil(t, err) - assert.Equal(t, expected, actual) + assert.Equal(t, *expected, actual) }) } diff --git a/utils/unittest/fixtures.go b/utils/unittest/fixtures.go index b254c1563e9..a233d106c6f 100644 --- a/utils/unittest/fixtures.go +++ b/utils/unittest/fixtures.go @@ -155,7 +155,7 @@ func AccountFixture() (*flow.Account, error) { func BlockFixture() flow.Block { header := BlockHeaderFixture() - return *BlockWithParentFixture(&header) + return *BlockWithParentFixture(header) } func FullBlockFixture() flow.Block { @@ -221,7 +221,7 @@ func PendingFromBlock(block *flow.Block) *flow.PendingBlock { func StateDeltaFixture() *messages.ExecutionStateDelta { header := BlockHeaderFixture() - block := BlockWithParentFixture(&header) + block := BlockWithParentFixture(header) return &messages.ExecutionStateDelta{ ExecutableBlock: entity.ExecutableBlock{ Block: block, @@ -301,7 +301,7 @@ func BlockWithParentFixture(parent *flow.Header) *flow.Block { header := BlockHeaderWithParentFixture(parent) header.PayloadHash = payload.Hash() return &flow.Block{ - Header: &header, + Header: header, Payload: 
&payload, } } @@ -311,7 +311,7 @@ func BlockWithGuaranteesFixture(guarantees []*flow.CollectionGuarantee) *flow.Bl header := BlockHeaderFixture() header.PayloadHash = payload.Hash() return &flow.Block{ - Header: &header, + Header: header, Payload: &payload, } @@ -360,7 +360,7 @@ func StateDeltaWithParentFixture(parent *flow.Header) *messages.ExecutionStateDe header := BlockHeaderWithParentFixture(parent) header.PayloadHash = payload.Hash() block := flow.Block{ - Header: &header, + Header: header, Payload: &payload, } @@ -392,7 +392,7 @@ func HeaderWithView(view uint64) func(*flow.Header) { } } -func BlockHeaderFixture(opts ...func(header *flow.Header)) flow.Header { +func BlockHeaderFixture(opts ...func(header *flow.Header)) *flow.Header { height := 1 + uint64(rand.Uint32()) // avoiding edge case of height = 0 (genesis block) view := height + uint64(rand.Intn(1000)) header := BlockHeaderWithParentFixture(&flow.Header{ @@ -403,7 +403,7 @@ func BlockHeaderFixture(opts ...func(header *flow.Header)) flow.Header { }) for _, opt := range opts { - opt(&header) + opt(header) } return header @@ -415,7 +415,7 @@ func CidFixture() cid.Cid { return blocks.NewBlock(data).Cid() } -func BlockHeaderFixtureOnChain(chainID flow.ChainID, opts ...func(header *flow.Header)) flow.Header { +func BlockHeaderFixtureOnChain(chainID flow.ChainID, opts ...func(header *flow.Header)) *flow.Header { height := 1 + uint64(rand.Uint32()) // avoiding edge case of height = 0 (genesis block) view := height + uint64(rand.Intn(1000)) header := BlockHeaderWithParentFixture(&flow.Header{ @@ -426,16 +426,16 @@ func BlockHeaderFixtureOnChain(chainID flow.ChainID, opts ...func(header *flow.H }) for _, opt := range opts { - opt(&header) + opt(header) } return header } -func BlockHeaderWithParentFixture(parent *flow.Header) flow.Header { +func BlockHeaderWithParentFixture(parent *flow.Header) *flow.Header { height := parent.Height + 1 view := parent.View + 1 + uint64(rand.Intn(10)) // Intn returns [0, n) - 
return flow.Header{ + return &flow.Header{ ChainID: parent.ChainID, ParentID: parent.ID(), Height: height, @@ -466,7 +466,7 @@ func ClusterBlockFixture() cluster.Block { header.PayloadHash = payload.Hash() return cluster.Block{ - Header: &header, + Header: header, Payload: payload, } } @@ -486,7 +486,7 @@ func ClusterBlockWithParent(parent *cluster.Block) cluster.Block { header.PayloadHash = payload.Hash() block := cluster.Block{ - Header: &header, + Header: header, Payload: payload, } @@ -603,7 +603,7 @@ func CompleteCollectionFromTransactions(txs []*flow.TransactionBody) *entity.Com func ExecutableBlockFixture(collectionsSignerIDs [][]flow.Identifier) *entity.ExecutableBlock { header := BlockHeaderFixture() - return ExecutableBlockFixtureWithParent(collectionsSignerIDs, &header) + return ExecutableBlockFixtureWithParent(collectionsSignerIDs, header) } func ExecutableBlockFixtureWithParent(collectionsSignerIDs [][]flow.Identifier, parent *flow.Header) *entity.ExecutableBlock { @@ -631,7 +631,7 @@ func ExecutableBlockFromTransactions(chain flow.ChainID, txss [][]*flow.Transact completeCollections := make(map[flow.Identifier]*entity.CompleteCollection, len(txss)) blockHeader := BlockHeaderFixtureOnChain(chain) - block := *BlockWithParentFixture(&blockHeader) + block := *BlockWithParentFixture(blockHeader) block.Payload.Guarantees = nil for _, txs := range txss { @@ -1301,7 +1301,7 @@ func VerifiableChunkDataFixture(chunkIndex uint64) *verification.VerifiableChunk header.PayloadHash = payload.Hash() block := flow.Block{ - Header: &header, + Header: header, Payload: &payload, } @@ -1501,7 +1501,7 @@ func SeedFixtures(m int, n int) [][]byte { } // BlockEventsFixture returns a block events model populated with random events of length n. 
-func BlockEventsFixture(header flow.Header, n int) flow.BlockEvents { +func BlockEventsFixture(header *flow.Header, n int) flow.BlockEvents { types := []flow.EventType{"A.0x1.Foo.Bar", "A.0x2.Zoo.Moo", "A.0x3.Goo.Hoo"} events := make([]flow.Event, n) diff --git a/utils/unittest/incorporated_results_seals.go b/utils/unittest/incorporated_results_seals.go index 918b52488b5..b92bd93ccae 100644 --- a/utils/unittest/incorporated_results_seals.go +++ b/utils/unittest/incorporated_results_seals.go @@ -20,7 +20,7 @@ func (f *incorporatedResultSealFactory) Fixture(opts ...func(*flow.IncorporatedR irSeal := &flow.IncorporatedResultSeal{ IncorporatedResult: ir, Seal: seal, - Header: &header, + Header: header, } for _, apply := range opts { From bca3b55e28d4487ecd5f0618e8ef4ba110f69a6d Mon Sep 17 00:00:00 2001 From: Jordan Schalm Date: Thu, 14 Jul 2022 09:59:35 -0700 Subject: [PATCH 198/223] add explicit note that root block is self-sealing --- model/flow/sealing_segment.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/model/flow/sealing_segment.go b/model/flow/sealing_segment.go index aca1349df92..129bc6cae99 100644 --- a/model/flow/sealing_segment.go +++ b/model/flow/sealing_segment.go @@ -31,9 +31,10 @@ import ( // // ROOT SEALING SEGMENTS: // Root sealing segments are sealing segments which contain the root block: +// * the root block is a self-sealing block with an empty payload // * the root block must be the first block (least height) in the segment // * no blocks in the segment may contain any seals (by the minimality requirement) -// * it is possible (but not necessary) for root sealing segments to contain only 1 self-sealing block +// * it is possible (but not necessary) for root sealing segments to contain only the root block // // Example 1 - one self-sealing root block // ROOT From bd3c03177542a3f0da15c95ffa927d2407250147 Mon Sep 17 00:00:00 2001 From: Patrick Lee Date: Thu, 14 Jul 2022 13:42:00 -0700 Subject: [PATCH 199/223] Invoke 
contracts directly instead of via currying --- fvm/contractFunctionInvocations.go | 241 +++++++++++++++-------------- fvm/scriptEnv.go | 17 +- fvm/transactionEnv.go | 32 ++-- fvm/transactionInvoker.go | 12 +- 4 files changed, 161 insertions(+), 141 deletions(-) diff --git a/fvm/contractFunctionInvocations.go b/fvm/contractFunctionInvocations.go index e1d187c5387..192f47436c3 100644 --- a/fvm/contractFunctionInvocations.go +++ b/fvm/contractFunctionInvocations.go @@ -18,30 +18,33 @@ var deductTransactionFeesInvocationArgumentTypes = []sema.Type{ sema.UInt64Type, } -// DeductTransactionFeesInvocation prepares a function that calls fee deduction on the service account -func DeductTransactionFeesInvocation( +// InvokeDeductTransactionFeesContract executes the fee deduction contract on +// the service account. +func InvokeDeductTransactionFeesContract( env Environment, traceSpan opentracing.Span, -) func(payer flow.Address, inclusionEffort uint64, executionEffort uint64) (cadence.Value, error) { + payer flow.Address, + inclusionEffort uint64, + executionEffort uint64, +) (cadence.Value, error) { + feesAddress := FlowFeesAddress(env.Context().Chain) - return func(payer flow.Address, inclusionEffort uint64, executionEffort uint64) (cadence.Value, error) { - invoker := NewContractFunctionInvoker( - common.AddressLocation{ - Address: common.Address(feesAddress), - Name: systemcontracts.ContractNameFlowFees, - }, - systemcontracts.ContractServiceAccountFunction_deductTransactionFee, - []interpreter.Value{ - interpreter.NewUnmeteredAddressValueFromBytes(payer.Bytes()), - interpreter.UFix64Value(inclusionEffort), - interpreter.UFix64Value(executionEffort), - }, - deductTransactionFeesInvocationArgumentTypes, - env.Context().Logger, - ) - return invoker.Invoke(env, traceSpan) - } + invoker := NewContractFunctionInvoker( + common.AddressLocation{ + Address: common.Address(feesAddress), + Name: systemcontracts.ContractNameFlowFees, + }, + 
systemcontracts.ContractServiceAccountFunction_deductTransactionFee, + []interpreter.Value{ + interpreter.NewUnmeteredAddressValueFromBytes(payer.Bytes()), + interpreter.UFix64Value(inclusionEffort), + interpreter.UFix64Value(executionEffort), + }, + deductTransactionFeesInvocationArgumentTypes, + env.Context().Logger, + ) + return invoker.Invoke(env, traceSpan) } var setupNewAccountInvocationArgumentTypes = []sema.Type{ @@ -49,105 +52,110 @@ var setupNewAccountInvocationArgumentTypes = []sema.Type{ sema.AuthAccountType, } -// SetupNewAccountInvocation prepares a function that calls new account setup on the service account -func SetupNewAccountInvocation( +// InvokeSetupNewAccountContract executes the new account setup contract on +// the service account. +func InvokeSetupNewAccountContract( env Environment, traceSpan opentracing.Span, -) func(flowAddress flow.Address, payer common.Address) (cadence.Value, error) { - return func(flowAddress flow.Address, payer common.Address) (cadence.Value, error) { - // uses `FlowServiceAccount.setupNewAccount` from https://github.com/onflow/flow-core-contracts/blob/master/contracts/FlowServiceAccount.cdc - invoker := NewContractFunctionInvoker( - common.AddressLocation{ - Address: common.Address(env.Context().Chain.ServiceAddress()), - Name: systemcontracts.ContractServiceAccount, - }, - systemcontracts.ContractServiceAccountFunction_setupNewAccount, - []interpreter.Value{ - interpreter.NewAddressValue(env, common.Address(flowAddress)), - interpreter.NewAddressValue(env, payer), - }, - setupNewAccountInvocationArgumentTypes, - env.Context().Logger, - ) - return invoker.Invoke(env, traceSpan) - } + flowAddress flow.Address, + payer common.Address, +) (cadence.Value, error) { + + // uses `FlowServiceAccount.setupNewAccount` from https://github.com/onflow/flow-core-contracts/blob/master/contracts/FlowServiceAccount.cdc + invoker := NewContractFunctionInvoker( + common.AddressLocation{ + Address: 
common.Address(env.Context().Chain.ServiceAddress()), + Name: systemcontracts.ContractServiceAccount, + }, + systemcontracts.ContractServiceAccountFunction_setupNewAccount, + []interpreter.Value{ + interpreter.NewAddressValue(env, common.Address(flowAddress)), + interpreter.NewAddressValue(env, payer), + }, + setupNewAccountInvocationArgumentTypes, + env.Context().Logger, + ) + return invoker.Invoke(env, traceSpan) } var accountAvailableBalanceInvocationArgumentTypes = []sema.Type{ &sema.AddressType{}, } -// AccountAvailableBalanceInvocation prepares a function that calls get available balance on the storage fees contract -func AccountAvailableBalanceInvocation( +// InvokeAccountAvailableBalanceContract executes the get available balance +// contract on the storage fees contract. +func InvokeAccountAvailableBalanceContract( env Environment, traceSpan opentracing.Span, -) func(address common.Address) (cadence.Value, error) { - return func(address common.Address) (cadence.Value, error) { - invoker := NewContractFunctionInvoker( - common.AddressLocation{ - Address: common.Address(env.Context().Chain.ServiceAddress()), - Name: systemcontracts.ContractStorageFees, - }, - systemcontracts.ContractStorageFeesFunction_defaultTokenAvailableBalance, - []interpreter.Value{ - interpreter.NewAddressValue(env, address), - }, - accountAvailableBalanceInvocationArgumentTypes, - env.Context().Logger, - ) - return invoker.Invoke(env, traceSpan) - } + address common.Address, +) (cadence.Value, error) { + + invoker := NewContractFunctionInvoker( + common.AddressLocation{ + Address: common.Address(env.Context().Chain.ServiceAddress()), + Name: systemcontracts.ContractStorageFees, + }, + systemcontracts.ContractStorageFeesFunction_defaultTokenAvailableBalance, + []interpreter.Value{ + interpreter.NewAddressValue(env, address), + }, + accountAvailableBalanceInvocationArgumentTypes, + env.Context().Logger, + ) + return invoker.Invoke(env, traceSpan) } var 
accountBalanceInvocationArgumentTypes = []sema.Type{ sema.PublicAccountType, } -// AccountBalanceInvocation prepares a function that calls get available balance on the service account -func AccountBalanceInvocation( +// InvokeAccountBalanceContract executes the get account balance contract +// on the service account. +func InvokeAccountBalanceContract( env Environment, traceSpan opentracing.Span, -) func(address common.Address) (cadence.Value, error) { - return func(address common.Address) (cadence.Value, error) { - invoker := NewContractFunctionInvoker( - common.AddressLocation{ - Address: common.Address(env.Context().Chain.ServiceAddress()), - Name: systemcontracts.ContractServiceAccount}, - systemcontracts.ContractServiceAccountFunction_defaultTokenBalance, - []interpreter.Value{ - interpreter.NewAddressValue(env, address), - }, - accountBalanceInvocationArgumentTypes, - env.Context().Logger, - ) - return invoker.Invoke(env, traceSpan) - } + address common.Address, +) (cadence.Value, error) { + + invoker := NewContractFunctionInvoker( + common.AddressLocation{ + Address: common.Address(env.Context().Chain.ServiceAddress()), + Name: systemcontracts.ContractServiceAccount}, + systemcontracts.ContractServiceAccountFunction_defaultTokenBalance, + []interpreter.Value{ + interpreter.NewAddressValue(env, address), + }, + accountBalanceInvocationArgumentTypes, + env.Context().Logger, + ) + return invoker.Invoke(env, traceSpan) } var accountStorageCapacityInvocationArgumentTypes = []sema.Type{ &sema.AddressType{}, } -// AccountStorageCapacityInvocation prepares a function that calls get storage capacity on the storage fees contract -func AccountStorageCapacityInvocation( +// InvokeAccountStorageCapacityContract executes the get storage capacity +// contract on the storage fees contract. 
+func InvokeAccountStorageCapacityContract( env Environment, traceSpan opentracing.Span, -) func(address common.Address) (cadence.Value, error) { - return func(address common.Address) (cadence.Value, error) { - invoker := NewContractFunctionInvoker( - common.AddressLocation{ - Address: common.Address(env.Context().Chain.ServiceAddress()), - Name: systemcontracts.ContractStorageFees, - }, - systemcontracts.ContractStorageFeesFunction_calculateAccountCapacity, - []interpreter.Value{ - interpreter.NewAddressValue(env, address), - }, - accountStorageCapacityInvocationArgumentTypes, - env.Context().Logger, - ) - return invoker.Invoke(env, traceSpan) - } + address common.Address, +) (cadence.Value, error) { + + invoker := NewContractFunctionInvoker( + common.AddressLocation{ + Address: common.Address(env.Context().Chain.ServiceAddress()), + Name: systemcontracts.ContractStorageFees, + }, + systemcontracts.ContractStorageFeesFunction_calculateAccountCapacity, + []interpreter.Value{ + interpreter.NewAddressValue(env, address), + }, + accountStorageCapacityInvocationArgumentTypes, + env.Context().Logger, + ) + return invoker.Invoke(env, traceSpan) } var useContractAuditVoucherInvocationArgumentTypes = []sema.Type{ @@ -155,30 +163,31 @@ var useContractAuditVoucherInvocationArgumentTypes = []sema.Type{ sema.StringType, } -// UseContractAuditVoucherInvocation prepares a function that can use a contract -// deployment audit voucher -func UseContractAuditVoucherInvocation( +// InvokeUseContractAuditVoucherContract executes the contract function that +// consumes a contract deployment audit voucher. 
+func InvokeUseContractAuditVoucherContract( env Environment, traceSpan opentracing.Span, -) func(address common.Address, code string) (bool, error) { - return func(address common.Address, code string) (bool, error) { - invoker := NewContractFunctionInvoker( - common.AddressLocation{ - Address: common.Address(env.Context().Chain.ServiceAddress()), - Name: systemcontracts.ContractDeploymentAudits, - }, - systemcontracts.ContractDeploymentAuditsFunction_useVoucherForDeploy, - []interpreter.Value{ - interpreter.NewAddressValue(env, address), - interpreter.NewUnmeteredStringValue(code), - }, - useContractAuditVoucherInvocationArgumentTypes, - env.Context().Logger, - ) - resultCdc, err := invoker.Invoke(env, traceSpan) - if err != nil { - return false, err - } - result := resultCdc.(cadence.Bool).ToGoValue().(bool) - return result, nil + address common.Address, + code string) (bool, error) { + + invoker := NewContractFunctionInvoker( + common.AddressLocation{ + Address: common.Address(env.Context().Chain.ServiceAddress()), + Name: systemcontracts.ContractDeploymentAudits, + }, + systemcontracts.ContractDeploymentAuditsFunction_useVoucherForDeploy, + []interpreter.Value{ + interpreter.NewAddressValue(env, address), + interpreter.NewUnmeteredStringValue(code), + }, + useContractAuditVoucherInvocationArgumentTypes, + env.Context().Logger, + ) + resultCdc, err := invoker.Invoke(env, traceSpan) + if err != nil { + return false, err } + result := resultCdc.(cadence.Bool).ToGoValue().(bool) + return result, nil } diff --git a/fvm/scriptEnv.go b/fvm/scriptEnv.go index e47b76a7c37..0a6956c4bf2 100644 --- a/fvm/scriptEnv.go +++ b/fvm/scriptEnv.go @@ -288,9 +288,10 @@ func (e *ScriptEnv) GetStorageCapacity(address common.Address) (value uint64, er return 0, fmt.Errorf("get storage capacity failed: %w", err) } - accountStorageCapacity := AccountStorageCapacityInvocation(e, e.traceSpan) - result, invokeErr := accountStorageCapacity(address) - + result, invokeErr := 
InvokeAccountStorageCapacityContract( + e, + e.traceSpan, + address) if invokeErr != nil { return 0, errors.HandleRuntimeError(invokeErr) } @@ -311,9 +312,7 @@ func (e *ScriptEnv) GetAccountBalance(address common.Address) (value uint64, err return 0, fmt.Errorf("get account balance failed: %w", err) } - accountBalance := AccountBalanceInvocation(e, e.traceSpan) - result, invokeErr := accountBalance(address) - + result, invokeErr := InvokeAccountBalanceContract(e, e.traceSpan, address) if invokeErr != nil { return 0, errors.HandleRuntimeError(invokeErr) } @@ -331,8 +330,10 @@ func (e *ScriptEnv) GetAccountAvailableBalance(address common.Address) (value ui return 0, fmt.Errorf("get account available balance failed: %w", err) } - accountAvailableBalance := AccountAvailableBalanceInvocation(e, e.traceSpan) - result, invokeErr := accountAvailableBalance(address) + result, invokeErr := InvokeAccountAvailableBalanceContract( + e, + e.traceSpan, + address) if invokeErr != nil { return 0, errors.HandleRuntimeError(invokeErr) diff --git a/fvm/transactionEnv.go b/fvm/transactionEnv.go index 5876ca90c87..13278f0fce4 100644 --- a/fvm/transactionEnv.go +++ b/fvm/transactionEnv.go @@ -307,8 +307,11 @@ func (e *TransactionEnv) GetIsContractDeploymentRestricted() (restricted bool, d } func (e *TransactionEnv) useContractAuditVoucher(address runtime.Address, code []byte) (bool, error) { - useVoucher := UseContractAuditVoucherInvocation(e, e.traceSpan) - return useVoucher(address, string(code[:])) + return InvokeUseContractAuditVoucherContract( + e, + e.traceSpan, + address, + string(code[:])) } func (e *TransactionEnv) isAuthorizerServiceAccount() bool { @@ -443,9 +446,10 @@ func (e *TransactionEnv) GetStorageCapacity(address common.Address) (value uint6 return value, fmt.Errorf("get storage capacity failed: %w", err) } - accountStorageCapacity := AccountStorageCapacityInvocation(e, e.traceSpan) - result, invokeErr := accountStorageCapacity(address) - + result, invokeErr := 
InvokeAccountStorageCapacityContract( + e, + e.traceSpan, + address) if invokeErr != nil { return 0, errors.HandleRuntimeError(invokeErr) } @@ -471,9 +475,7 @@ func (e *TransactionEnv) GetAccountBalance(address common.Address) (value uint64 return value, fmt.Errorf("get account balance failed: %w", err) } - accountBalance := AccountBalanceInvocation(e, e.traceSpan) - result, invokeErr := accountBalance(address) - + result, invokeErr := InvokeAccountBalanceContract(e, e.traceSpan, address) if invokeErr != nil { return 0, errors.HandleRuntimeError(invokeErr) } @@ -491,8 +493,10 @@ func (e *TransactionEnv) GetAccountAvailableBalance(address common.Address) (val return value, fmt.Errorf("get account available balance failed: %w", err) } - accountAvailableBalance := AccountAvailableBalanceInvocation(e, e.traceSpan) - result, invokeErr := accountAvailableBalance(address) + result, invokeErr := InvokeAccountAvailableBalanceContract( + e, + e.traceSpan, + address) if invokeErr != nil { return 0, errors.HandleRuntimeError(invokeErr) @@ -957,9 +961,11 @@ func (e *TransactionEnv) CreateAccount(payer runtime.Address) (address runtime.A } if e.ctx.ServiceAccountEnabled { - setupNewAccount := SetupNewAccountInvocation(e, e.traceSpan) - _, invokeErr := setupNewAccount(flowAddress, payer) - + _, invokeErr := InvokeSetupNewAccountContract( + e, + e.traceSpan, + flowAddress, + payer) if invokeErr != nil { return address, errors.HandleRuntimeError(invokeErr) } diff --git a/fvm/transactionInvoker.go b/fvm/transactionInvoker.go index 750d3503d3f..80854769f6e 100644 --- a/fvm/transactionInvoker.go +++ b/fvm/transactionInvoker.go @@ -274,11 +274,15 @@ func (i *TransactionInvoker) deductTransactionFees( computationUsed = uint64(sth.State().TotalComputationLimit()) } - deductTxFees := DeductTransactionFeesInvocation(env, proc.TraceSpan) - // Hardcoded inclusion effort (of 1.0 UFix). Eventually this will be dynamic. - // Execution effort will be connected to computation used. 
+ // Hardcoded inclusion effort (of 1.0 UFix). Eventually this will be + // dynamic. Execution effort will be connected to computation used. inclusionEffort := uint64(100_000_000) - _, err = deductTxFees(proc.Transaction.Payer, inclusionEffort, computationUsed) + _, err = InvokeDeductTransactionFeesContract( + env, + proc.TraceSpan, + proc.Transaction.Payer, + inclusionEffort, + computationUsed) if err != nil { return errors.NewTransactionFeeDeductionFailedError(proc.Transaction.Payer, err) From 056e8170fc3917535d6e44986414718295f26d04 Mon Sep 17 00:00:00 2001 From: Alexey Ivanov Date: Thu, 14 Jul 2022 14:57:53 -0700 Subject: [PATCH 200/223] chore(localnet): fix observer's trace endpoint --- integration/localnet/bootstrap.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/integration/localnet/bootstrap.go b/integration/localnet/bootstrap.go index aa6343be770..c42ec128eac 100644 --- a/integration/localnet/bootstrap.go +++ b/integration/localnet/bootstrap.go @@ -533,8 +533,7 @@ func prepareObserverService(i int, observerName string, agPublicKey string) Serv fmt.Sprintf("%s:/data:z", dataDir), }, Environment: []string{ - "JAEGER_AGENT_HOST=jaeger", - "JAEGER_AGENT_PORT=6831", + "JAEGER_ENDPOINT=http://tempo:14268/api/traces", "BINSTAT_ENABLE", "BINSTAT_LEN_WHAT", "BINSTAT_DMP_NAME", From 2b64e01df4cda097be9c8b0395a0e1104ad6d04f Mon Sep 17 00:00:00 2001 From: Alexey Ivanov Date: Thu, 14 Jul 2022 21:50:31 -0700 Subject: [PATCH 201/223] chore(tests): add trace benchmark --- module/trace/trace_test.go | 66 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 66 insertions(+) create mode 100644 module/trace/trace_test.go diff --git a/module/trace/trace_test.go b/module/trace/trace_test.go new file mode 100644 index 00000000000..c8d0059bb7f --- /dev/null +++ b/module/trace/trace_test.go @@ -0,0 +1,66 @@ +package trace + +import ( + "context" + "math/rand" + "testing" + + "github.com/rs/zerolog" + "github.com/stretchr/testify/require" + + 
"github.com/onflow/flow-go/model/flow" +) + +func BenchmarkStartSpanFromParent(b *testing.B) { + tracer, err := NewTracer(zerolog.Logger{}, "test", string(flow.Localnet), 0) + require.NoError(b, err) + + tracer.Ready() + defer tracer.Done() + + span, _, sampled := tracer.StartTransactionSpan(context.Background(), flow.Identifier{}, "test") + require.True(b, sampled) + defer span.Finish() + + b.ResetTimer() + for i := 0; i < b.N; i++ { + s := tracer.StartSpanFromParent(span, SpanName("test")) + s.Finish() + } + b.StopTimer() +} + +func BenchmarkStartTransactionSpan(b *testing.B) { + tracer, err := NewTracer(zerolog.Logger{}, "test", string(flow.Localnet), 0) + require.NoError(b, err) + + tracer.Ready() + defer tracer.Done() + + for _, t := range []struct { + name string + n int + }{ + {name: "cacheHit", n: 100}, + {name: "cacheMiss", n: 100000}, + } { + t := t + b.Run(t.name, func(b *testing.B) { + randomIDs := make([]flow.Identifier, 0, t.n) + for i := 0; i < t.n; i++ { + var randomBytes [flow.IdentifierLen]byte + _, err := rand.Read(randomBytes[:]) + require.NoError(b, err) + randomIDs = append(randomIDs, flow.Identifier(randomBytes)) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + span, _, sampled := tracer.StartTransactionSpan(context.Background(), randomIDs[i%t.n], "test") + require.True(b, sampled) + span.Finish() + } + b.StopTimer() + }) + } +} From a8fb26794daaef72fafa03c8af86db3ecd234cdf Mon Sep 17 00:00:00 2001 From: lolpuddle Date: Fri, 15 Jul 2022 04:10:39 -0400 Subject: [PATCH 202/223] add metrics and debug logs for changes to conn pool --- .../access/rpc/backend/connection_factory.go | 53 ++++++++----------- engine/access/rpc/engine.go | 5 +- module/metrics.go | 3 ++ module/metrics/access.go | 23 +++++--- module/metrics/noop.go | 1 + 5 files changed, 47 insertions(+), 38 deletions(-) diff --git a/engine/access/rpc/backend/connection_factory.go b/engine/access/rpc/backend/connection_factory.go index 33ecf9b0332..1342e34ffbb 100644 --- 
a/engine/access/rpc/backend/connection_factory.go +++ b/engine/access/rpc/backend/connection_factory.go @@ -10,6 +10,7 @@ import ( lru "github.com/hashicorp/golang-lru" "github.com/onflow/flow/protobuf/go/flow/access" "github.com/onflow/flow/protobuf/go/flow/execution" + "github.com/rs/zerolog" "google.golang.org/grpc" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials/insecure" @@ -51,11 +52,13 @@ type ConnectionFactoryImpl struct { ConnectionsCache *lru.Cache CacheSize uint AccessMetrics module.AccessMetrics + Log zerolog.Logger mutex sync.Mutex } type ConnectionCacheStore struct { ClientConn *grpc.ClientConn + Address string mutex *sync.Mutex } @@ -67,7 +70,9 @@ func (cf *ConnectionFactoryImpl) createConnection(address string, timeout time.D } keepaliveParams := keepalive.ClientParameters{ - Time: 10 * time.Second, + // how long the client will wait before sending a keepalive to the server if there is no activity + Time: 10 * time.Second, + // how long the client will wait for a response from the keepalive before closing Timeout: timeout, } @@ -89,53 +94,39 @@ func (cf *ConnectionFactoryImpl) createConnection(address string, timeout time.D func (cf *ConnectionFactoryImpl) retrieveConnection(grpcAddress string, timeout time.Duration) (*grpc.ClientConn, error) { var conn *grpc.ClientConn - var clientMutex *sync.Mutex var store *ConnectionCacheStore cf.mutex.Lock() if res, ok := cf.ConnectionsCache.Get(grpcAddress); ok { store = res.(*ConnectionCacheStore) - clientMutex = store.mutex conn = store.ClientConn if cf.AccessMetrics != nil { cf.AccessMetrics.ConnectionFromPoolRetrieved() } } else { - clientMutex = new(sync.Mutex) store = &ConnectionCacheStore{ ClientConn: nil, - mutex: clientMutex, + Address: grpcAddress, + mutex: new(sync.Mutex), } + cf.Log.Debug().Str("grpc_conn_added", grpcAddress).Msg("adding grpc connection to pool") cf.ConnectionsCache.Add(grpcAddress, store) + cf.AccessMetrics.ConnectionAddedToPool() } - clientMutex.Lock() - 
defer clientMutex.Unlock() cf.mutex.Unlock() + store.mutex.Lock() + defer store.mutex.Unlock() if conn == nil || conn.GetState() == connectivity.Shutdown { - // updates to the cache don't trigger evictions; this line closes connections before re-establishing new ones - if conn != nil { - conn.Close() - } var err error - conn, err = cf.addConnection(grpcAddress, timeout, store) + conn, err = cf.createConnection(grpcAddress, timeout) if err != nil { return nil, err } - } - return conn, nil -} - -func (cf *ConnectionFactoryImpl) addConnection(grpcAddress string, timeout time.Duration, store *ConnectionCacheStore) (*grpc.ClientConn, error) { - conn, err := cf.createConnection(grpcAddress, timeout) - if err != nil { - return nil, err - } - - store.ClientConn = conn - - if cf.AccessMetrics != nil { - cf.AccessMetrics.TotalConnectionsInPool(uint(cf.ConnectionsCache.Len()), cf.CacheSize) + store.ClientConn = conn + if cf.AccessMetrics != nil { + cf.AccessMetrics.TotalConnectionsInPool(uint(cf.ConnectionsCache.Len()), cf.CacheSize) + } } return conn, nil } @@ -184,13 +175,13 @@ func (cf *ConnectionFactoryImpl) invalidateAPIClient(address string, port uint) grpcAddress, _ := getGRPCAddress(address, port) if res, ok := cf.ConnectionsCache.Get(grpcAddress); ok { store := res.(*ConnectionCacheStore) + conn := store.ClientConn store.mutex.Lock() - if store.ClientConn != nil { - conn := store.ClientConn - conn.Close() - store.ClientConn = nil - } + store.ClientConn = nil store.mutex.Unlock() + // allow time for any existing requests to finish before closing the connection + time.Sleep(DefaultClientTimeout) + conn.Close() } } diff --git a/engine/access/rpc/engine.go b/engine/access/rpc/engine.go index 7a3732230b0..2936a6564b9 100644 --- a/engine/access/rpc/engine.go +++ b/engine/access/rpc/engine.go @@ -139,7 +139,9 @@ func NewBuilder(log zerolog.Logger, cache, err := lru.NewWithEvict(int(cacheSize), func(_, evictedValue interface{}) { // allow time for any existing requests to 
finish before closing the connection time.Sleep(backend.DefaultClientTimeout) - evictedValue.(*backend.ConnectionCacheStore).ClientConn.Close() + store := evictedValue.(*backend.ConnectionCacheStore) + log.Debug().Str("grpc_conn_evicted", store.Address).Msg("closing grpc connection evicted from pool") + store.ClientConn.Close() }) if err != nil { return nil, fmt.Errorf("could not initialize connection pool cache: %w", err) @@ -153,6 +155,7 @@ func NewBuilder(log zerolog.Logger, ConnectionsCache: cache, CacheSize: cacheSize, AccessMetrics: accessMetrics, + Log: log, } backend := backend.New(state, diff --git a/module/metrics.go b/module/metrics.go index 05957f46a2d..03e26b870f6 100644 --- a/module/metrics.go +++ b/module/metrics.go @@ -371,6 +371,9 @@ type AccessMetrics interface { // ConnectionFromPoolRetrieved tracks the number of times a connection to a collection/execution node is retrieved from the connection pool ConnectionFromPoolRetrieved() + + // ConnectionAddedToPool tracks the number of times a collection/execution node is added to the connection pool + ConnectionAddedToPool() } type ExecutionMetrics interface { diff --git a/module/metrics/access.go b/module/metrics/access.go index 9bf86e8a161..9f8d978cb46 100644 --- a/module/metrics/access.go +++ b/module/metrics/access.go @@ -6,8 +6,9 @@ import ( ) type AccessCollector struct { - connectionReused prometheus.Counter - connectionAddedToPool *prometheus.GaugeVec + connectionReused prometheus.Counter + connectionsInPool *prometheus.GaugeVec + connectionAdded prometheus.Counter } func NewAccessCollector() *AccessCollector { @@ -18,12 +19,18 @@ func NewAccessCollector() *AccessCollector { Subsystem: subsystemConnectionPool, Help: "counter for the number of times connections get reused", }), - connectionAddedToPool: promauto.NewGaugeVec(prometheus.GaugeOpts{ - Name: "connection_added", + connectionsInPool: promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "connections_in_pool", Namespace: namespaceAccess, 
Subsystem: subsystemConnectionPool, Help: "counter for the number of connections in the pool against max number tne pool can hold", }, []string{"result"}), + connectionAdded: promauto.NewCounter(prometheus.CounterOpts{ + Name: "connection_added", + Namespace: namespaceAccess, + Subsystem: subsystemConnectionPool, + Help: "counter for the number of times connections are added", + }), } return ac @@ -34,6 +41,10 @@ func (ac *AccessCollector) ConnectionFromPoolRetrieved() { } func (ac *AccessCollector) TotalConnectionsInPool(connectionCount uint, connectionPoolSize uint) { - ac.connectionAddedToPool.WithLabelValues("connections").Set(float64(connectionCount)) - ac.connectionAddedToPool.WithLabelValues("pool_size").Set(float64(connectionPoolSize)) + ac.connectionsInPool.WithLabelValues("connections").Set(float64(connectionCount)) + ac.connectionsInPool.WithLabelValues("pool_size").Set(float64(connectionPoolSize)) +} + +func (ac *AccessCollector) ConnectionAddedToPool() { + ac.connectionAdded.Inc() } diff --git a/module/metrics/noop.go b/module/metrics/noop.go index 1c4f02659b4..12d4e58986b 100644 --- a/module/metrics/noop.go +++ b/module/metrics/noop.go @@ -112,6 +112,7 @@ func (nc *NoopCollector) OnChunkConsumerJobDone(uint64) func (nc *NoopCollector) OnChunkDataPackResponseReceivedFromNetworkByRequester() {} func (nc *NoopCollector) TotalConnectionsInPool(connectionCount uint, connectionPoolSize uint) {} func (nc *NoopCollector) ConnectionFromPoolRetrieved() {} +func (nc *NoopCollector) ConnectionAddedToPool() {} func (nc *NoopCollector) StartBlockReceivedToExecuted(blockID flow.Identifier) {} func (nc *NoopCollector) FinishBlockReceivedToExecuted(blockID flow.Identifier) {} func (nc *NoopCollector) ExecutionComputationUsedPerBlock(computation uint64) {} From 75ec2769559c520026aa4bb60553177ee29705d0 Mon Sep 17 00:00:00 2001 From: Misha Date: Fri, 15 Jul 2022 14:40:22 -0400 Subject: [PATCH 203/223] fix flaky test: TestStaticEpochTransition (#2698) * added timestamp to 
logs for block_state, testnet_state_tracker * added CI flaky test debug workflow for TestEpochStaticTransition/TestStaticEpochTransition * fixed typo in test-category in CI file * fixed docker build image target * updated workflow to run cron scheduler on non-master branch * fixed bug in CI file - flaky-test-debug.yml * Update static epoch test to use same testing flow (#2761) * update static epoch test to use same testing flow * updated bft-tests.yml for debugging TestEpochStaticTransition/TestStaticEpochTransition * added scheduled run every hour * kick off another CI test run for TestEpochStaticTransition/TestStaticEpochTransition #1 * kick off another CI test run for TestEpochStaticTransition/TestStaticEpochTransition #2 * #3 kick off another CI test run for TestEpochStaticTransition/TestStaticEpochTransition * #4 kick off another CI test run for TestEpochStaticTransition/TestStaticEpochTransition * #5 kick off another CI test run for TestEpochStaticTransition/TestStaticEpochTransition * #6 kick off another CI test run for TestEpochStaticTransition/TestStaticEpochTransition * #7 kick off another CI test run for TestEpochStaticTransition/TestStaticEpochTransition * #8 kick off another CI test run for TestEpochStaticTransition/TestStaticEpochTransition * #9 kick off another CI test run for TestEpochStaticTransition/TestStaticEpochTransition * #10 kick off another CI test run for TestEpochStaticTransition/TestStaticEpochTransition * remove sleep * Dummy commit * dummy commit * dummy commit * dummy commit * dummy commit * dummy commit * dummy commit * dummy commit * dummy commit * dummy commit * 7699 dummy commit * 3651 dummy commit * 21538 dummy commit * 7108 dummy commit * 7186 dummy commit * 1772 dummy commit * 30464 dummy commit * 15633 dummy commit * 11129 dummy commit * 15839 dummy commit * remove dummy file * remove dummy file Co-authored-by: gomisha * minor cleanup * more minor cleanup Co-authored-by: Jordan Schalm --- .github/workflows/flaky-test-debug.yml 
| 45 +++++++ .../epochs/epoch_join_and_leave_an_test.go | 3 +- .../epochs/epoch_static_transition_test.go | 126 ++++-------------- integration/tests/epochs/suite.go | 28 +++- integration/tests/lib/block_state.go | 17 +-- .../tests/lib/testnet_state_tracker.go | 13 +- 6 files changed, 116 insertions(+), 116 deletions(-) create mode 100644 .github/workflows/flaky-test-debug.yml diff --git a/.github/workflows/flaky-test-debug.yml b/.github/workflows/flaky-test-debug.yml new file mode 100644 index 00000000000..0fa0fc69c19 --- /dev/null +++ b/.github/workflows/flaky-test-debug.yml @@ -0,0 +1,45 @@ +name: Flaky Test Debug + +on: + schedule: + - cron: '0 */1 * * *' # every 1 hour + push: + branches: + - '**/*flaky-test*' +env: + GO_VERSION: 1.18 + +jobs: + integration-test: + name: Integration Tests + strategy: + fail-fast: false + matrix: + test-category: + - integration-epochs + env: + TEST_CATEGORY: ${{ matrix.test-category }} + runs-on: ubuntu-latest + steps: + - name: Setup Go + uses: actions/setup-go@v2 + with: + go-version: ${{ env.GO_VERSION }} + - name: Checkout repo + uses: actions/checkout@v2 + - name: Build relic + run: make crypto_setup_gopath + - name: Docker build + run: make docker-build-flow + - name: Run tests + if: github.actor != 'bors[bot]' + run: ./tools/test_monitor/run-tests.sh + - name: Run tests (Bors) + if: github.actor == 'bors[bot]' + uses: nick-invision/retry@v2 + with: + timeout_minutes: 15 + max_attempts: 2 + command: ./tools/test_monitor/run-tests.sh + + diff --git a/integration/tests/epochs/epoch_join_and_leave_an_test.go b/integration/tests/epochs/epoch_join_and_leave_an_test.go index fd132940cc6..8387b1b939c 100644 --- a/integration/tests/epochs/epoch_join_and_leave_an_test.go +++ b/integration/tests/epochs/epoch_join_and_leave_an_test.go @@ -3,9 +3,10 @@ package epochs import ( "testing" + "github.com/stretchr/testify/suite" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/utils/unittest" - 
"github.com/stretchr/testify/suite" ) func TestEpochJoinAndLeaveAN(t *testing.T) { diff --git a/integration/tests/epochs/epoch_static_transition_test.go b/integration/tests/epochs/epoch_static_transition_test.go index bd8ed6a5760..1b54d4b0f5e 100644 --- a/integration/tests/epochs/epoch_static_transition_test.go +++ b/integration/tests/epochs/epoch_static_transition_test.go @@ -3,13 +3,10 @@ package epochs import ( "testing" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - "github.com/onflow/flow-go/integration/testnet" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/state/protocol" ) func TestEpochStaticTransition(t *testing.T) { @@ -33,107 +30,40 @@ func (s *StaticEpochTransitionSuite) SetupTest() { s.Suite.SetupTest() } -// TestStaticEpochTransition asserts epoch state transitions over two full epochs -// without any nodes joining or leaving. +// TestStaticEpochTransition asserts epoch state transitions over full epoch +// without any nodes joining or leaving. In particular, we assert that we enter +// the EpochSetup phase, then successfully enter the second epoch (implying a +// successful DKG). +// This is equivalent to runTestEpochJoinAndLeave, without any committee changes. func (s *StaticEpochTransitionSuite) TestStaticEpochTransition() { - // phaseCheck is a utility struct that contains information about the - // final view of each epoch/phase. 
- type phaseCheck struct { - epoch uint64 - phase flow.EpochPhase - finalView uint64 // the final view of the phase as defined by the EpochSetup - } + s.TimedLogf("waiting for EpochSetup phase of first epoch to begin") + s.WaitForPhase(s.ctx, flow.EpochPhaseSetup) + s.TimedLogf("successfully reached EpochSetup phase of first epoch") - phaseChecks := []*phaseCheck{} - // iterate through two epochs and populate a list of phase checks - for counter := 0; counter < 2; counter++ { + snapshot, err := s.client.GetLatestProtocolSnapshot(s.ctx) + require.NoError(s.T(), err) - // wait until the access node reaches the desired epoch - var epoch protocol.Epoch - var epochCounter uint64 - for epoch == nil || epochCounter != uint64(counter) { - snapshot, err := s.client.GetLatestProtocolSnapshot(s.ctx) - require.NoError(s.T(), err) - epoch = snapshot.Epochs().Current() - epochCounter, err = epoch.Counter() - require.NoError(s.T(), err) - } + header, err := snapshot.Head() + require.NoError(s.T(), err) + s.TimedLogf("retrieved header after entering EpochSetup phase: height=%d, view=%d", header.Height, header.View) - epochFirstView, err := epoch.FirstView() - require.NoError(s.T(), err) - epochDKGPhase1Final, err := epoch.DKGPhase1FinalView() - require.NoError(s.T(), err) - epochDKGPhase2Final, err := epoch.DKGPhase2FinalView() - require.NoError(s.T(), err) - epochDKGPhase3Final, err := epoch.DKGPhase3FinalView() - require.NoError(s.T(), err) - epochFinal, err := epoch.FinalView() - require.NoError(s.T(), err) + epoch1FinalView, err := snapshot.Epochs().Current().FinalView() + require.NoError(s.T(), err) + epoch1Counter, err := snapshot.Epochs().Current().Counter() + require.NoError(s.T(), err) - epochViews := []*phaseCheck{ - {epoch: epochCounter, phase: flow.EpochPhaseStaking, finalView: epochFirstView}, - {epoch: epochCounter, phase: flow.EpochPhaseSetup, finalView: epochDKGPhase1Final}, - {epoch: epochCounter, phase: flow.EpochPhaseSetup, finalView: epochDKGPhase2Final}, - 
{epoch: epochCounter, phase: flow.EpochPhaseSetup, finalView: epochDKGPhase3Final}, - {epoch: epochCounter, phase: flow.EpochPhaseCommitted, finalView: epochFinal}, - } + // wait for the final view of the first epoch + s.TimedLogf("waiting for the final view (%d) of epoch %d", epoch1FinalView, epoch1Counter) + s.BlockState.WaitForSealedView(s.T(), epoch1FinalView+5) + s.TimedLogf("sealed final view (%d) of epoch %d", epoch1FinalView, epoch1Counter) - for _, v := range epochViews { - s.BlockState.WaitForSealedView(s.T(), v.finalView) - } + // assert transition to second epoch happened as expected + // if counter is still 0, epoch emergency fallback was triggered and we can fail early + s.assertEpochCounter(s.ctx, 1) - phaseChecks = append(phaseChecks, epochViews...) - } - - s.net.StopContainers() - - consensusContainers := make([]*testnet.Container, 0) - for _, c := range s.net.Containers { - if c.Config.Role == flow.RoleConsensus { - consensusContainers = append(consensusContainers, c) - } - } - - for _, c := range consensusContainers { - containerState, err := c.OpenState() - require.NoError(s.T(), err) - - // create a map of [view] => {epoch-counter, phase} - lookup := map[uint64]struct { - epochCounter uint64 - phase flow.EpochPhase - }{} - - final, err := containerState.Final().Head() - require.NoError(s.T(), err) - - var h uint64 - for h = 0; h <= final.Height; h++ { - snapshot := containerState.AtHeight(h) - - head, err := snapshot.Head() - require.NoError(s.T(), err) - - epoch := snapshot.Epochs().Current() - currentEpochCounter, err := epoch.Counter() - require.NoError(s.T(), err) - currentPhase, err := snapshot.Phase() - require.NoError(s.T(), err) - - lookup[head.View] = struct { - epochCounter uint64 - phase flow.EpochPhase - }{ - currentEpochCounter, - currentPhase, - } - } - - for _, v := range phaseChecks { - item := lookup[v.finalView] - assert.Equal(s.T(), v.epoch, item.epochCounter, "wrong epoch at view %d", v.finalView) - assert.Equal(s.T(), 
v.phase, item.phase, "wrong phase at view %d", v.finalView) - } - } + // submit a smoke test transaction to verify the network can seal a transaction + s.TimedLogf("sending smoke test transaction in second epoch") + s.submitSmokeTestTransaction(s.ctx) + s.TimedLogf("successfully submitted and observed sealing of smoke test transaction") } diff --git a/integration/tests/epochs/suite.go b/integration/tests/epochs/suite.go index dc0aaad727b..dc134caf583 100644 --- a/integration/tests/epochs/suite.go +++ b/integration/tests/epochs/suite.go @@ -129,6 +129,12 @@ func (s *Suite) Ghost() *client.GhostClient { return client } +// TimedLogf logs the message using t.Log, but prefixes the current time. +func (s *Suite) TimedLogf(msg string, args ...interface{}) { + args = append([]interface{}{time.Now().String()}, args...) + s.T().Logf("%s - "+msg, args...) +} + func (s *Suite) TearDownTest() { s.log.Info().Msg("================> Start TearDownTest") s.net.Remove() @@ -762,13 +768,19 @@ func (s *Suite) runTestEpochJoinAndLeave(role flow.Role, checkNetworkHealth node } // staking our new node and add get the corresponding container for that node + s.TimedLogf("staking joining node with role %s", role.String()) info, testContainer := s.StakeNewNode(s.ctx, env, role) + s.TimedLogf("successfully staked joining node: %s", info.NodeID) // use admin transaction to remove node, this simulates a node leaving the network + s.TimedLogf("removing node %s with role %s", containerToReplace.Config.NodeID, role.String()) s.removeNodeFromProtocol(s.ctx, env, containerToReplace.Config.NodeID) + s.TimedLogf("successfully removed node: %s", containerToReplace.Config.NodeID) // wait for epoch setup phase before we start our container and pause the old container + s.TimedLogf("waiting for EpochSetup phase of first epoch to begin") s.WaitForPhase(s.ctx, flow.EpochPhaseSetup) + s.TimedLogf("successfully reached EpochSetup phase of first epoch") // get latest snapshot and start new container snapshot, 
err := s.client.GetLatestProtocolSnapshot(s.ctx) @@ -776,13 +788,19 @@ func (s *Suite) runTestEpochJoinAndLeave(role flow.Role, checkNetworkHealth node testContainer.WriteRootSnapshot(snapshot) testContainer.Container.Start(s.ctx) - currentEpochFinalView, err := snapshot.Epochs().Current().FinalView() + header, err := snapshot.Head() + require.NoError(s.T(), err) + s.TimedLogf("retrieved header after entering EpochSetup phase: height=%d, view=%d", header.Height, header.View) + + epoch1FinalView, err := snapshot.Epochs().Current().FinalView() require.NoError(s.T(), err) // wait for 5 views after the start of the next epoch before we pause our container to replace - s.BlockState.WaitForSealedView(s.T(), currentEpochFinalView+5) + s.TimedLogf("waiting for sealed view %d before pausing container", epoch1FinalView+5) + s.BlockState.WaitForSealedView(s.T(), epoch1FinalView+5) + s.TimedLogf("observed sealed view %d -> pausing container", epoch1FinalView+5) - //make sure container to replace removed from smart contract state + // make sure container to replace removed from smart contract state s.assertNodeNotApprovedOrProposed(s.ctx, env, containerToReplace.Config.NodeID) // assert transition to second epoch happened as expected @@ -793,7 +811,9 @@ func (s *Suite) runTestEpochJoinAndLeave(role flow.Role, checkNetworkHealth node require.NoError(s.T(), err) // wait for 5 views after pausing our container to replace before we assert healthy network - s.BlockState.WaitForSealedView(s.T(), currentEpochFinalView+10) + s.TimedLogf("waiting for sealed view %d before asserting network health", epoch1FinalView+10) + s.BlockState.WaitForSealedView(s.T(), epoch1FinalView+10) + s.TimedLogf("observed sealed view %d -> asserting network health", epoch1FinalView+10) // make sure the network is healthy after adding new node checkNetworkHealth(s.ctx, env, snapshot, info) diff --git a/integration/tests/lib/block_state.go b/integration/tests/lib/block_state.go index 1163643d7fa..887a2ca5d67 
100644 --- a/integration/tests/lib/block_state.go +++ b/integration/tests/lib/block_state.go @@ -60,7 +60,7 @@ func (bs *BlockState) WaitForBlockById(t *testing.T, blockId flow.Identifier) *m defer bs.RUnlock() if block, ok := bs.blocksByID[blockId]; !ok { - t.Logf("pending for block id: %x\n", blockId) + t.Logf("%v pending for block id: %x\n", time.Now().UTC(), blockId) return false } else { blockProposal = block @@ -78,7 +78,7 @@ func (bs *BlockState) WaitForBlockById(t *testing.T, blockId flow.Identifier) *m // It also processes the seals of blocks being finalized. func (bs *BlockState) processAncestors(t *testing.T, b *messages.BlockProposal, confirmsHeight uint64) { // puts this block proposal and all ancestors into `finalizedByHeight` - t.Logf("new height arrived: %d\n", b.Header.Height) + t.Logf("%v new height arrived: %d\n", time.Now().UTC(), b.Header.Height) ancestor, ok := b, true for ancestor.Header.Height > bs.highestFinalized { heightDistance := b.Header.Height - ancestor.Header.Height @@ -97,7 +97,8 @@ func (bs *BlockState) processAncestors(t *testing.T, b *messages.BlockProposal, if finalized.Header.Height > bs.highestFinalized { // updates highestFinalized height bs.highestFinalized = finalized.Header.Height } - t.Logf("height %d finalized %d, highest finalized %d \n", + t.Logf("%v height %d finalized %d, highest finalized %d \n", + time.Now().UTC(), b.Header.Height, finalized.Header.Height, bs.highestFinalized) @@ -114,8 +115,8 @@ func (bs *BlockState) processAncestors(t *testing.T, b *messages.BlockProposal, } } } else { - t.Logf("fork detected: view distance (%d) between received block and ancestor is not same as their height distance (%d)\n", - viewDistance, heightDistance) + t.Logf("%v fork detected: view distance (%d) between received block and ancestor is not same as their height distance (%d)\n", + time.Now().UTC(), viewDistance, heightDistance) } } @@ -139,7 +140,7 @@ func (bs *BlockState) WaitForHighestFinalizedProgress(t *testing.T) 
*messages.Bl bs.RLock() // avoiding concurrent map access defer bs.RUnlock() - t.Logf("checking highest finalized: %d, highest proposed: %d\n", bs.highestFinalized, bs.highestProposed) + t.Logf("%v checking highest finalized: %d, highest proposed: %d\n", time.Now().UTC(), bs.highestFinalized, bs.highestProposed) return bs.highestFinalized > currentFinalized }, blockStateTimeout, 100*time.Millisecond, fmt.Sprintf("did not receive progress on highest finalized height (%v) from (%v) within %v seconds", @@ -201,7 +202,7 @@ func (bs *BlockState) WaitForSealed(t *testing.T, height uint64) *messages.Block require.Eventually(t, func() bool { if bs.highestSealed != nil { - t.Logf("waiting for sealed height (%d/%d), last finalized %d", bs.highestSealed.Header.Height, height, bs.highestFinalized) + t.Logf("%v waiting for sealed height (%d/%d), last finalized %d", time.Now().UTC(), bs.highestSealed.Header.Height, height, bs.highestFinalized) } return bs.highestSealed != nil && bs.highestSealed.Header.Height >= height }, @@ -224,7 +225,7 @@ func (bs *BlockState) WaitForSealedView(t *testing.T, view uint64) *messages.Blo require.Eventually(t, func() bool { if bs.highestSealed != nil { - t.Logf("waiting for sealed view (%d/%d)", bs.highestSealed.Header.View, view) + t.Logf("%v waiting for sealed view (%d/%d)", time.Now().UTC(), bs.highestSealed.Header.View, view) } return bs.highestSealed != nil && bs.highestSealed.Header.View >= view }, diff --git a/integration/tests/lib/testnet_state_tracker.go b/integration/tests/lib/testnet_state_tracker.go index e449c316a3e..3f3121a64c9 100644 --- a/integration/tests/lib/testnet_state_tracker.go +++ b/integration/tests/lib/testnet_state_tracker.go @@ -46,7 +46,7 @@ func (tst *TestnetStateTracker) Track(t *testing.T, ctx context.Context, ghost * var err error reader, err = ghost.Subscribe(context.Background()) if err != nil { - t.Logf("error subscribing to ghost: %v\n", err) + t.Logf("%v error subscribing to ghost: %v\n", time.Now().UTC(), 
err) } else { retry = false } @@ -85,14 +85,16 @@ func (tst *TestnetStateTracker) Track(t *testing.T, ctx context.Context, ghost * switch m := msg.(type) { case *messages.BlockProposal: tst.BlockState.Add(t, m) - t.Logf("block proposal received from %s at height %v, view %v: %x\n", + t.Logf("%v block proposal received from %s at height %v, view %v: %x\n", + time.Now().UTC(), sender, m.Header.Height, m.Header.View, m.Header.ID()) case *flow.ResultApproval: tst.ApprovalState.Add(sender, m) - t.Logf("result approval received from %s for execution result ID %x and chunk index %v\n", + t.Logf("%v result approval received from %s for execution result ID %x and chunk index %v\n", + time.Now().UTC(), sender, m.Body.ExecutionResultID, m.Body.ChunkIndex) @@ -101,7 +103,8 @@ func (tst *TestnetStateTracker) Track(t *testing.T, ctx context.Context, ghost * finalState, err := m.ExecutionResult.FinalStateCommitment() require.NoError(t, err) tst.ReceiptState.Add(m) - t.Logf("execution receipts received from %s for block ID %x by executor ID %x with final state %x result ID %x chunks %d\n", + t.Logf("%v execution receipts received from %s for block ID %x by executor ID %x with final state %x result ID %x chunks %d\n", + time.Now().UTC(), sender, m.ExecutionResult.BlockID, m.ExecutorID, @@ -110,7 +113,7 @@ func (tst *TestnetStateTracker) Track(t *testing.T, ctx context.Context, ghost * len(m.ExecutionResult.Chunks)) default: - t.Logf("other msg received from %s: %#v\n", sender, msg) + t.Logf("%v other msg received from %s: %#v\n", time.Now().UTC(), sender, msg) continue } } From dcb363652bf44387d9336256271bfeba4e4cf294 Mon Sep 17 00:00:00 2001 From: Alexey Ivanov Date: Fri, 15 Jul 2022 13:08:14 -0700 Subject: [PATCH 204/223] chore(trace): rename file --- module/trace/{logTracer.go => log_tracer.go} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename module/trace/{logTracer.go => log_tracer.go} (100%) diff --git a/module/trace/logTracer.go b/module/trace/log_tracer.go 
similarity index 100% rename from module/trace/logTracer.go rename to module/trace/log_tracer.go From 9652b94c903adc3839f0c11c31acd4c302aa5cef Mon Sep 17 00:00:00 2001 From: "Ramtin M. Seraj" Date: Fri, 15 Jul 2022 16:56:26 -0700 Subject: [PATCH 205/223] [FVM] removing legacy controller from storage (#2713) * remove legacy controller * fix tests * update chunk verifier * update test * update emulator to support controller removal * update flow protobuf version * go mod tidy * apply PR's comments * update flow proto files * update flow emulator version * go mod tidy * update state commitment * no in-place payload update * expected update to state commitments * update recent changes * apply PR's comment --- .../execution_state_extract_test.go | 17 +- .../read-execution-state/list-accounts/cmd.go | 4 +- .../account_status_migration_test.go | 22 +- cmd/util/ledger/migrations/accounts.go | 40 ++-- .../migrations/legacy_controller_migration.go | 38 ++-- .../legacy_controller_migration_test.go | 56 +++-- .../migrations/storage_fees_migration.go | 5 +- .../storage_used_update_migration_test.go | 7 +- cmd/util/ledger/migrations/utils.go | 10 +- cmd/util/ledger/reporters/atree_reporter.go | 1 - .../computation/committer/committer_test.go | 1 - .../computation/computer/computer_test.go | 29 ++- engine/execution/computation/manager_test.go | 4 +- engine/execution/ingestion/engine.go | 6 +- engine/execution/ingestion/ingest_rpc.go | 2 +- engine/execution/ingestion/mock/ingest_rpc.go | 14 +- engine/execution/rpc/engine.go | 5 +- engine/execution/rpc/engine_test.go | 18 +- .../state/bootstrap/bootstrap_test.go | 2 +- engine/execution/state/delta/delta.go | 14 +- engine/execution/state/delta/delta_test.go | 62 +++--- engine/execution/state/delta/view.go | 32 +-- engine/execution/state/delta/view_test.go | 204 +++++++++--------- engine/execution/state/state.go | 22 +- engine/execution/state/state_test.go | 34 +-- fvm/accounts_test.go | 6 +- fvm/errors/execution.go | 15 +- 
fvm/fvm_test.go | 2 +- fvm/handler/programs_test.go | 20 +- fvm/mock/state/ledger.go | 44 ++-- fvm/mock/state/view.go | 44 ++-- fvm/state/accounts.go | 9 +- fvm/state/accounts_test.go | 24 ++- fvm/state/address_generator.go | 6 +- fvm/state/address_generator_test.go | 6 +- fvm/state/state.go | 106 +++++---- fvm/state/state_test.go | 99 ++++----- fvm/state/uuids.go | 4 +- fvm/state/view.go | 8 +- fvm/transactionInvoker_test.go | 6 - fvm/utils/test.go | 45 ++-- go.mod | 2 +- go.sum | 4 +- integration/go.mod | 4 +- integration/go.sum | 4 +- ledger/partial/ledger_test.go | 4 +- model/flow/ledger.go | 25 +-- model/flow/ledger_test.go | 21 +- module/chunks/chunkVerifier.go | 4 +- module/chunks/chunkVerifier_test.go | 20 +- storage/badger/operation/interactions_test.go | 14 +- utils/debug/registerCache.go | 27 +-- utils/debug/remoteView.go | 27 ++- utils/unittest/execution_state.go | 2 +- 54 files changed, 613 insertions(+), 638 deletions(-) diff --git a/cmd/util/cmd/execution-state-extract/execution_state_extract_test.go b/cmd/util/cmd/execution-state-extract/execution_state_extract_test.go index edb029fce3a..af5a9f639f3 100644 --- a/cmd/util/cmd/execution-state-extract/execution_state_extract_test.go +++ b/cmd/util/cmd/execution-state-extract/execution_state_extract_test.go @@ -192,17 +192,17 @@ func TestExtractExecutionState(t *testing.T) { func getSampleKeyValues(i int) ([]ledger.Key, []ledger.Value) { switch i { case 0: - return []ledger.Key{getKey("", "", "uuid"), getKey("", "", "account_address_state")}, + return []ledger.Key{getKey("", "uuid"), getKey("", "account_address_state")}, []ledger.Value{[]byte{'1'}, []byte{'A'}} case 1: - return []ledger.Key{getKey("ADDRESS", "ADDRESS", "public_key_count"), - getKey("ADDRESS", "ADDRESS", "public_key_0"), - getKey("ADDRESS", "", "exists"), - getKey("ADDRESS", "", "storage_used")}, + return []ledger.Key{getKey("ADDRESS", "public_key_count"), + getKey("ADDRESS", "public_key_0"), + getKey("ADDRESS", "exists"), + 
getKey("ADDRESS", "storage_used")}, []ledger.Value{[]byte{1}, []byte("PUBLICKEYXYZ"), []byte{1}, []byte{100}} case 2: // TODO change the contract_names to CBOR encoding - return []ledger.Key{getKey("ADDRESS", "ADDRESS", "contract_names"), getKey("ADDRESS", "ADDRESS", "code.mycontract")}, + return []ledger.Key{getKey("ADDRESS", "contract_names"), getKey("ADDRESS", "code.mycontract")}, []ledger.Value{[]byte("mycontract"), []byte("CONTRACT Content")} default: keys := make([]ledger.Key, 0) @@ -213,17 +213,16 @@ func getSampleKeyValues(i int) ([]ledger.Key, []ledger.Value) { if err != nil { panic(err) } - keys = append(keys, getKey(string(address), "", "test")) + keys = append(keys, getKey(string(address), "test")) values = append(values, getRandomCadenceValue()) } return keys, values } } -func getKey(owner, controller, key string) ledger.Key { +func getKey(owner, key string) ledger.Key { return ledger.Key{KeyParts: []ledger.KeyPart{ {Type: uint16(0), Value: []byte(owner)}, - {Type: uint16(1), Value: []byte(controller)}, {Type: uint16(2), Value: []byte(key)}, }, } diff --git a/cmd/util/cmd/read-execution-state/list-accounts/cmd.go b/cmd/util/cmd/read-execution-state/list-accounts/cmd.go index cef6e1adf65..47beaea0b9a 100644 --- a/cmd/util/cmd/read-execution-state/list-accounts/cmd.go +++ b/cmd/util/cmd/read-execution-state/list-accounts/cmd.go @@ -74,9 +74,9 @@ func run(*cobra.Command, []string) { log.Fatal().Err(err).Msgf("invalid chain name") } - ldg := delta.NewView(func(owner, controller, key string) (flow.RegisterValue, error) { + ldg := delta.NewView(func(owner, key string) (flow.RegisterValue, error) { - ledgerKey := executionState.RegisterIDToKey(flow.NewRegisterID(owner, controller, key)) + ledgerKey := executionState.RegisterIDToKey(flow.NewRegisterID(owner, key)) path, err := pathfinder.KeyToPath(ledgerKey, complete.DefaultPathFinderVersion) if err != nil { log.Fatal().Err(err).Msgf("cannot convert key to path") diff --git 
a/cmd/util/ledger/migrations/account_status_migration_test.go b/cmd/util/ledger/migrations/account_status_migration_test.go index 9e50de4ef5f..942dc65731f 100644 --- a/cmd/util/ledger/migrations/account_status_migration_test.go +++ b/cmd/util/ledger/migrations/account_status_migration_test.go @@ -13,16 +13,6 @@ import ( "github.com/onflow/flow-go/ledger/common/utils" ) -func createAccountPayloadKey(a flow.Address, key string) ledger.Key { - return ledger.Key{ - KeyParts: []ledger.KeyPart{ - ledger.NewKeyPart(0, a.Bytes()), - ledger.NewKeyPart(1, []byte("")), - ledger.NewKeyPart(2, []byte(key)), - }, - } -} - func TestAccountStatusMigration(t *testing.T) { mig := AccountStatusMigration{ Logger: zerolog.Logger{}, @@ -32,11 +22,11 @@ func TestAccountStatusMigration(t *testing.T) { address2 := flow.HexToAddress("0x2") payloads := []ledger.Payload{ - {Key: createAccountPayloadKey(address1, state.KeyStorageUsed), Value: utils.Uint64ToBinary(1)}, - {Key: createAccountPayloadKey(address1, "other registers"), Value: utils.Uint64ToBinary(2)}, - {Key: createAccountPayloadKey(address2, "other registers2"), Value: utils.Uint64ToBinary(3)}, - {Key: createAccountPayloadKey(address1, KeyExists), Value: []byte{1}}, - {Key: createAccountPayloadKey(address1, KeyAccountFrozen), Value: []byte{1}}, + {Key: createPayloadKeyWithLegacyController(address1, state.KeyStorageUsed, true), Value: utils.Uint64ToBinary(1)}, + {Key: createPayloadKeyWithLegacyController(address1, "other registers", true), Value: utils.Uint64ToBinary(2)}, + {Key: createPayloadKeyWithLegacyController(address2, "other registers2", true), Value: utils.Uint64ToBinary(3)}, + {Key: createPayloadKeyWithLegacyController(address1, KeyExists, true), Value: []byte{1}}, + {Key: createPayloadKeyWithLegacyController(address1, KeyAccountFrozen, true), Value: []byte{1}}, } newPayloads, err := mig.Migrate(payloads) @@ -48,7 +38,7 @@ func TestAccountStatusMigration(t *testing.T) { require.True(t, newPayloads[2].Equals(&payloads[2])) 
expectedPayload := &ledger.Payload{ - Key: createAccountPayloadKey(address1, state.KeyAccountStatus), + Key: createPayloadKeyWithLegacyController(address1, state.KeyAccountStatus, true), Value: state.NewAccountStatus().ToBytes(), } require.True(t, newPayloads[3].Equals(expectedPayload)) diff --git a/cmd/util/ledger/migrations/accounts.go b/cmd/util/ledger/migrations/accounts.go index 6f667456746..20b392d814b 100644 --- a/cmd/util/ledger/migrations/accounts.go +++ b/cmd/util/ledger/migrations/accounts.go @@ -44,12 +44,12 @@ func (v *view) MergeView(o state.View) error { return nil } -func (v *view) Set(owner, controller, key string, value flow.RegisterValue) error { - return v.Ledger.Set(owner, controller, key, value) +func (v *view) Set(owner, key string, value flow.RegisterValue) error { + return v.Ledger.Set(owner, key, value) } -func (v *view) Get(owner, controller, key string) (flow.RegisterValue, error) { - value, err := v.Ledger.Get(owner, controller, key) +func (v *view) Get(owner, key string) (flow.RegisterValue, error) { + value, err := v.Ledger.Get(owner, key) if err != nil { return nil, err } @@ -58,18 +58,18 @@ func (v *view) Get(owner, controller, key string) (flow.RegisterValue, error) { } if v.Parent != nil { - return v.Parent.Get(owner, controller, key) + return v.Parent.Get(owner, key) } return nil, nil } -func (v *view) Touch(owner, controller, key string) error { - return v.Ledger.Touch(owner, controller, key) +func (v *view) Touch(owner, key string) error { + return v.Ledger.Touch(owner, key) } -func (v *view) Delete(owner, controller, key string) error { - return v.Ledger.Delete(owner, controller, key) +func (v *view) Delete(owner, key string) error { + return v.Ledger.Delete(owner, key) } func (v *view) Payloads() []ledger.Payload { @@ -88,27 +88,26 @@ type led struct { payloads map[string]ledger.Payload } -func (l *led) Set(owner, controller, key string, value flow.RegisterValue) error { +func (l *led) Set(owner, key string, value 
flow.RegisterValue) error { keyparts := []ledger.KeyPart{ledger.NewKeyPart(0, []byte(owner)), - ledger.NewKeyPart(1, []byte(controller)), ledger.NewKeyPart(2, []byte(key))} - fk := fullKey(owner, controller, key) + fk := fullKey(owner, key) l.payloads[fk] = ledger.Payload{Key: ledger.NewKey(keyparts), Value: ledger.Value(value)} return nil } -func (l *led) Get(owner, controller, key string) (flow.RegisterValue, error) { - fk := fullKey(owner, controller, key) +func (l *led) Get(owner, key string) (flow.RegisterValue, error) { + fk := fullKey(owner, key) return flow.RegisterValue(l.payloads[fk].Value), nil } -func (l *led) Delete(owner, controller, key string) error { - fk := fullKey(owner, controller, key) +func (l *led) Delete(owner, key string) error { + fk := fullKey(owner, key) delete(l.payloads, fk) return nil } -func (l *led) Touch(owner, controller, key string) error { +func (l *led) Touch(owner, key string) error { return nil } @@ -124,8 +123,7 @@ func newLed(payloads []ledger.Payload) *led { mapping := make(map[string]ledger.Payload) for _, p := range payloads { fk := fullKey(string(p.Key.KeyParts[0].Value), - string(p.Key.KeyParts[1].Value), - string(p.Key.KeyParts[2].Value)) + string(p.Key.KeyParts[1].Value)) mapping[fk] = p } @@ -134,6 +132,6 @@ func newLed(payloads []ledger.Payload) *led { } } -func fullKey(owner, controller, key string) string { - return strings.Join([]string{owner, controller, key}, "\x1F") +func fullKey(owner, key string) string { + return strings.Join([]string{owner, key}, "\x1F") } diff --git a/cmd/util/ledger/migrations/legacy_controller_migration.go b/cmd/util/ledger/migrations/legacy_controller_migration.go index bc95595ed0d..daa4834570b 100644 --- a/cmd/util/ledger/migrations/legacy_controller_migration.go +++ b/cmd/util/ledger/migrations/legacy_controller_migration.go @@ -6,7 +6,8 @@ import ( "github.com/rs/zerolog" - "github.com/onflow/flow-go/fvm/state" + "github.com/onflow/flow-go/engine/execution/state" + fvmState 
"github.com/onflow/flow-go/fvm/state" "github.com/onflow/flow-go/ledger" ) @@ -19,29 +20,30 @@ type LegacyControllerMigration struct { } func (lc *LegacyControllerMigration) Migrate(payload []ledger.Payload) ([]ledger.Payload, error) { - for _, p := range payload { + newPayloads := make([]ledger.Payload, len(payload)) + for i, p := range payload { owner := p.Key.KeyParts[0].Value controller := p.Key.KeyParts[1].Value key := p.Key.KeyParts[2].Value if len(controller) > 0 { - if bytes.Equal(owner, controller) { - // - if string(key) == state.KeyPublicKeyCount || // case - public key count - bytes.HasPrefix(key, []byte("public_key_")) || // case - public keys - string(key) == state.KeyContractNames || // case - contract names - bytes.HasPrefix(key, []byte(state.KeyCode)) { // case - contracts - p.Key.KeyParts[1].Value = []byte("") - continue - } + if bytes.Equal(owner, controller) && + string(key) != fvmState.KeyPublicKeyCount && // case - public key count + !bytes.HasPrefix(key, []byte("public_key_")) && // case - public keys + string(key) != fvmState.KeyContractNames && // case - contract names + !bytes.HasPrefix(key, []byte(fvmState.KeyCode)) { // case - contracts + lc.Logger.Warn().Msgf("found an unexpected new case of non-empty controller use: %s, %s, %s", + hex.EncodeToString(owner), + hex.EncodeToString(controller), + hex.EncodeToString(key), + ) } - // else we have found an unexpected new case of non-empty controller use - lc.Logger.Warn().Msgf("found an unexpected new case of non-empty controller use: %s, %s, %s", - hex.EncodeToString(owner), - hex.EncodeToString(controller), - hex.EncodeToString(key), - ) } + newKey := ledger.NewKey([]ledger.KeyPart{ + ledger.NewKeyPart(state.KeyPartOwner, owner), + ledger.NewKeyPart(state.KeyPartKey, key), + }) + newPayloads[i] = *ledger.NewPayload(newKey, p.Value) } - return payload, nil + return newPayloads, nil } diff --git a/cmd/util/ledger/migrations/legacy_controller_migration_test.go 
b/cmd/util/ledger/migrations/legacy_controller_migration_test.go index d3d9bce757f..d5ebe1975dd 100644 --- a/cmd/util/ledger/migrations/legacy_controller_migration_test.go +++ b/cmd/util/ledger/migrations/legacy_controller_migration_test.go @@ -8,17 +8,38 @@ import ( "github.com/onflow/flow-go-sdk" - state "github.com/onflow/flow-go/fvm/state" + "github.com/onflow/flow-go/engine/execution/state" + fvmstate "github.com/onflow/flow-go/fvm/state" "github.com/onflow/flow-go/ledger" "github.com/onflow/flow-go/ledger/common/utils" ) -func createPayloadKeyWithLegacyController(a flow.Address, key string) ledger.Key { +const LegacyKeyPartController = 1 + +func createPayloadKeyWithLegacyController(a flow.Address, key string, emptyController bool) ledger.Key { + if emptyController { + return ledger.Key{ + KeyParts: []ledger.KeyPart{ + ledger.NewKeyPart(state.KeyPartOwner, a.Bytes()), + ledger.NewKeyPart(LegacyKeyPartController, []byte("")), + ledger.NewKeyPart(state.KeyPartKey, []byte(key)), + }, + } + } return ledger.Key{ KeyParts: []ledger.KeyPart{ - ledger.NewKeyPart(0, a.Bytes()), - ledger.NewKeyPart(1, a.Bytes()), - ledger.NewKeyPart(2, []byte(key)), + ledger.NewKeyPart(state.KeyPartOwner, a.Bytes()), + ledger.NewKeyPart(LegacyKeyPartController, a.Bytes()), + ledger.NewKeyPart(state.KeyPartKey, []byte(key)), + }, + } +} + +func createMigratedPayloadKey(a flow.Address, key string) ledger.Key { + return ledger.Key{ + KeyParts: []ledger.KeyPart{ + ledger.NewKeyPart(state.KeyPartOwner, a.Bytes()), + ledger.NewKeyPart(state.KeyPartKey, []byte(key)), }, } } @@ -32,18 +53,27 @@ func TestLegacyControllerMigration(t *testing.T) { address2 := flow.HexToAddress("0x2") payloads := []ledger.Payload{ - {Key: createAccountPayloadKey(address1, state.KeyStorageUsed), Value: utils.Uint64ToBinary(1)}, - {Key: createPayloadKeyWithLegacyController(address1, state.ContractKey("CoreContract")), Value: utils.Uint64ToBinary(2)}, - {Key: createPayloadKeyWithLegacyController(address1, 
state.KeyContractNames), Value: utils.Uint64ToBinary(3)}, - {Key: createPayloadKeyWithLegacyController(address2, state.KeyPublicKey(1)), Value: utils.Uint64ToBinary(4)}, - {Key: createPayloadKeyWithLegacyController(address2, state.KeyPublicKeyCount), Value: utils.Uint64ToBinary(4)}, + {Key: createPayloadKeyWithLegacyController(address1, fvmstate.KeyStorageUsed, false), Value: utils.Uint64ToBinary(1)}, + {Key: createPayloadKeyWithLegacyController(address1, fvmstate.ContractKey("CoreContract"), true), Value: utils.Uint64ToBinary(2)}, + {Key: createPayloadKeyWithLegacyController(address1, fvmstate.KeyContractNames, true), Value: utils.Uint64ToBinary(3)}, + {Key: createPayloadKeyWithLegacyController(address2, fvmstate.KeyPublicKey(1), true), Value: utils.Uint64ToBinary(4)}, + {Key: createPayloadKeyWithLegacyController(address2, fvmstate.KeyPublicKeyCount, true), Value: utils.Uint64ToBinary(4)}, + } + + expectedKeys := []ledger.Key{ + createMigratedPayloadKey(address1, fvmstate.KeyStorageUsed), + createMigratedPayloadKey(address1, fvmstate.ContractKey("CoreContract")), + createMigratedPayloadKey(address1, fvmstate.KeyContractNames), + createMigratedPayloadKey(address2, fvmstate.KeyPublicKey(1)), + createMigratedPayloadKey(address2, fvmstate.KeyPublicKeyCount), } newPayloads, err := mig.Migrate(payloads) require.NoError(t, err) - require.Equal(t, 5, len(newPayloads)) + require.Equal(t, len(payloads), len(newPayloads)) - for _, p := range newPayloads { - require.Equal(t, 0, len(p.Key.KeyParts[1].Value)) + for i, p := range newPayloads { + require.Equal(t, expectedKeys[i], p.Key) } + } diff --git a/cmd/util/ledger/migrations/storage_fees_migration.go b/cmd/util/ledger/migrations/storage_fees_migration.go index 5fb0fa54f73..ed8d4b9147e 100644 --- a/cmd/util/ledger/migrations/storage_fees_migration.go +++ b/cmd/util/ledger/migrations/storage_fees_migration.go @@ -31,9 +31,8 @@ func StorageFeesMigration(payload []ledger.Payload) ([]ledger.Payload, error) { newPayload = 
append(newPayload, ledger.Payload{ Key: registerIDToKey(flow.RegisterID{ - Owner: s, - Controller: "", - Key: "storage_used", + Owner: s, + Key: "storage_used", }), Value: utils.Uint64ToBinary(u), }) diff --git a/cmd/util/ledger/migrations/storage_used_update_migration_test.go b/cmd/util/ledger/migrations/storage_used_update_migration_test.go index 3fa28c23237..e00b853c1ae 100644 --- a/cmd/util/ledger/migrations/storage_used_update_migration_test.go +++ b/cmd/util/ledger/migrations/storage_used_update_migration_test.go @@ -35,7 +35,7 @@ func TestStorageUsedUpdateMigrationMigration(t *testing.T) { require.NoError(t, err) require.Equal(t, len(migratedPayload), len(payload)) - require.Equal(t, uint64(52), migratedSize) + require.Equal(t, uint64(48), migratedSize) }) t.Run("fix storage used if used to high", func(t *testing.T) { @@ -50,7 +50,7 @@ func TestStorageUsedUpdateMigrationMigration(t *testing.T) { require.NoError(t, err) require.Equal(t, len(migratedPayload), len(payload)) - require.Equal(t, uint64(52), migratedSize) + require.Equal(t, uint64(48), migratedSize) }) t.Run("do not fix storage used if storage used ok", func(t *testing.T) { @@ -65,7 +65,7 @@ func TestStorageUsedUpdateMigrationMigration(t *testing.T) { require.NoError(t, err) require.Equal(t, len(migratedPayload), len(payload)) - require.Equal(t, uint64(52), migratedSize) + require.Equal(t, uint64(48), migratedSize) }) t.Run("error is storage used does not exist", func(t *testing.T) { @@ -81,7 +81,6 @@ func createAccountPayloadKey(a flow.Address, key string) ledger.Key { return ledger.Key{ KeyParts: []ledger.KeyPart{ ledger.NewKeyPart(state.KeyPartOwner, a.Bytes()), - ledger.NewKeyPart(state.KeyPartController, []byte("")), ledger.NewKeyPart(state.KeyPartKey, []byte(key)), }, } diff --git a/cmd/util/ledger/migrations/utils.go b/cmd/util/ledger/migrations/utils.go index 558cb5eadd3..6c49028d2f3 100644 --- a/cmd/util/ledger/migrations/utils.go +++ b/cmd/util/ledger/migrations/utils.go @@ -12,17 +12,15 
@@ import ( ) func KeyToRegisterID(key ledger.Key) (flow.RegisterID, error) { - if len(key.KeyParts) != 3 || + if len(key.KeyParts) != 2 || key.KeyParts[0].Type != state.KeyPartOwner || - key.KeyParts[1].Type != state.KeyPartController || - key.KeyParts[2].Type != state.KeyPartKey { + key.KeyParts[1].Type != state.KeyPartKey { return flow.RegisterID{}, fmt.Errorf("key not in expected format %s", key.String()) } return flow.NewRegisterID( string(key.KeyParts[0].Value), string(key.KeyParts[1].Value), - string(key.KeyParts[2].Value), ), nil } @@ -33,10 +31,6 @@ func registerIDToKey(registerID flow.RegisterID) ledger.Key { Type: state.KeyPartOwner, Value: []byte(registerID.Owner), }, - { - Type: state.KeyPartController, - Value: []byte(registerID.Controller), - }, { Type: state.KeyPartKey, Value: []byte(registerID.Key), diff --git a/cmd/util/ledger/reporters/atree_reporter.go b/cmd/util/ledger/reporters/atree_reporter.go index 369ff2b580a..0db402af746 100644 --- a/cmd/util/ledger/reporters/atree_reporter.go +++ b/cmd/util/ledger/reporters/atree_reporter.go @@ -110,7 +110,6 @@ func getPayloadType(p *ledger.Payload) payloadType { if fvmState.IsFVMStateKey( string(p.Key.KeyParts[0].Value), string(p.Key.KeyParts[1].Value), - string(p.Key.KeyParts[2].Value), ) { return fvmPayloadType } diff --git a/engine/execution/computation/committer/committer_test.go b/engine/execution/computation/committer/committer_test.go index f71e7c3b8d0..894c6281ef2 100644 --- a/engine/execution/computation/committer/committer_test.go +++ b/engine/execution/computation/committer/committer_test.go @@ -37,7 +37,6 @@ func TestLedgerViewCommitter(t *testing.T) { err := view.Set( "owner", - "controller", "key", []byte{1}, ) diff --git a/engine/execution/computation/computer/computer_test.go b/engine/execution/computation/computer/computer_test.go index 8fc3bd7a576..c9c00b2fb7e 100644 --- a/engine/execution/computation/computer/computer_test.go +++ 
b/engine/execution/computation/computer/computer_test.go @@ -78,7 +78,7 @@ func TestBlockExecutor_ExecuteBlock(t *testing.T) { // create a block with 1 collection with 2 transactions block := generateBlock(1, 2, rag) - view := delta.NewView(func(owner, controller, key string) (flow.RegisterValue, error) { + view := delta.NewView(func(owner, key string) (flow.RegisterValue, error) { return nil, nil }) @@ -116,7 +116,7 @@ func TestBlockExecutor_ExecuteBlock(t *testing.T) { Return(nil, nil, nil, nil). Once() // just system chunk - view := delta.NewView(func(owner, controller, key string) (flow.RegisterValue, error) { + view := delta.NewView(func(owner, key string) (flow.RegisterValue, error) { return nil, nil }) @@ -159,7 +159,7 @@ func TestBlockExecutor_ExecuteBlock(t *testing.T) { opts := append(baseOpts, contextOptions...) ctx := fvm.NewContext(zerolog.Nop(), opts...) - view := delta.NewView(func(owner, controller, key string) (flow.RegisterValue, error) { + view := delta.NewView(func(owner, key string) (flow.RegisterValue, error) { return nil, nil }) @@ -227,7 +227,7 @@ func TestBlockExecutor_ExecuteBlock(t *testing.T) { Return(nil, nil, nil, nil). 
Times(collectionCount + 1) - view := delta.NewView(func(owner, controller, key string) (flow.RegisterValue, error) { + view := delta.NewView(func(owner, key string) (flow.RegisterValue, error) { return nil, nil }) @@ -347,7 +347,7 @@ func TestBlockExecutor_ExecuteBlock(t *testing.T) { exe, err := computer.NewBlockComputer(vm, execCtx, metrics.NewNoopCollector(), trace.NewNoopTracer(), zerolog.Nop(), committer.NewNoopViewCommitter()) require.NoError(t, err) - view := delta.NewView(func(owner, controller, key string) (flow.RegisterValue, error) { + view := delta.NewView(func(owner, key string) (flow.RegisterValue, error) { return nil, nil }) @@ -397,7 +397,7 @@ func TestBlockExecutor_ExecuteBlock(t *testing.T) { block := generateBlock(0, 0, rag) - view := delta.NewView(func(owner, controller, key string) (flow.RegisterValue, error) { + view := delta.NewView(func(owner, key string) (flow.RegisterValue, error) { return nil, nil }) @@ -447,11 +447,11 @@ func TestBlockExecutor_ExecuteBlock(t *testing.T) { const transactionCount = 2 block := generateBlock(collectionCount, transactionCount, rag) - view := delta.NewView(func(owner, controller, key string) (flow.RegisterValue, error) { + view := delta.NewView(func(owner, key string) (flow.RegisterValue, error) { return nil, nil }) - err = view.Set(string(address.Bytes()), "", state.KeyAccountStatus, []byte{1}) + err = view.Set(string(address.Bytes()), state.KeyAccountStatus, []byte{1}) require.NoError(t, err) result, err := exe.ExecuteBlock(context.Background(), block, view, programs.NewEmptyPrograms()) @@ -522,11 +522,11 @@ func TestBlockExecutor_ExecuteBlock(t *testing.T) { block := generateBlock(collectionCount, transactionCount, rag) - view := delta.NewView(func(owner, controller, key string) (flow.RegisterValue, error) { + view := delta.NewView(func(owner, key string) (flow.RegisterValue, error) { return nil, nil }) - err = view.Set(string(address.Bytes()), "", state.KeyAccountStatus, []byte{1}) + err = 
view.Set(string(address.Bytes()), state.KeyAccountStatus, []byte{1}) require.NoError(t, err) result, err := exe.ExecuteBlock(context.Background(), block, view, programs.NewEmptyPrograms()) @@ -660,8 +660,8 @@ func Test_AccountStatusRegistersAreIncluded(t *testing.T) { key, err := unittest.AccountKeyDefaultFixture() require.NoError(t, err) - view := delta.NewView(func(owner, controller, key string) (flow.RegisterValue, error) { - return ledger.Get(owner, controller, key) + view := delta.NewView(func(owner, key string) (flow.RegisterValue, error) { + return ledger.Get(owner, key) }) sth := state.NewStateHolder(state.NewState(view)) accounts := state.NewAccounts(sth) @@ -687,9 +687,8 @@ func Test_AccountStatusRegistersAreIncluded(t *testing.T) { // make sure check for account status has been registered id := flow.RegisterID{ - Owner: string(address.Bytes()), - Controller: "", - Key: state.KeyAccountStatus, + Owner: string(address.Bytes()), + Key: state.KeyAccountStatus, } require.Contains(t, registerTouches, id.String()) diff --git a/engine/execution/computation/manager_test.go b/engine/execution/computation/manager_test.go index 84531128834..b1be0b4ee7e 100644 --- a/engine/execution/computation/manager_test.go +++ b/engine/execution/computation/manager_test.go @@ -265,7 +265,7 @@ func TestExecuteScript_BalanceScriptFailsIfViewIsEmpty(t *testing.T) { vm := fvm.NewVirtualMachine(rt) - view := delta.NewView(func(owner, controller, key string) (flow.RegisterValue, error) { + view := delta.NewView(func(owner, key string) (flow.RegisterValue, error) { return nil, fmt.Errorf("error getting register") }) @@ -464,7 +464,7 @@ func (f *FakeUploader) Upload(computationResult *execution.ComputationResult) er } func noopView() *delta.View { - return delta.NewView(func(_, _, _ string) (flow.RegisterValue, error) { + return delta.NewView(func(_, _ string) (flow.RegisterValue, error) { return nil, nil }) } diff --git a/engine/execution/ingestion/engine.go 
b/engine/execution/ingestion/engine.go index d318ca17f30..85cf511b2aa 100644 --- a/engine/execution/ingestion/engine.go +++ b/engine/execution/ingestion/engine.go @@ -1069,7 +1069,7 @@ func (e *Engine) ExecuteScriptAtBlockID(ctx context.Context, script []byte, argu return e.computationManager.ExecuteScript(ctx, script, arguments, block, blockView) } -func (e *Engine) GetRegisterAtBlockID(ctx context.Context, owner, controller, key []byte, blockID flow.Identifier) ([]byte, error) { +func (e *Engine) GetRegisterAtBlockID(ctx context.Context, owner, key []byte, blockID flow.Identifier) ([]byte, error) { stateCommit, err := e.execState.StateCommitmentByBlockID(ctx, blockID) if err != nil { @@ -1078,9 +1078,9 @@ func (e *Engine) GetRegisterAtBlockID(ctx context.Context, owner, controller, ke blockView := e.execState.NewView(stateCommit) - data, err := blockView.Get(string(owner), string(controller), string(key)) + data, err := blockView.Get(string(owner), string(key)) if err != nil { - return nil, fmt.Errorf("failed to get the register (owner : %s, controller: %s, key: %s): %w", hex.EncodeToString(owner), hex.EncodeToString(owner), string(key), err) + return nil, fmt.Errorf("failed to get the register (owner : %s, key: %s): %w", hex.EncodeToString(owner), string(key), err) } return data, nil diff --git a/engine/execution/ingestion/ingest_rpc.go b/engine/execution/ingestion/ingest_rpc.go index 69d09e83692..a0c71c51db4 100644 --- a/engine/execution/ingestion/ingest_rpc.go +++ b/engine/execution/ingestion/ingest_rpc.go @@ -16,5 +16,5 @@ type IngestRPC interface { GetAccount(ctx context.Context, address flow.Address, blockID flow.Identifier) (*flow.Account, error) // GetRegisterAtBlockID returns the value of a register at the given Block id (if available) - GetRegisterAtBlockID(ctx context.Context, owner, controller, key []byte, blockID flow.Identifier) ([]byte, error) + GetRegisterAtBlockID(ctx context.Context, owner, key []byte, blockID flow.Identifier) ([]byte, error) } 
diff --git a/engine/execution/ingestion/mock/ingest_rpc.go b/engine/execution/ingestion/mock/ingest_rpc.go index 7baf2808cc1..9eab12cce8d 100644 --- a/engine/execution/ingestion/mock/ingest_rpc.go +++ b/engine/execution/ingestion/mock/ingest_rpc.go @@ -61,13 +61,13 @@ func (_m *IngestRPC) GetAccount(ctx context.Context, address flow.Address, block return r0, r1 } -// GetRegisterAtBlockID provides a mock function with given fields: ctx, owner, controller, key, blockID -func (_m *IngestRPC) GetRegisterAtBlockID(ctx context.Context, owner []byte, controller []byte, key []byte, blockID flow.Identifier) ([]byte, error) { - ret := _m.Called(ctx, owner, controller, key, blockID) +// GetRegisterAtBlockID provides a mock function with given fields: ctx, owner, key, blockID +func (_m *IngestRPC) GetRegisterAtBlockID(ctx context.Context, owner []byte, key []byte, blockID flow.Identifier) ([]byte, error) { + ret := _m.Called(ctx, owner, key, blockID) var r0 []byte - if rf, ok := ret.Get(0).(func(context.Context, []byte, []byte, []byte, flow.Identifier) []byte); ok { - r0 = rf(ctx, owner, controller, key, blockID) + if rf, ok := ret.Get(0).(func(context.Context, []byte, []byte, flow.Identifier) []byte); ok { + r0 = rf(ctx, owner, key, blockID) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]byte) @@ -75,8 +75,8 @@ func (_m *IngestRPC) GetRegisterAtBlockID(ctx context.Context, owner []byte, con } var r1 error - if rf, ok := ret.Get(1).(func(context.Context, []byte, []byte, []byte, flow.Identifier) error); ok { - r1 = rf(ctx, owner, controller, key, blockID) + if rf, ok := ret.Get(1).(func(context.Context, []byte, []byte, flow.Identifier) error); ok { + r1 = rf(ctx, owner, key, blockID) } else { r1 = ret.Error(1) } diff --git a/engine/execution/rpc/engine.go b/engine/execution/rpc/engine.go index ecc27371a53..4826847b145 100644 --- a/engine/execution/rpc/engine.go +++ b/engine/execution/rpc/engine.go @@ -204,12 +204,11 @@ func (h *handler) GetRegisterAtBlockID( } owner := 
req.GetRegisterOwner() - controller := req.GetRegisterController() key := req.GetRegisterKey() - value, err := h.engine.GetRegisterAtBlockID(ctx, owner, controller, key, blockID) + value, err := h.engine.GetRegisterAtBlockID(ctx, owner, key, blockID) if err != nil { - return nil, status.Errorf(codes.Internal, "failed to collect register (owner : %s, controller: %s, key: %s): %v", hex.EncodeToString(owner), hex.EncodeToString(owner), string(key), err) + return nil, status.Errorf(codes.Internal, "failed to collect register (owner : %s, key: %s): %v", hex.EncodeToString(owner), string(key), err) } res := &execution.GetRegisterAtBlockIDResponse{ diff --git a/engine/execution/rpc/engine_test.go b/engine/execution/rpc/engine_test.go index f1331655306..e25c56af407 100644 --- a/engine/execution/rpc/engine_test.go +++ b/engine/execution/rpc/engine_test.go @@ -292,7 +292,6 @@ func (suite *Suite) TestGetRegisterAtBlockID() { id := unittest.IdentifierFixture() serviceAddress := flow.Mainnet.Chain().ServiceAddress() - controller := []byte("") validKey := []byte("exists") mockEngine := new(ingestion.IngestRPC) @@ -303,21 +302,20 @@ func (suite *Suite) TestGetRegisterAtBlockID() { chain: flow.Mainnet, } - createReq := func(id, owner, controller, key []byte) *execution.GetRegisterAtBlockIDRequest { + createReq := func(id, owner, key []byte) *execution.GetRegisterAtBlockIDRequest { return &execution.GetRegisterAtBlockIDRequest{ - RegisterOwner: owner, - RegisterController: controller, - RegisterKey: key, - BlockId: id, + RegisterOwner: owner, + RegisterKey: key, + BlockId: id, } } suite.Run("happy path with valid request", func() { // setup mock expectations - mockEngine.On("GetRegisterAtBlockID", mock.Anything, serviceAddress.Bytes(), controller, validKey, id).Return([]uint8{1}, nil).Once() + mockEngine.On("GetRegisterAtBlockID", mock.Anything, serviceAddress.Bytes(), validKey, id).Return([]uint8{1}, nil).Once() - req := createReq(id[:], serviceAddress.Bytes(), controller, 
validKey) + req := createReq(id[:], serviceAddress.Bytes(), validKey) resp, err := handler.GetRegisterAtBlockID(context.Background(), req) suite.Require().NoError(err) @@ -330,9 +328,9 @@ func (suite *Suite) TestGetRegisterAtBlockID() { suite.Run("invalid request with bad address", func() { badOwner := []byte("\uFFFD") // return error - mockEngine.On("GetRegisterAtBlockID", mock.Anything, badOwner, controller, validKey, id).Return(nil, errors.New("error")).Once() + mockEngine.On("GetRegisterAtBlockID", mock.Anything, badOwner, validKey, id).Return(nil, errors.New("error")).Once() - req := createReq(id[:], badOwner, controller, validKey) + req := createReq(id[:], badOwner, validKey) _, err := handler.GetRegisterAtBlockID(context.Background(), req) suite.Require().Error(err) }) diff --git a/engine/execution/state/bootstrap/bootstrap_test.go b/engine/execution/state/bootstrap/bootstrap_test.go index 2bb75cebaf5..c37d832bbd7 100644 --- a/engine/execution/state/bootstrap/bootstrap_test.go +++ b/engine/execution/state/bootstrap/bootstrap_test.go @@ -47,7 +47,7 @@ func TestBootstrapLedger(t *testing.T) { } func TestBootstrapLedger_ZeroTokenSupply(t *testing.T) { - expectedStateCommitmentBytes, _ := hex.DecodeString("e4b748da1215452fcaa60f5549d261bf3f76a8a6e9a8388a6d8ee73193b4e331") + expectedStateCommitmentBytes, _ := hex.DecodeString("4757ff7f70776434bd67ad209f03cdf234bb41aa12ac4b0434feb79ff42a8bc5") expectedStateCommitment, err := flow.ToStateCommitment(expectedStateCommitmentBytes) require.NoError(t, err) diff --git a/engine/execution/state/delta/delta.go b/engine/execution/state/delta/delta.go index b4f956466cc..b4eeec632c7 100644 --- a/engine/execution/state/delta/delta.go +++ b/engine/execution/state/delta/delta.go @@ -20,8 +20,8 @@ func NewDelta() Delta { } } -func toString(owner, controller, key string) string { - register := flow.NewRegisterID(owner, controller, key) +func toString(owner, key string) string { + register := flow.NewRegisterID(owner, key) return 
register.String() } @@ -29,16 +29,16 @@ func toString(owner, controller, key string) string { // // This function will return nil if the given key has been deleted in this delta. // Second return parameters indicated if the value has been set/deleted in this delta -func (d Delta) Get(owner, controller, key string) (flow.RegisterValue, bool) { - value, set := d.Data[toString(owner, controller, key)] +func (d Delta) Get(owner, key string) (flow.RegisterValue, bool) { + value, set := d.Data[toString(owner, key)] return value.Value, set } // Set records an update in this delta. -func (d Delta) Set(owner, controller, key string, value flow.RegisterValue) { - k := toString(owner, controller, key) +func (d Delta) Set(owner, key string, value flow.RegisterValue) { + k := toString(owner, key) d.Data[k] = flow.RegisterEntry{ - Key: flow.NewRegisterID(owner, controller, key), + Key: flow.NewRegisterID(owner, key), Value: value, } } diff --git a/engine/execution/state/delta/delta_test.go b/engine/execution/state/delta/delta_test.go index fc5b6f421ca..c5ccd029ab6 100644 --- a/engine/execution/state/delta/delta_test.go +++ b/engine/execution/state/delta/delta_test.go @@ -16,7 +16,7 @@ func TestDelta_Get(t *testing.T) { t.Run("ValueNotSet", func(t *testing.T) { d := delta.NewDelta() - b, exists := d.Get(registerID1, "", "") + b, exists := d.Get(registerID1, "") assert.Nil(t, b) assert.False(t, exists) }) @@ -24,9 +24,9 @@ func TestDelta_Get(t *testing.T) { t.Run("ValueSet", func(t *testing.T) { d := delta.NewDelta() - d.Set(registerID1, "", "", []byte("apple")) + d.Set(registerID1, "", []byte("apple")) - b, exists := d.Get(registerID1, "", "") + b, exists := d.Get(registerID1, "") assert.Equal(t, flow.RegisterValue("apple"), b) assert.True(t, exists) }) @@ -37,15 +37,15 @@ func TestDelta_Set(t *testing.T) { d := delta.NewDelta() - d.Set(registerID1, "", "", []byte("apple")) + d.Set(registerID1, "", []byte("apple")) - b1, exists := d.Get(registerID1, "", "") + b1, exists := 
d.Get(registerID1, "") assert.Equal(t, []byte("apple"), b1) assert.True(t, exists) - d.Set(registerID1, "", "", []byte("orange")) + d.Set(registerID1, "", []byte("orange")) - b2, exists := d.Get(registerID1, "", "") + b2, exists := d.Get(registerID1, "") assert.Equal(t, []byte("orange"), b2) assert.True(t, exists) } @@ -59,15 +59,15 @@ func TestDelta_MergeWith(t *testing.T) { d1 := delta.NewDelta() d2 := delta.NewDelta() - d1.Set(registerID1, "", "", []byte("apple")) - d2.Set(registerID2, "", "", []byte("carrot")) + d1.Set(registerID1, "", []byte("apple")) + d2.Set(registerID2, "", []byte("carrot")) d1.MergeWith(d2) - b1, _ := d1.Get(registerID1, "", "") + b1, _ := d1.Get(registerID1, "") assert.Equal(t, flow.RegisterValue("apple"), b1) - b2, _ := d1.Get(registerID2, "", "") + b2, _ := d1.Get(registerID2, "") assert.Equal(t, flow.RegisterValue("carrot"), b2) }) @@ -75,12 +75,12 @@ func TestDelta_MergeWith(t *testing.T) { d1 := delta.NewDelta() d2 := delta.NewDelta() - d1.Set(registerID1, "", "", flow.RegisterValue("apple")) - d2.Set(registerID1, "", "", flow.RegisterValue("orange")) + d1.Set(registerID1, "", flow.RegisterValue("apple")) + d2.Set(registerID1, "", flow.RegisterValue("orange")) d1.MergeWith(d2) - b, _ := d1.Get(registerID1, "", "") + b, _ := d1.Get(registerID1, "") assert.Equal(t, flow.RegisterValue("orange"), b) }) @@ -88,14 +88,14 @@ func TestDelta_MergeWith(t *testing.T) { d1 := delta.NewDelta() d2 := delta.NewDelta() - d1.Set(registerID1, "", "", flow.RegisterValue("apple")) - d1.Set(registerID1, "", "", nil) + d1.Set(registerID1, "", flow.RegisterValue("apple")) + d1.Set(registerID1, "", nil) - d2.Set(registerID1, "", "", flow.RegisterValue("orange")) + d2.Set(registerID1, "", flow.RegisterValue("orange")) d1.MergeWith(d2) - b, _ := d1.Get(registerID1, "", "") + b, _ := d1.Get(registerID1, "") assert.Equal(t, flow.RegisterValue("orange"), b) }) @@ -103,13 +103,13 @@ func TestDelta_MergeWith(t *testing.T) { d1 := delta.NewDelta() d2 := 
delta.NewDelta() - d1.Set(registerID1, "", "", flow.RegisterValue("apple")) + d1.Set(registerID1, "", flow.RegisterValue("apple")) - d2.Set(registerID1, "", "", nil) + d2.Set(registerID1, "", nil) d1.MergeWith(d2) - b, exists := d1.Get(registerID1, "", "") + b, exists := d1.Get(registerID1, "") assert.Nil(t, b) assert.True(t, exists) }) @@ -121,11 +121,11 @@ func TestDelta_RegisterUpdatesAreSorted(t *testing.T) { data := make(flow.RegisterEntries, 5) - data[0].Key = flow.NewRegisterID("a", "a", "1") - data[1].Key = flow.NewRegisterID("a", "b", "1") - data[2].Key = flow.NewRegisterID("b", "a", "1") - data[3].Key = flow.NewRegisterID("b", "b", "1") - data[4].Key = flow.NewRegisterID("b", "b", "2") + data[0].Key = flow.NewRegisterID("a", "1") + data[1].Key = flow.NewRegisterID("b", "1") + data[2].Key = flow.NewRegisterID("c", "1") + data[3].Key = flow.NewRegisterID("d", "1") + data[4].Key = flow.NewRegisterID("d", "2") data[0].Value = flow.RegisterValue("a") data[1].Value = flow.RegisterValue("b") @@ -136,11 +136,11 @@ func TestDelta_RegisterUpdatesAreSorted(t *testing.T) { sort.Sort(data) // set in random order - d.Set(data[2].Key.Owner, data[2].Key.Controller, data[2].Key.Key, data[2].Value) - d.Set(data[1].Key.Owner, data[1].Key.Controller, data[1].Key.Key, data[1].Value) - d.Set(data[3].Key.Owner, data[3].Key.Controller, data[3].Key.Key, data[3].Value) - d.Set(data[0].Key.Owner, data[0].Key.Controller, data[0].Key.Key, data[0].Value) - d.Set(data[4].Key.Owner, data[4].Key.Controller, data[4].Key.Key, data[4].Value) + d.Set(data[2].Key.Owner, data[2].Key.Key, data[2].Value) + d.Set(data[1].Key.Owner, data[1].Key.Key, data[1].Value) + d.Set(data[3].Key.Owner, data[3].Key.Key, data[3].Value) + d.Set(data[0].Key.Owner, data[0].Key.Key, data[0].Value) + d.Set(data[4].Key.Owner, data[4].Key.Key, data[4].Value) retKeys, retValues := d.RegisterUpdates() diff --git a/engine/execution/state/delta/view.go b/engine/execution/state/delta/view.go index 5810af39643..06d2b82a606 
100644 --- a/engine/execution/state/delta/view.go +++ b/engine/execution/state/delta/view.go @@ -10,7 +10,7 @@ import ( ) // GetRegisterFunc is a function that returns the value for a register. -type GetRegisterFunc func(owner, controller, key string) (flow.RegisterValue, error) +type GetRegisterFunc func(owner, key string) (flow.RegisterValue, error) // A View is a read-only view into a ledger stored in an underlying data source. // @@ -40,7 +40,7 @@ type SpockSnapshot struct { SpockSecret []byte } -func AlwaysEmptyGetRegisterFunc(owner, controller, key string) (flow.RegisterValue, error) { +func AlwaysEmptyGetRegisterFunc(owner, key string) (flow.RegisterValue, error) { return nil, nil } @@ -118,13 +118,13 @@ func (v *View) RegisterUpdates() ([]flow.RegisterID, []flow.RegisterValue) { // // This function will return an error if it fails to read from the underlying // data source for this view. -func (v *View) Get(owner, controller, key string) (flow.RegisterValue, error) { +func (v *View) Get(owner, key string) (flow.RegisterValue, error) { var err error - registerID := flow.NewRegisterID(owner, controller, key) + registerID := flow.NewRegisterID(owner, key) - value, exists := v.delta.Get(owner, controller, key) + value, exists := v.delta.Get(owner, key) if !exists { - value, err = v.readFunc(owner, controller, key) + value, err = v.readFunc(owner, key) if err != nil { return nil, fmt.Errorf("get register failed: %w", err) } @@ -143,18 +143,18 @@ func (v *View) Get(owner, controller, key string) (flow.RegisterValue, error) { } // Peek reads the value without registering the read, as when used as parent read function -func (v *View) Peek(owner, controller, key string) (flow.RegisterValue, error) { - value, exists := v.delta.Get(owner, controller, key) +func (v *View) Peek(owner, key string) (flow.RegisterValue, error) { + value, exists := v.delta.Get(owner, key) if exists { return value, nil } - return v.readFunc(owner, controller, key) + return v.readFunc(owner, 
key) } // Set sets a register value in this view. -func (v *View) Set(owner, controller, key string, value flow.RegisterValue) error { - registerID := flow.NewRegisterID(owner, controller, key) +func (v *View) Set(owner, key string, value flow.RegisterValue) error { + registerID := flow.NewRegisterID(owner, key) // every time we write something to delta (order preserving) we update // the spock secret with both the register ID and value. @@ -171,14 +171,14 @@ func (v *View) Set(owner, controller, key string, value flow.RegisterValue) erro // capture register touch v.regTouchSet[registerID.String()] = registerID // add key value to delta - v.delta.Set(owner, controller, key, value) + v.delta.Set(owner, key, value) return nil } // Touch explicitly adds a register to the touched registers set. -func (v *View) Touch(owner, controller, key string) error { +func (v *View) Touch(owner, key string) error { - k := flow.NewRegisterID(owner, controller, key) + k := flow.NewRegisterID(owner, key) // capture register touch v.regTouchSet[k.String()] = k @@ -189,8 +189,8 @@ func (v *View) Touch(owner, controller, key string) error { } // Delete removes a register in this view. -func (v *View) Delete(owner, controller, key string) error { - return v.Set(owner, controller, key, nil) +func (v *View) Delete(owner, key string) error { + return v.Set(owner, key, nil) } // Delta returns a record of the registers that were mutated in this view. 
diff --git a/engine/execution/state/delta/view_test.go b/engine/execution/state/delta/view_test.go index f5ed7d4738c..165cfb6f47e 100644 --- a/engine/execution/state/delta/view_test.go +++ b/engine/execution/state/delta/view_test.go @@ -15,30 +15,30 @@ func TestViewGet(t *testing.T) { registerID := "fruit" t.Run("ValueNotSet", func(t *testing.T) { - v := delta.NewView(func(owner, controller, key string) (flow.RegisterValue, error) { + v := delta.NewView(func(owner, key string) (flow.RegisterValue, error) { return nil, nil }) - b, err := v.Get(registerID, "", "") + b, err := v.Get(registerID, "") assert.NoError(t, err) assert.Nil(t, b) }) t.Run("ValueNotInCache", func(t *testing.T) { - v := delta.NewView(func(owner, controller, key string) (flow.RegisterValue, error) { + v := delta.NewView(func(owner, key string) (flow.RegisterValue, error) { if owner == registerID { return flow.RegisterValue("orange"), nil } return nil, nil }) - b, err := v.Get(registerID, "", "") + b, err := v.Get(registerID, "") assert.NoError(t, err) assert.Equal(t, flow.RegisterValue("orange"), b) }) t.Run("ValueInCache", func(t *testing.T) { - v := delta.NewView(func(owner, controller, key string) (flow.RegisterValue, error) { + v := delta.NewView(func(owner, key string) (flow.RegisterValue, error) { if owner == registerID { return flow.RegisterValue("orange"), nil } @@ -46,10 +46,10 @@ func TestViewGet(t *testing.T) { return nil, nil }) - err := v.Set(registerID, "", "", flow.RegisterValue("apple")) + err := v.Set(registerID, "", flow.RegisterValue("apple")) assert.NoError(t, err) - b, err := v.Get(registerID, "", "") + b, err := v.Get(registerID, "") assert.NoError(t, err) assert.Equal(t, flow.RegisterValue("apple"), b) }) @@ -58,43 +58,43 @@ func TestViewGet(t *testing.T) { func TestViewSet(t *testing.T) { registerID := "fruit" - v := delta.NewView(func(owner, controller, key string) (flow.RegisterValue, error) { + v := delta.NewView(func(owner, key string) (flow.RegisterValue, error) { 
return nil, nil }) - err := v.Set(registerID, "", "", flow.RegisterValue("apple")) + err := v.Set(registerID, "", flow.RegisterValue("apple")) assert.NoError(t, err) - b1, err := v.Get(registerID, "", "") + b1, err := v.Get(registerID, "") assert.NoError(t, err) assert.Equal(t, flow.RegisterValue("apple"), b1) - err = v.Set(registerID, "", "", flow.RegisterValue("orange")) + err = v.Set(registerID, "", flow.RegisterValue("orange")) assert.NoError(t, err) - b2, err := v.Get(registerID, "", "") + b2, err := v.Get(registerID, "") assert.NoError(t, err) assert.Equal(t, flow.RegisterValue("orange"), b2) t.Run("AfterDelete", func(t *testing.T) { - v := delta.NewView(func(owner, controller, key string) (flow.RegisterValue, error) { + v := delta.NewView(func(owner, key string) (flow.RegisterValue, error) { return nil, nil }) - err := v.Set(registerID, "", "", flow.RegisterValue("apple")) + err := v.Set(registerID, "", flow.RegisterValue("apple")) assert.NoError(t, err) - err = v.Delete(registerID, "", "") + err = v.Delete(registerID, "") assert.NoError(t, err) - err = v.Set(registerID, "", "", flow.RegisterValue("orange")) + err = v.Set(registerID, "", flow.RegisterValue("orange")) assert.NoError(t, err) - b, err := v.Get(registerID, "", "") + b, err := v.Get(registerID, "") assert.NoError(t, err) assert.Equal(t, flow.RegisterValue("orange"), b) }) t.Run("SpockSecret", func(t *testing.T) { - v := delta.NewView(func(owner, controller, key string) (flow.RegisterValue, error) { + v := delta.NewView(func(owner, key string) (flow.RegisterValue, error) { return nil, nil }) @@ -102,7 +102,7 @@ func TestViewSet(t *testing.T) { assert.Equal(t, v.SpockSecret(), v.Interactions().SpockSecret) }) - v = delta.NewView(func(owner, controller, key string) (flow.RegisterValue, error) { + v = delta.NewView(func(owner, key string) (flow.RegisterValue, error) { return nil, nil }) @@ -111,7 +111,7 @@ func TestViewSet(t *testing.T) { registerID3 := "reg3" // prepare the registerID bytes - 
register := flow.NewRegisterID("", "", "") + register := flow.NewRegisterID("", "") register.Owner = registerID1 registerID1Bytes := register.Bytes() register.Owner = registerID2 @@ -122,44 +122,44 @@ func TestViewSet(t *testing.T) { // this part checks that spocks ordering be based // on update orders and not registerIDs expSpock := hash.NewSHA3_256() - err = v.Set(registerID2, "", "", flow.RegisterValue("1")) + err = v.Set(registerID2, "", flow.RegisterValue("1")) require.NoError(t, err) hashIt(t, expSpock, registerID2Bytes) hashIt(t, expSpock, []byte("1")) - err = v.Set(registerID3, "", "", flow.RegisterValue("2")) + err = v.Set(registerID3, "", flow.RegisterValue("2")) require.NoError(t, err) hashIt(t, expSpock, registerID3Bytes) hashIt(t, expSpock, []byte("2")) - err = v.Set(registerID1, "", "", flow.RegisterValue("3")) + err = v.Set(registerID1, "", flow.RegisterValue("3")) require.NoError(t, err) hashIt(t, expSpock, registerID1Bytes) hashIt(t, expSpock, []byte("3")) - _, err := v.Get(registerID1, "", "") + _, err := v.Get(registerID1, "") require.NoError(t, err) hashIt(t, expSpock, registerID1Bytes) // this part uses the delete functionality // to check that only the register ID is written to the spock secret - err = v.Delete(registerID1, "", "") + err = v.Delete(registerID1, "") require.NoError(t, err) hashIt(t, expSpock, registerID1Bytes) // this part checks that it always update the // intermediate values and not just the final values - err = v.Set(registerID1, "", "", flow.RegisterValue("4")) + err = v.Set(registerID1, "", flow.RegisterValue("4")) require.NoError(t, err) hashIt(t, expSpock, registerID1Bytes) hashIt(t, expSpock, []byte("4")) - err = v.Set(registerID1, "", "", flow.RegisterValue("5")) + err = v.Set(registerID1, "", flow.RegisterValue("5")) require.NoError(t, err) hashIt(t, expSpock, registerID1Bytes) hashIt(t, expSpock, []byte("5")) - err = v.Set(registerID3, "", "", flow.RegisterValue("6")) + err = v.Set(registerID3, "", 
flow.RegisterValue("6")) require.NoError(t, err) hashIt(t, expSpock, registerID3Bytes) hashIt(t, expSpock, []byte("6")) @@ -177,24 +177,24 @@ func TestView_Delete(t *testing.T) { registerID := "fruit" t.Run("ValueNotSet", func(t *testing.T) { - v := delta.NewView(func(owner, controller, key string) (flow.RegisterValue, error) { + v := delta.NewView(func(owner, key string) (flow.RegisterValue, error) { return nil, nil }) - b1, err := v.Get(registerID, "", "") + b1, err := v.Get(registerID, "") assert.NoError(t, err) assert.Nil(t, b1) - err = v.Delete(registerID, "", "") + err = v.Delete(registerID, "") assert.NoError(t, err) - b2, err := v.Get(registerID, "", "") + b2, err := v.Get(registerID, "") assert.NoError(t, err) assert.Nil(t, b2) }) t.Run("ValueInCache", func(t *testing.T) { - v := delta.NewView(func(owner, controller, key string) (flow.RegisterValue, error) { + v := delta.NewView(func(owner, key string) (flow.RegisterValue, error) { if owner == registerID { return flow.RegisterValue("orange"), nil } @@ -202,17 +202,17 @@ func TestView_Delete(t *testing.T) { return nil, nil }) - err := v.Set(registerID, "", "", flow.RegisterValue("apple")) + err := v.Set(registerID, "", flow.RegisterValue("apple")) assert.NoError(t, err) - b1, err := v.Get(registerID, "", "") + b1, err := v.Get(registerID, "") assert.NoError(t, err) assert.Equal(t, flow.RegisterValue("apple"), b1) - err = v.Delete(registerID, "", "") + err = v.Delete(registerID, "") assert.NoError(t, err) - b2, err := v.Get(registerID, "", "") + b2, err := v.Get(registerID, "") assert.NoError(t, err) assert.Nil(t, b2) }) @@ -226,146 +226,146 @@ func TestViewMergeView(t *testing.T) { registerID3 := "diary" t.Run("EmptyView", func(t *testing.T) { - v := delta.NewView(func(owner, controller, key string) (flow.RegisterValue, error) { + v := delta.NewView(func(owner, key string) (flow.RegisterValue, error) { return nil, nil }) chView := v.NewChild() - err := chView.Set(registerID1, "", "", 
flow.RegisterValue("apple")) + err := chView.Set(registerID1, "", flow.RegisterValue("apple")) assert.NoError(t, err) - err = chView.Set(registerID2, "", "", flow.RegisterValue("carrot")) + err = chView.Set(registerID2, "", flow.RegisterValue("carrot")) assert.NoError(t, err) err = v.MergeView(chView) assert.NoError(t, err) - b1, err := v.Get(registerID1, "", "") + b1, err := v.Get(registerID1, "") assert.NoError(t, err) assert.Equal(t, flow.RegisterValue("apple"), b1) - b2, err := v.Get(registerID2, "", "") + b2, err := v.Get(registerID2, "") assert.NoError(t, err) assert.Equal(t, flow.RegisterValue("carrot"), b2) }) t.Run("EmptyDelta", func(t *testing.T) { - v := delta.NewView(func(owner, controller, key string) (flow.RegisterValue, error) { + v := delta.NewView(func(owner, key string) (flow.RegisterValue, error) { return nil, nil }) - err := v.Set(registerID1, "", "", flow.RegisterValue("apple")) + err := v.Set(registerID1, "", flow.RegisterValue("apple")) assert.NoError(t, err) - err = v.Set(registerID2, "", "", flow.RegisterValue("carrot")) + err = v.Set(registerID2, "", flow.RegisterValue("carrot")) assert.NoError(t, err) chView := v.NewChild() err = v.MergeView(chView) assert.NoError(t, err) - b1, err := v.Get(registerID1, "", "") + b1, err := v.Get(registerID1, "") assert.NoError(t, err) assert.Equal(t, flow.RegisterValue("apple"), b1) - b2, err := v.Get(registerID2, "", "") + b2, err := v.Get(registerID2, "") assert.NoError(t, err) assert.Equal(t, flow.RegisterValue("carrot"), b2) }) t.Run("NoCollisions", func(t *testing.T) { - v := delta.NewView(func(owner, controller, key string) (flow.RegisterValue, error) { + v := delta.NewView(func(owner, key string) (flow.RegisterValue, error) { return nil, nil }) - err := v.Set(registerID1, "", "", flow.RegisterValue("apple")) + err := v.Set(registerID1, "", flow.RegisterValue("apple")) assert.NoError(t, err) chView := v.NewChild() - err = chView.Set(registerID2, "", "", flow.RegisterValue("carrot")) + err = 
chView.Set(registerID2, "", flow.RegisterValue("carrot")) assert.NoError(t, err) err = v.MergeView(chView) assert.NoError(t, err) - b1, err := v.Get(registerID1, "", "") + b1, err := v.Get(registerID1, "") assert.NoError(t, err) assert.Equal(t, flow.RegisterValue("apple"), b1) - b2, err := v.Get(registerID2, "", "") + b2, err := v.Get(registerID2, "") assert.NoError(t, err) assert.Equal(t, flow.RegisterValue("carrot"), b2) }) t.Run("OverwriteSetValue", func(t *testing.T) { - v := delta.NewView(func(owner, controller, key string) (flow.RegisterValue, error) { + v := delta.NewView(func(owner, key string) (flow.RegisterValue, error) { return nil, nil }) - err := v.Set(registerID1, "", "", flow.RegisterValue("apple")) + err := v.Set(registerID1, "", flow.RegisterValue("apple")) assert.NoError(t, err) chView := v.NewChild() - err = chView.Set(registerID1, "", "", flow.RegisterValue("orange")) + err = chView.Set(registerID1, "", flow.RegisterValue("orange")) assert.NoError(t, err) err = v.MergeView(chView) assert.NoError(t, err) - b, err := v.Get(registerID1, "", "") + b, err := v.Get(registerID1, "") assert.NoError(t, err) assert.Equal(t, flow.RegisterValue("orange"), b) }) t.Run("OverwriteDeletedValue", func(t *testing.T) { - v := delta.NewView(func(owner, controller, key string) (flow.RegisterValue, error) { + v := delta.NewView(func(owner, key string) (flow.RegisterValue, error) { return nil, nil }) - err := v.Set(registerID1, "", "", flow.RegisterValue("apple")) + err := v.Set(registerID1, "", flow.RegisterValue("apple")) assert.NoError(t, err) - err = v.Delete(registerID1, "", "") + err = v.Delete(registerID1, "") assert.NoError(t, err) chView := v.NewChild() - err = chView.Set(registerID1, "", "", flow.RegisterValue("orange")) + err = chView.Set(registerID1, "", flow.RegisterValue("orange")) assert.NoError(t, err) err = v.MergeView(chView) assert.NoError(t, err) - b, err := v.Get(registerID1, "", "") + b, err := v.Get(registerID1, "") assert.NoError(t, err) 
assert.Equal(t, flow.RegisterValue("orange"), b) }) t.Run("DeleteSetValue", func(t *testing.T) { - v := delta.NewView(func(owner, controller, key string) (flow.RegisterValue, error) { + v := delta.NewView(func(owner, key string) (flow.RegisterValue, error) { return nil, nil }) - err := v.Set(registerID1, "", "", flow.RegisterValue("apple")) + err := v.Set(registerID1, "", flow.RegisterValue("apple")) assert.NoError(t, err) chView := v.NewChild() - err = chView.Delete(registerID1, "", "") + err = chView.Delete(registerID1, "") assert.NoError(t, err) err = v.MergeView(chView) assert.NoError(t, err) - b, err := v.Get(registerID1, "", "") + b, err := v.Get(registerID1, "") assert.NoError(t, err) assert.Nil(t, b) }) t.Run("SpockDataMerge", func(t *testing.T) { - v := delta.NewView(func(owner, controller, key string) (flow.RegisterValue, error) { + v := delta.NewView(func(owner, key string) (flow.RegisterValue, error) { return nil, nil }) - register := flow.NewRegisterID("", "", "") + register := flow.NewRegisterID("", "") register.Owner = registerID1 registerID1Bytes := register.Bytes() register.Owner = registerID2 registerID2Bytes := register.Bytes() expSpock1 := hash.NewSHA3_256() - err := v.Set(registerID1, "", "", flow.RegisterValue("apple")) + err := v.Set(registerID1, "", flow.RegisterValue("apple")) assert.NoError(t, err) hashIt(t, expSpock1, registerID1Bytes) hashIt(t, expSpock1, []byte("apple")) @@ -373,7 +373,7 @@ func TestViewMergeView(t *testing.T) { expSpock2 := hash.NewSHA3_256() chView := v.NewChild() - err = chView.Set(registerID2, "", "", flow.RegisterValue("carrot")) + err = chView.Set(registerID2, "", flow.RegisterValue("carrot")) require.NoError(t, err) hashIt(t, expSpock2, registerID2Bytes) hashIt(t, expSpock2, []byte("carrot")) @@ -389,17 +389,17 @@ func TestViewMergeView(t *testing.T) { }) t.Run("RegisterTouchesDataMerge", func(t *testing.T) { - v := delta.NewView(func(owner, controller, key string) (flow.RegisterValue, error) { + v := 
delta.NewView(func(owner, key string) (flow.RegisterValue, error) { return nil, nil }) - err := v.Set(registerID1, "", "", flow.RegisterValue("apple")) + err := v.Set(registerID1, "", flow.RegisterValue("apple")) assert.NoError(t, err) chView := v.NewChild() - err = chView.Set(registerID2, "", "", flow.RegisterValue("carrot")) + err = chView.Set(registerID2, "", flow.RegisterValue("carrot")) assert.NoError(t, err) - err = chView.Set(registerID3, "", "", flow.RegisterValue("milk")) + err = chView.Set(registerID3, "", flow.RegisterValue("milk")) assert.NoError(t, err) err = v.MergeView(chView) @@ -409,9 +409,9 @@ func TestViewMergeView(t *testing.T) { require.Len(t, reads, 3) - r1 := flow.NewRegisterID(registerID1, "", "") - r2 := flow.NewRegisterID(registerID2, "", "") - r3 := flow.NewRegisterID(registerID3, "", "") + r1 := flow.NewRegisterID(registerID1, "") + r2 := flow.NewRegisterID(registerID2, "") + r3 := flow.NewRegisterID(registerID3, "") assert.Equal(t, map[string]flow.RegisterID{ r1.String(): r1, @@ -426,7 +426,7 @@ func TestView_RegisterTouches(t *testing.T) { registerID1 := "fruit" registerID2 := "vegetable" - v := delta.NewView(func(owner, controller, key string) (flow.RegisterValue, error) { + v := delta.NewView(func(owner, key string) (flow.RegisterValue, error) { return nil, nil }) @@ -436,7 +436,7 @@ func TestView_RegisterTouches(t *testing.T) { }) t.Run("Set and Get", func(t *testing.T) { - v := delta.NewView(func(owner, controller, key string) (flow.RegisterValue, error) { + v := delta.NewView(func(owner, key string) (flow.RegisterValue, error) { if owner == registerID1 { return flow.RegisterValue("orange"), nil } @@ -448,10 +448,10 @@ func TestView_RegisterTouches(t *testing.T) { return nil, nil }) - _, err := v.Get(registerID1, "", "") + _, err := v.Get(registerID1, "") assert.NoError(t, err) - err = v.Set(registerID2, "", "", flow.RegisterValue("apple")) + err = v.Set(registerID2, "", flow.RegisterValue("apple")) assert.NoError(t, err) touches 
:= v.Interactions().RegisterTouches() @@ -460,7 +460,7 @@ func TestView_RegisterTouches(t *testing.T) { } func TestView_AllRegisters(t *testing.T) { - v := delta.NewView(func(owner, controller, key string) (flow.RegisterValue, error) { + v := delta.NewView(func(owner, key string) (flow.RegisterValue, error) { return nil, nil }) @@ -470,7 +470,7 @@ func TestView_AllRegisters(t *testing.T) { }) t.Run("Set and Get", func(t *testing.T) { - v := delta.NewView(func(owner, controller, key string) (flow.RegisterValue, error) { + v := delta.NewView(func(owner, key string) (flow.RegisterValue, error) { if owner == "a" { return flow.RegisterValue("a_value"), nil } @@ -481,28 +481,28 @@ func TestView_AllRegisters(t *testing.T) { return nil, nil }) - _, err := v.Get("a", "", "") + _, err := v.Get("a", "") assert.NoError(t, err) - _, err = v.Get("b", "", "") + _, err = v.Get("b", "") assert.NoError(t, err) - err = v.Set("c", "", "", flow.RegisterValue("c_value")) + err = v.Set("c", "", flow.RegisterValue("c_value")) assert.NoError(t, err) - err = v.Set("d", "", "", flow.RegisterValue("d_value")) + err = v.Set("d", "", flow.RegisterValue("d_value")) assert.NoError(t, err) - err = v.Touch("e", "", "") + err = v.Touch("e", "") assert.NoError(t, err) - err = v.Touch("f", "", "") + err = v.Touch("f", "") assert.NoError(t, err) allRegs := v.Interactions().AllRegisters() assert.Len(t, allRegs, 6) }) t.Run("With Merge", func(t *testing.T) { - v := delta.NewView(func(owner, controller, key string) (flow.RegisterValue, error) { + v := delta.NewView(func(owner, key string) (flow.RegisterValue, error) { if owner == "a" { return flow.RegisterValue("a_value"), nil } @@ -514,20 +514,20 @@ func TestView_AllRegisters(t *testing.T) { }) vv := v.NewChild() - _, err := vv.Get("a", "", "") + _, err := vv.Get("a", "") assert.NoError(t, err) - _, err = vv.Get("b", "", "") + _, err = vv.Get("b", "") assert.NoError(t, err) - err = vv.Set("c", "", "", flow.RegisterValue("c_value")) + err = vv.Set("c", 
"", flow.RegisterValue("c_value")) assert.NoError(t, err) - err = vv.Set("d", "", "", flow.RegisterValue("d_value")) + err = vv.Set("d", "", flow.RegisterValue("d_value")) assert.NoError(t, err) - err = vv.Touch("e", "", "") + err = vv.Touch("e", "") assert.NoError(t, err) - err = vv.Touch("f", "", "") + err = vv.Touch("f", "") assert.NoError(t, err) err = v.MergeView(vv) @@ -541,7 +541,7 @@ func TestView_Reads(t *testing.T) { registerID1 := "fruit" registerID2 := "vegetable" - v := delta.NewView(func(owner, controller, key string) (flow.RegisterValue, error) { + v := delta.NewView(func(owner, key string) (flow.RegisterValue, error) { return nil, nil }) @@ -551,24 +551,24 @@ func TestView_Reads(t *testing.T) { }) t.Run("Set and Get", func(t *testing.T) { - v := delta.NewView(func(owner, controller, key string) (flow.RegisterValue, error) { + v := delta.NewView(func(owner, key string) (flow.RegisterValue, error) { return nil, nil }) - _, err := v.Get(registerID2, "", "") + _, err := v.Get(registerID2, "") assert.NoError(t, err) - _, err = v.Get(registerID1, "", "") + _, err = v.Get(registerID1, "") assert.NoError(t, err) - _, err = v.Get(registerID2, "", "") + _, err = v.Get(registerID2, "") assert.NoError(t, err) touches := v.Interactions().Reads require.Len(t, touches, 2) - r1 := flow.NewRegisterID(registerID1, "", "") - r2 := flow.NewRegisterID(registerID2, "", "") + r1 := flow.NewRegisterID(registerID1, "") + r2 := flow.NewRegisterID(registerID2, "") assert.Equal(t, map[string]flow.RegisterID{ r1.String(): r1, diff --git a/engine/execution/state/state.go b/engine/execution/state/state.go index 8373ada1d33..2064a1ba1c4 100644 --- a/engine/execution/state/state.go +++ b/engine/execution/state/state.go @@ -73,9 +73,11 @@ type ExecutionState interface { } const ( - KeyPartOwner = uint16(0) - KeyPartController = uint16(1) - KeyPartKey = uint16(2) + KeyPartOwner = uint16(0) + // @deprecated - controller was used only by the very first + // version of cadence for 
access controll which was retired later on + // KeyPartController = uint16(1) + KeyPartKey = uint16(2) ) type state struct { @@ -97,7 +99,6 @@ type state struct { func RegisterIDToKey(reg flow.RegisterID) ledger.Key { return ledger.NewKey([]ledger.KeyPart{ ledger.NewKeyPart(KeyPartOwner, []byte(reg.Owner)), - ledger.NewKeyPart(KeyPartController, []byte(reg.Controller)), ledger.NewKeyPart(KeyPartKey, []byte(reg.Key)), }) } @@ -136,9 +137,9 @@ func NewExecutionState( } -func makeSingleValueQuery(commitment flow.StateCommitment, owner, controller, key string) (*ledger.QuerySingleValue, error) { +func makeSingleValueQuery(commitment flow.StateCommitment, owner, key string) (*ledger.QuerySingleValue, error) { return ledger.NewQuerySingleValue(ledger.State(commitment), - RegisterIDToKey(flow.NewRegisterID(owner, controller, key)), + RegisterIDToKey(flow.NewRegisterID(owner, key)), ) } @@ -172,18 +173,17 @@ func LedgerGetRegister(ldg ledger.Ledger, commitment flow.StateCommitment) delta readCache := make(map[flow.RegisterID]flow.RegisterEntry) - return func(owner, controller, key string) (flow.RegisterValue, error) { + return func(owner, key string) (flow.RegisterValue, error) { regID := flow.RegisterID{ - Owner: owner, - Controller: controller, - Key: key, + Owner: owner, + Key: key, } if value, ok := readCache[regID]; ok { return value.Value, nil } - query, err := makeSingleValueQuery(commitment, owner, controller, key) + query, err := makeSingleValueQuery(commitment, owner, key) if err != nil { return nil, fmt.Errorf("cannot create ledger query: %w", err) diff --git a/engine/execution/state/state_test.go b/engine/execution/state/state_test.go index 4debe0c5878..a1aee4708e6 100644 --- a/engine/execution/state/state_test.go +++ b/engine/execution/state/state_test.go @@ -72,9 +72,9 @@ func TestExecutionStateWithTrieStorage(t *testing.T) { view1 := es.NewView(sc1) - err = view1.Set(registerID1, "", "", flow.RegisterValue("apple")) + err = view1.Set(registerID1, "", 
flow.RegisterValue("apple")) assert.NoError(t, err) - err = view1.Set(registerID2, "", "", flow.RegisterValue("carrot")) + err = view1.Set(registerID2, "", flow.RegisterValue("carrot")) assert.NoError(t, err) sc2, update, err := state.CommitDelta(l, view1.Delta(), sc1) @@ -84,11 +84,11 @@ func TestExecutionStateWithTrieStorage(t *testing.T) { assert.Len(t, update.Paths, 2) assert.Len(t, update.Payloads, 2) - key1 := ledger2.NewKey([]ledger2.KeyPart{ledger2.NewKeyPart(0, []byte(registerID1)), ledger2.NewKeyPart(1, []byte("")), ledger2.NewKeyPart(2, []byte(""))}) + key1 := ledger2.NewKey([]ledger2.KeyPart{ledger2.NewKeyPart(0, []byte(registerID1)), ledger2.NewKeyPart(2, []byte(""))}) path1, err := pathfinder.KeyToPath(key1, ledger.DefaultPathFinderVersion) assert.NoError(t, err) - key2 := ledger2.NewKey([]ledger2.KeyPart{ledger2.NewKeyPart(0, []byte(registerID2)), ledger2.NewKeyPart(1, []byte("")), ledger2.NewKeyPart(2, []byte(""))}) + key2 := ledger2.NewKey([]ledger2.KeyPart{ledger2.NewKeyPart(0, []byte(registerID2)), ledger2.NewKeyPart(2, []byte(""))}) path2, err := pathfinder.KeyToPath(key2, ledger.DefaultPathFinderVersion) assert.NoError(t, err) @@ -103,9 +103,9 @@ func TestExecutionStateWithTrieStorage(t *testing.T) { view2 := es.NewView(sc2) - b1, err := view2.Get(registerID1, "", "") + b1, err := view2.Get(registerID1, "") assert.NoError(t, err) - b2, err := view2.Get(registerID2, "", "") + b2, err := view2.Get(registerID2, "") assert.NoError(t, err) assert.Equal(t, flow.RegisterValue("apple"), b1) @@ -119,14 +119,14 @@ func TestExecutionStateWithTrieStorage(t *testing.T) { view1 := es.NewView(sc1) - err = view1.Set(registerID1, "", "", []byte("apple")) + err = view1.Set(registerID1, "", []byte("apple")) assert.NoError(t, err) sc2, _, err := state.CommitDelta(l, view1.Delta(), sc1) assert.NoError(t, err) // update value and get resulting state commitment view2 := es.NewView(sc2) - err = view2.Set(registerID1, "", "", []byte("orange")) + err = 
view2.Set(registerID1, "", []byte("orange")) assert.NoError(t, err) sc3, _, err := state.CommitDelta(l, view2.Delta(), sc2) @@ -139,10 +139,10 @@ func TestExecutionStateWithTrieStorage(t *testing.T) { view4 := es.NewView(sc3) // fetch the value at both versions - b1, err := view3.Get(registerID1, "", "") + b1, err := view3.Get(registerID1, "") assert.NoError(t, err) - b2, err := view4.Get(registerID1, "", "") + b2, err := view4.Get(registerID1, "") assert.NoError(t, err) assert.Equal(t, flow.RegisterValue("apple"), b1) @@ -156,9 +156,9 @@ func TestExecutionStateWithTrieStorage(t *testing.T) { // set initial value view1 := es.NewView(sc1) - err = view1.Set(registerID1, "", "", []byte("apple")) + err = view1.Set(registerID1, "", []byte("apple")) assert.NoError(t, err) - err = view1.Set(registerID2, "", "", []byte("apple")) + err = view1.Set(registerID2, "", []byte("apple")) assert.NoError(t, err) sc2, _, err := state.CommitDelta(l, view1.Delta(), sc1) @@ -166,7 +166,7 @@ func TestExecutionStateWithTrieStorage(t *testing.T) { // update value and get resulting state commitment view2 := es.NewView(sc2) - err = view2.Delete(registerID1, "", "") + err = view2.Delete(registerID1, "") assert.NoError(t, err) sc3, _, err := state.CommitDelta(l, view2.Delta(), sc2) @@ -179,10 +179,10 @@ func TestExecutionStateWithTrieStorage(t *testing.T) { view4 := es.NewView(sc3) // fetch the value at both versions - b1, err := view3.Get(registerID1, "", "") + b1, err := view3.Get(registerID1, "") assert.NoError(t, err) - b2, err := view4.Get(registerID1, "", "") + b2, err := view4.Get(registerID1, "") assert.NoError(t, err) assert.Equal(t, flow.RegisterValue("apple"), b1) @@ -196,9 +196,9 @@ func TestExecutionStateWithTrieStorage(t *testing.T) { // set initial value view1 := es.NewView(sc1) - err = view1.Set(registerID1, "", "", flow.RegisterValue("apple")) + err = view1.Set(registerID1, "", flow.RegisterValue("apple")) assert.NoError(t, err) - err = view1.Set(registerID2, "", "", 
flow.RegisterValue("apple")) + err = view1.Set(registerID2, "", flow.RegisterValue("apple")) assert.NoError(t, err) sc2, _, err := state.CommitDelta(l, view1.Delta(), sc1) diff --git a/fvm/accounts_test.go b/fvm/accounts_test.go index 6fc75847062..7be886afa04 100644 --- a/fvm/accounts_test.go +++ b/fvm/accounts_test.go @@ -1295,7 +1295,7 @@ func TestAccountBalanceFields(t *testing.T) { } `, address))) - view = delta.NewView(func(owner, controller, key string) (flow.RegisterValue, error) { + view = delta.NewView(func(owner, key string) (flow.RegisterValue, error) { return nil, fmt.Errorf("error getting register %s, %s", flow.BytesToAddress([]byte(owner)).Hex(), key) }) @@ -1336,7 +1336,7 @@ func TestAccountBalanceFields(t *testing.T) { assert.NoError(t, err) assert.NoError(t, script.Err) - assert.Equal(t, cadence.UFix64(9999_2550), script.Value) + assert.Equal(t, cadence.UFix64(9999_2710), script.Value) }), ) @@ -1494,7 +1494,7 @@ func TestGetStorageCapacity(t *testing.T) { } `, address))) - view = delta.NewView(func(owner, controller, key string) (flow.RegisterValue, error) { + view = delta.NewView(func(owner, key string) (flow.RegisterValue, error) { return nil, fmt.Errorf("error getting register %s, %s", flow.BytesToAddress([]byte(owner)).Hex(), key) }) diff --git a/fvm/errors/execution.go b/fvm/errors/execution.go index d0407a61442..e0b71d62d3a 100644 --- a/fvm/errors/execution.go +++ b/fvm/errors/execution.go @@ -199,20 +199,19 @@ func (e EventLimitExceededError) Code() ErrorCode { // A StateKeySizeLimitError indicates that the provided key has exceeded the size limit allowed by the storage type StateKeySizeLimitError struct { - owner string - controller string - key string - size uint64 - limit uint64 + owner string + key string + size uint64 + limit uint64 } // NewStateKeySizeLimitError constructs a StateKeySizeLimitError -func NewStateKeySizeLimitError(owner, controller, key string, size, limit uint64) *StateKeySizeLimitError { - return 
&StateKeySizeLimitError{owner: owner, controller: controller, key: key, size: size, limit: limit} +func NewStateKeySizeLimitError(owner, key string, size, limit uint64) *StateKeySizeLimitError { + return &StateKeySizeLimitError{owner: owner, key: key, size: size, limit: limit} } func (e StateKeySizeLimitError) Error() string { - return fmt.Sprintf("%s key %s has size %d which is higher than storage key size limit %d.", e.Code().String(), strings.Join([]string{e.owner, e.controller, e.key}, "/"), e.size, e.limit) + return fmt.Sprintf("%s key %s has size %d which is higher than storage key size limit %d.", e.Code().String(), strings.Join([]string{e.owner, e.key}, "/"), e.size, e.limit) } // Code returns the error code for this error diff --git a/fvm/fvm_test.go b/fvm/fvm_test.go index 07564cad3c1..678758715d3 100644 --- a/fvm/fvm_test.go +++ b/fvm/fvm_test.go @@ -1459,7 +1459,7 @@ func TestStorageUsed(t *testing.T) { binary.BigEndian.PutUint64(storageUsed, 5) simpleView := utils.NewSimpleView() - err = simpleView.Set(string(address), "", state.KeyStorageUsed, storageUsed) + err = simpleView.Set(string(address), state.KeyStorageUsed, storageUsed) require.NoError(t, err) script := fvm.Script(code) diff --git a/fvm/handler/programs_test.go b/fvm/handler/programs_test.go index 18af736766d..da6b0d9560d 100644 --- a/fvm/handler/programs_test.go +++ b/fvm/handler/programs_test.go @@ -107,7 +107,7 @@ func Test_Programs(t *testing.T) { ).AddAuthorizer(address) } - mainView := delta.NewView(func(_, _, _ string) (flow.RegisterValue, error) { + mainView := delta.NewView(func(_, _ string) (flow.RegisterValue, error) { return nil, nil }) @@ -184,12 +184,12 @@ func Test_Programs(t *testing.T) { procCallA := fvm.Transaction(callTx("A", addressA), 1) loadedCode := false - viewExecA := delta.NewView(func(owner, controller, key string) (flow.RegisterValue, error) { + viewExecA := delta.NewView(func(owner, key string) (flow.RegisterValue, error) { if key == state.ContractKey("A") { 
loadedCode = true } - return mainView.Peek(owner, controller, key) + return mainView.Peek(owner, key) }) err = vm.Run(context, procCallA, viewExecA, programs) @@ -219,11 +219,11 @@ func Test_Programs(t *testing.T) { require.NoError(t, err) // execute transaction again, this time make sure it doesn't load code - viewExecA2 := delta.NewView(func(owner, controller, key string) (flow.RegisterValue, error) { + viewExecA2 := delta.NewView(func(owner, key string) (flow.RegisterValue, error) { //this time we fail if a read of code occurs require.NotEqual(t, key, state.ContractKey("A")) - return mainView.Peek(owner, controller, key) + return mainView.Peek(owner, key) }) procCallA = fvm.Transaction(callTx("A", addressA), 2) @@ -290,7 +290,7 @@ func Test_Programs(t *testing.T) { idsA, valuesA := deltaA.Delta().RegisterUpdates() for i, id := range idsA { - v, has := deltaB.Delta().Get(id.Owner, id.Controller, id.Key) + v, has := deltaB.Delta().Get(id.Owner, id.Key) require.True(t, has) require.Equal(t, valuesA[i], v) @@ -313,12 +313,12 @@ func Test_Programs(t *testing.T) { // rerun transaction // execute transaction again, this time make sure it doesn't load code - viewExecB2 := delta.NewView(func(owner, controller, key string) (flow.RegisterValue, error) { + viewExecB2 := delta.NewView(func(owner, key string) (flow.RegisterValue, error) { //this time we fail if a read of code occurs require.NotEqual(t, key, state.ContractKey("A")) require.NotEqual(t, key, state.ContractKey("B")) - return mainView.Peek(owner, controller, key) + return mainView.Peek(owner, key) }) procCallB = fvm.Transaction(callTx("B", addressB), 5) @@ -340,9 +340,9 @@ func Test_Programs(t *testing.T) { // at this point programs cache should contain data for contract A // only because contract B has been called - viewExecA := delta.NewView(func(owner, controller, key string) (flow.RegisterValue, error) { + viewExecA := delta.NewView(func(owner, key string) (flow.RegisterValue, error) { require.NotEqual(t, key, 
state.ContractKey("A")) - return mainView.Peek(owner, controller, key) + return mainView.Peek(owner, key) }) // run a TX using contract A diff --git a/fvm/mock/state/ledger.go b/fvm/mock/state/ledger.go index 13b2924baf4..d0e9825a376 100644 --- a/fvm/mock/state/ledger.go +++ b/fvm/mock/state/ledger.go @@ -9,13 +9,13 @@ type Ledger struct { mock.Mock } -// Delete provides a mock function with given fields: owner, controller, key -func (_m *Ledger) Delete(owner string, controller string, key string) error { - ret := _m.Called(owner, controller, key) +// Delete provides a mock function with given fields: owner, key +func (_m *Ledger) Delete(owner string, key string) error { + ret := _m.Called(owner, key) var r0 error - if rf, ok := ret.Get(0).(func(string, string, string) error); ok { - r0 = rf(owner, controller, key) + if rf, ok := ret.Get(0).(func(string, string) error); ok { + r0 = rf(owner, key) } else { r0 = ret.Error(0) } @@ -23,13 +23,13 @@ func (_m *Ledger) Delete(owner string, controller string, key string) error { return r0 } -// Get provides a mock function with given fields: owner, controller, key -func (_m *Ledger) Get(owner string, controller string, key string) ([]byte, error) { - ret := _m.Called(owner, controller, key) +// Get provides a mock function with given fields: owner, key +func (_m *Ledger) Get(owner string, key string) ([]byte, error) { + ret := _m.Called(owner, key) var r0 []byte - if rf, ok := ret.Get(0).(func(string, string, string) []byte); ok { - r0 = rf(owner, controller, key) + if rf, ok := ret.Get(0).(func(string, string) []byte); ok { + r0 = rf(owner, key) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]byte) @@ -37,8 +37,8 @@ func (_m *Ledger) Get(owner string, controller string, key string) ([]byte, erro } var r1 error - if rf, ok := ret.Get(1).(func(string, string, string) error); ok { - r1 = rf(owner, controller, key) + if rf, ok := ret.Get(1).(func(string, string) error); ok { + r1 = rf(owner, key) } else { r1 = ret.Error(1) 
} @@ -46,13 +46,13 @@ func (_m *Ledger) Get(owner string, controller string, key string) ([]byte, erro return r0, r1 } -// Set provides a mock function with given fields: owner, controller, key, value -func (_m *Ledger) Set(owner string, controller string, key string, value []byte) error { - ret := _m.Called(owner, controller, key, value) +// Set provides a mock function with given fields: owner, key, value +func (_m *Ledger) Set(owner string, key string, value []byte) error { + ret := _m.Called(owner, key, value) var r0 error - if rf, ok := ret.Get(0).(func(string, string, string, []byte) error); ok { - r0 = rf(owner, controller, key, value) + if rf, ok := ret.Get(0).(func(string, string, []byte) error); ok { + r0 = rf(owner, key, value) } else { r0 = ret.Error(0) } @@ -60,13 +60,13 @@ func (_m *Ledger) Set(owner string, controller string, key string, value []byte) return r0 } -// Touch provides a mock function with given fields: owner, controller, key -func (_m *Ledger) Touch(owner string, controller string, key string) error { - ret := _m.Called(owner, controller, key) +// Touch provides a mock function with given fields: owner, key +func (_m *Ledger) Touch(owner string, key string) error { + ret := _m.Called(owner, key) var r0 error - if rf, ok := ret.Get(0).(func(string, string, string) error); ok { - r0 = rf(owner, controller, key) + if rf, ok := ret.Get(0).(func(string, string) error); ok { + r0 = rf(owner, key) } else { r0 = ret.Error(0) } diff --git a/fvm/mock/state/view.go b/fvm/mock/state/view.go index eed84df19ac..46cdcbdd00f 100644 --- a/fvm/mock/state/view.go +++ b/fvm/mock/state/view.go @@ -30,13 +30,13 @@ func (_m *View) AllRegisters() []flow.RegisterID { return r0 } -// Delete provides a mock function with given fields: owner, controller, key -func (_m *View) Delete(owner string, controller string, key string) error { - ret := _m.Called(owner, controller, key) +// Delete provides a mock function with given fields: owner, key +func (_m *View) 
Delete(owner string, key string) error { + ret := _m.Called(owner, key) var r0 error - if rf, ok := ret.Get(0).(func(string, string, string) error); ok { - r0 = rf(owner, controller, key) + if rf, ok := ret.Get(0).(func(string, string) error); ok { + r0 = rf(owner, key) } else { r0 = ret.Error(0) } @@ -49,13 +49,13 @@ func (_m *View) DropDelta() { _m.Called() } -// Get provides a mock function with given fields: owner, controller, key -func (_m *View) Get(owner string, controller string, key string) ([]byte, error) { - ret := _m.Called(owner, controller, key) +// Get provides a mock function with given fields: owner, key +func (_m *View) Get(owner string, key string) ([]byte, error) { + ret := _m.Called(owner, key) var r0 []byte - if rf, ok := ret.Get(0).(func(string, string, string) []byte); ok { - r0 = rf(owner, controller, key) + if rf, ok := ret.Get(0).(func(string, string) []byte); ok { + r0 = rf(owner, key) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]byte) @@ -63,8 +63,8 @@ func (_m *View) Get(owner string, controller string, key string) ([]byte, error) } var r1 error - if rf, ok := ret.Get(1).(func(string, string, string) error); ok { - r1 = rf(owner, controller, key) + if rf, ok := ret.Get(1).(func(string, string) error); ok { + r1 = rf(owner, key) } else { r1 = ret.Error(1) } @@ -127,13 +127,13 @@ func (_m *View) RegisterUpdates() ([]flow.RegisterID, [][]byte) { return r0, r1 } -// Set provides a mock function with given fields: owner, controller, key, value -func (_m *View) Set(owner string, controller string, key string, value []byte) error { - ret := _m.Called(owner, controller, key, value) +// Set provides a mock function with given fields: owner, key, value +func (_m *View) Set(owner string, key string, value []byte) error { + ret := _m.Called(owner, key, value) var r0 error - if rf, ok := ret.Get(0).(func(string, string, string, []byte) error); ok { - r0 = rf(owner, controller, key, value) + if rf, ok := ret.Get(0).(func(string, string, 
[]byte) error); ok { + r0 = rf(owner, key, value) } else { r0 = ret.Error(0) } @@ -141,13 +141,13 @@ func (_m *View) Set(owner string, controller string, key string, value []byte) e return r0 } -// Touch provides a mock function with given fields: owner, controller, key -func (_m *View) Touch(owner string, controller string, key string) error { - ret := _m.Called(owner, controller, key) +// Touch provides a mock function with given fields: owner, key +func (_m *View) Touch(owner string, key string) error { + ret := _m.Called(owner, key) var r0 error - if rf, ok := ret.Get(0).(func(string, string, string) error); ok { - r0 = rf(owner, controller, key) + if rf, ok := ret.Get(0).(func(string, string) error); ok { + r0 = rf(owner, key) } else { r0 = ret.Error(0) } diff --git a/fvm/state/accounts.go b/fvm/state/accounts.go index c7f8e3621b5..c541f046b9e 100644 --- a/fvm/state/accounts.go +++ b/fvm/state/accounts.go @@ -81,7 +81,7 @@ func (a *StatefulAccounts) AllocateStorageIndex(address flow.Address) (atree.Sto // and won't do ledger getValue for every new slabs (currently happening to compute storage size changes) // this way the getValue would load this value from deltas key := atree.SlabIndexToLedgerKey(index) - err = a.stateHolder.State().Set(string(address.Bytes()), "", string(key), []byte{}, false) + err = a.stateHolder.State().Set(string(address.Bytes()), string(key), []byte{}, false) if err != nil { return atree.StorageIndex{}, fmt.Errorf("failed to store empty value for newly allocated storage index: %w", err) } @@ -407,7 +407,7 @@ func (a *StatefulAccounts) setStorageUsed(address flow.Address, used uint64) err } func (a *StatefulAccounts) GetValue(address flow.Address, key string) (flow.RegisterValue, error) { - return a.stateHolder.State().Get(string(address.Bytes()), "", key, a.stateHolder.EnforceInteractionLimits()) + return a.stateHolder.State().Get(string(address.Bytes()), key, a.stateHolder.EnforceInteractionLimits()) } // SetValue sets a value in 
address' storage @@ -416,7 +416,7 @@ func (a *StatefulAccounts) SetValue(address flow.Address, key string, value flow if err != nil { return fmt.Errorf("failed to update storage used by key %s on account %s: %w", PrintableKey(key), address, err) } - return a.stateHolder.State().Set(string(address.Bytes()), "", key, value, a.stateHolder.EnforceInteractionLimits()) + return a.stateHolder.State().Set(string(address.Bytes()), key, value, a.stateHolder.EnforceInteractionLimits()) } @@ -470,7 +470,6 @@ func RegisterSize(address flow.Address, key string, value flow.RegisterValue) in // additional 2 is for len prefixes when encoding is happening // we might get rid of these 2s in the future size += 2 + len(string(address.Bytes())) - size += 2 + len("") // controller size += 2 + len(key) size += len(value) return size @@ -479,7 +478,7 @@ func RegisterSize(address flow.Address, key string, value flow.RegisterValue) in // TODO replace with touch // TODO handle errors func (a *StatefulAccounts) touch(address flow.Address, key string) { - _, _ = a.stateHolder.State().Get(string(address.Bytes()), "", key, a.stateHolder.EnforceInteractionLimits()) + _, _ = a.stateHolder.State().Get(string(address.Bytes()), key, a.stateHolder.EnforceInteractionLimits()) } func (a *StatefulAccounts) TouchContract(contractName string, address flow.Address) { diff --git a/fvm/state/accounts_test.go b/fvm/state/accounts_test.go index 54d11b9fffb..93b13b5a794 100644 --- a/fvm/state/accounts_test.go +++ b/fvm/state/accounts_test.go @@ -67,7 +67,8 @@ func TestAccounts_GetPublicKey(t *testing.T) { view := utils.NewSimpleView() err := view.Set( - string(address.Bytes()), string(address.Bytes()), "public_key_0", + string(address.Bytes()), + "public_key_0", ledgerValue, ) require.NoError(t, err) @@ -94,7 +95,8 @@ func TestAccounts_GetPublicKeyCount(t *testing.T) { view := utils.NewSimpleView() err := view.Set( - string(address.Bytes()), string(address.Bytes()), "public_key_count", + string(address.Bytes()), 
+ "public_key_count", ledgerValue, ) require.NoError(t, err) @@ -122,7 +124,8 @@ func TestAccounts_GetPublicKeys(t *testing.T) { view := utils.NewSimpleView() err := view.Set( - string(address.Bytes()), string(address.Bytes()), "public_key_count", + string(address.Bytes()), + "public_key_count", ledgerValue, ) require.NoError(t, err) @@ -153,7 +156,6 @@ func TestAccounts_GetWithNoKeysCounter(t *testing.T) { require.NoError(t, err) err = view.Delete( - string(address.Bytes()), string(address.Bytes()), "public_key_count") @@ -270,7 +272,7 @@ func TestAccount_StorageUsed(t *testing.T) { storageUsed, err := accounts.GetStorageUsed(address) require.NoError(t, err) - require.Equal(t, uint64(52), storageUsed) + require.Equal(t, uint64(48), storageUsed) }) t.Run("Storage used on register set increases", func(t *testing.T) { @@ -287,7 +289,7 @@ func TestAccount_StorageUsed(t *testing.T) { storageUsed, err := accounts.GetStorageUsed(address) require.NoError(t, err) - require.Equal(t, uint64(52+34), storageUsed) + require.Equal(t, uint64(48+32), storageUsed) }) t.Run("Storage used, set twice on same register to same value, stays the same", func(t *testing.T) { @@ -306,7 +308,7 @@ func TestAccount_StorageUsed(t *testing.T) { storageUsed, err := accounts.GetStorageUsed(address) require.NoError(t, err) - require.Equal(t, uint64(52+34), storageUsed) + require.Equal(t, uint64(48+32), storageUsed) }) t.Run("Storage used, set twice on same register to larger value, increases", func(t *testing.T) { @@ -325,7 +327,7 @@ func TestAccount_StorageUsed(t *testing.T) { storageUsed, err := accounts.GetStorageUsed(address) require.NoError(t, err) - require.Equal(t, uint64(52+35), storageUsed) + require.Equal(t, uint64(48+33), storageUsed) }) t.Run("Storage used, set twice on same register to smaller value, decreases", func(t *testing.T) { @@ -344,7 +346,7 @@ func TestAccount_StorageUsed(t *testing.T) { storageUsed, err := accounts.GetStorageUsed(address) require.NoError(t, err) - 
require.Equal(t, uint64(52+33), storageUsed) + require.Equal(t, uint64(48+31), storageUsed) }) t.Run("Storage used, after register deleted, decreases", func(t *testing.T) { @@ -363,7 +365,7 @@ func TestAccount_StorageUsed(t *testing.T) { storageUsed, err := accounts.GetStorageUsed(address) require.NoError(t, err) - require.Equal(t, uint64(52+0), storageUsed) + require.Equal(t, uint64(48+0), storageUsed) }) t.Run("Storage used on a complex scenario has correct value", func(t *testing.T) { @@ -392,7 +394,7 @@ func TestAccount_StorageUsed(t *testing.T) { storageUsed, err := accounts.GetStorageUsed(address) require.NoError(t, err) - require.Equal(t, uint64(52+33+46), storageUsed) + require.Equal(t, uint64(48+33+42), storageUsed) }) } diff --git a/fvm/state/address_generator.go b/fvm/state/address_generator.go index a325eacadb1..271eba56676 100644 --- a/fvm/state/address_generator.go +++ b/fvm/state/address_generator.go @@ -27,7 +27,7 @@ func NewStateBoundAddressGenerator(stateHolder *StateHolder, chain flow.Chain) * // this requires changes outside of fvm since the type is defined on flow model // and emulator and others might be dependent on that func (g *StateBoundAddressGenerator) Bytes() []byte { - stateBytes, err := g.stateHolder.State().Get("", "", keyAddressState, g.stateHolder.EnforceInteractionLimits()) + stateBytes, err := g.stateHolder.State().Get("", keyAddressState, g.stateHolder.EnforceInteractionLimits()) if err != nil { panic(err) } @@ -36,7 +36,7 @@ func (g *StateBoundAddressGenerator) Bytes() []byte { func (g *StateBoundAddressGenerator) constructAddressGen() (flow.AddressGenerator, error) { st := g.stateHolder.State() - stateBytes, err := st.Get("", "", keyAddressState, g.stateHolder.EnforceInteractionLimits()) + stateBytes, err := st.Get("", keyAddressState, g.stateHolder.EnforceInteractionLimits()) if err != nil { return nil, fmt.Errorf("failed to read address generator state from the state: %w", err) } @@ -57,7 +57,7 @@ func (g 
*StateBoundAddressGenerator) NextAddress() (flow.Address, error) { } // update the ledger state - err = g.stateHolder.State().Set("", "", keyAddressState, addressGenerator.Bytes(), g.stateHolder.EnforceInteractionLimits()) + err = g.stateHolder.State().Set("", keyAddressState, addressGenerator.Bytes(), g.stateHolder.EnforceInteractionLimits()) if err != nil { return address, fmt.Errorf("failed to update the state with address generator state: %w", err) } diff --git a/fvm/state/address_generator_test.go b/fvm/state/address_generator_test.go index 5b5fd9ad9f0..b0f5a24e369 100644 --- a/fvm/state/address_generator_test.go +++ b/fvm/state/address_generator_test.go @@ -26,7 +26,7 @@ func Test_NewStateBoundAddressGenerator_GeneratingUpdatesState(t *testing.T) { _, err := generator.NextAddress() require.NoError(t, err) - stateBytes, err := view.Get("", "", "account_address_state") + stateBytes, err := view.Get("", "account_address_state") require.NoError(t, err) require.Equal(t, flow.BytesToAddress(stateBytes), flow.HexToAddress("01")) @@ -34,7 +34,7 @@ func Test_NewStateBoundAddressGenerator_GeneratingUpdatesState(t *testing.T) { func Test_NewStateBoundAddressGenerator_UsesLedgerState(t *testing.T) { view := utils.NewSimpleView() - err := view.Set("", "", "account_address_state", flow.HexToAddress("01").Bytes()) + err := view.Set("", "account_address_state", flow.HexToAddress("01").Bytes()) require.NoError(t, err) chain := flow.MonotonicEmulator.Chain() @@ -44,7 +44,7 @@ func Test_NewStateBoundAddressGenerator_UsesLedgerState(t *testing.T) { _, err = generator.NextAddress() require.NoError(t, err) - stateBytes, err := view.Get("", "", "account_address_state") + stateBytes, err := view.Get("", "account_address_state") require.NoError(t, err) require.Equal(t, flow.BytesToAddress(stateBytes), flow.HexToAddress("02")) diff --git a/fvm/state/state.go b/fvm/state/state.go index f30a1a54cdb..8ed98fde388 100644 --- a/fvm/state/state.go +++ b/fvm/state/state.go @@ -22,7 +22,7 @@ 
const ( ) type mapKey struct { - owner, controller, key string + owner, key string } // State represents the execution state @@ -110,18 +110,18 @@ func (s *State) InteractionUsed() uint64 { return s.TotalBytesRead + s.TotalBytesWritten } -// Get returns a register value given owner, controller and key -func (s *State) Get(owner, controller, key string, enforceLimit bool) (flow.RegisterValue, error) { +// Get returns a register value given owner and key +func (s *State) Get(owner, key string, enforceLimit bool) (flow.RegisterValue, error) { var value []byte var err error if enforceLimit { - if err = s.checkSize(owner, controller, key, []byte{}); err != nil { + if err = s.checkSize(owner, key, []byte{}); err != nil { return nil, err } } - if value, err = s.view.Get(owner, controller, key); err != nil { + if value, err = s.view.Get(owner, key); err != nil { // wrap error into a fatal error getError := errors.NewLedgerFailure(err) // wrap with more info @@ -129,10 +129,9 @@ func (s *State) Get(owner, controller, key string, enforceLimit bool) (flow.Regi } // if not part of recent updates count them as read - if _, ok := s.updateSize[mapKey{owner, controller, key}]; !ok { + if _, ok := s.updateSize[mapKey{owner, key}]; !ok { s.ReadCounter++ - s.TotalBytesRead += uint64(len(owner) + - len(controller) + len(key) + len(value)) + s.TotalBytesRead += uint64(len(owner) + len(key) + len(value)) } if enforceLimit { @@ -143,14 +142,14 @@ func (s *State) Get(owner, controller, key string, enforceLimit bool) (flow.Regi } // Set updates state delta with a register update -func (s *State) Set(owner, controller, key string, value flow.RegisterValue, enforceLimit bool) error { +func (s *State) Set(owner, key string, value flow.RegisterValue, enforceLimit bool) error { if enforceLimit { - if err := s.checkSize(owner, controller, key, value); err != nil { + if err := s.checkSize(owner, key, value); err != nil { return err } } - if err := s.view.Set(owner, controller, key, value); err != 
nil { + if err := s.view.Set(owner, key, value); err != nil { // wrap error into a fatal error setError := errors.NewLedgerFailure(err) // wrap with more info @@ -167,13 +166,13 @@ func (s *State) Set(owner, controller, key string, value flow.RegisterValue, enf s.updatedAddresses[address] = struct{}{} } - mapKey := mapKey{owner, controller, key} + mapKey := mapKey{owner, key} if old, ok := s.updateSize[mapKey]; ok { s.WriteCounter-- s.TotalBytesWritten -= old } - updateSize := uint64(len(owner) + len(controller) + len(key) + len(value)) + updateSize := uint64(len(owner) + len(key) + len(value)) s.WriteCounter++ s.TotalBytesWritten += updateSize s.updateSize[mapKey] = updateSize @@ -182,13 +181,13 @@ func (s *State) Set(owner, controller, key string, value flow.RegisterValue, enf } // Delete deletes a register -func (s *State) Delete(owner, controller, key string, enforceLimit bool) error { - return s.Set(owner, controller, key, nil, enforceLimit) +func (s *State) Delete(owner, key string, enforceLimit bool) error { + return s.Set(owner, key, nil, enforceLimit) } // Touch touches a register -func (s *State) Touch(owner, controller, key string) error { - return s.view.Touch(owner, controller, key) +func (s *State) Touch(owner, key string) error { + return s.view.Touch(owner, key) } // MeterComputation meters computation usage @@ -304,11 +303,11 @@ func (s *State) checkMaxInteraction() error { return nil } -func (s *State) checkSize(owner, controller, key string, value flow.RegisterValue) error { - keySize := uint64(len(owner) + len(controller) + len(key)) +func (s *State) checkSize(owner, key string, value flow.RegisterValue) error { + keySize := uint64(len(owner) + len(key)) valueSize := uint64(len(value)) if keySize > s.maxKeySizeAllowed { - return errors.NewStateKeySizeLimitError(owner, controller, key, keySize, s.maxKeySizeAllowed) + return errors.NewStateKeySizeLimitError(owner, key, keySize, s.maxKeySizeAllowed) } if valueSize > s.maxValueSizeAllowed { return 
errors.NewStateValueSizeLimitError(value, valueSize, s.maxValueSizeAllowed) @@ -329,46 +328,45 @@ func addressFromOwner(owner string) (flow.Address, bool) { // IsFVMStateKey returns true if the // key is controlled by the fvm env and // return false otherwise (key controlled by the cadence env) -func IsFVMStateKey(owner, controller, key string) bool { +func IsFVMStateKey(owner, key string) bool { - // check if is a service level key (owner and controller is empty) + // check if is a service level key (owner is empty) // cases: - // - "", "", "uuid" - // - "", "", "account_address_state" - if len(owner) == 0 && len(controller) == 0 { + // - "", "uuid" + // - "", "account_address_state" + if len(owner) == 0 { return true } // check account level keys // cases: - // - address, "", "public_key_count" - // - address, "", "public_key_%d" (index) - // - address, "", "contract_names" - // - address, "", "code.%s" (contract name) - // - address, "", exists - // - address, "", "storage_used" - // - address, "", "frozen" - if len(controller) == 0 { - if key == KeyPublicKeyCount { - return true - } - if bytes.HasPrefix([]byte(key), []byte("public_key_")) { - return true - } - if key == KeyContractNames { - return true - } - if bytes.HasPrefix([]byte(key), []byte(KeyCode)) { - return true - } - if key == KeyAccountStatus { - return true - } - if key == KeyStorageUsed { - return true - } - if key == KeyStorageIndex { - return true - } + // - address, "public_key_count" + // - address, "public_key_%d" (index) + // - address, "contract_names" + // - address, "code.%s" (contract name) + // - address, exists + // - address, "storage_used" + // - address, "frozen" + + if key == KeyPublicKeyCount { + return true + } + if bytes.HasPrefix([]byte(key), []byte("public_key_")) { + return true + } + if key == KeyContractNames { + return true + } + if bytes.HasPrefix([]byte(key), []byte(KeyCode)) { + return true + } + if key == KeyAccountStatus { + return true + } + if key == KeyStorageUsed 
{ + return true + } + if key == KeyStorageIndex { + return true } return false diff --git a/fvm/state/state_test.go b/fvm/state/state_test.go index 87f318ef135..24b93d2a53e 100644 --- a/fvm/state/state_test.go +++ b/fvm/state/state_test.go @@ -20,12 +20,12 @@ func TestState_ChildMergeFunctionality(t *testing.T) { key := "key1" value := createByteArray(1) // set key1 on parent - err := st.Set("address", "controller", key, value, true) + err := st.Set("address", key, value, true) require.NoError(t, err) // read key1 on child stChild := st.NewChild() - v, err := stChild.Get("address", "controller", key, true) + v, err := stChild.Get("address", key, true) require.NoError(t, err) require.Equal(t, v, value) }) @@ -36,11 +36,11 @@ func TestState_ChildMergeFunctionality(t *testing.T) { stChild := st.NewChild() // set key2 on child - err := stChild.Set("address", "controller", key, value, true) + err := stChild.Set("address", key, value, true) require.NoError(t, err) // read key2 on parent - v, err := st.Get("address", "controller", key, true) + v, err := st.Get("address", key, true) require.NoError(t, err) require.Equal(t, len(v), 0) }) @@ -51,11 +51,11 @@ func TestState_ChildMergeFunctionality(t *testing.T) { stChild := st.NewChild() // set key3 on child - err := stChild.Set("address", "controller", key, value, true) + err := stChild.Set("address", key, value, true) require.NoError(t, err) // read before merge - v, err := st.Get("address", "controller", key, true) + v, err := st.Get("address", key, true) require.NoError(t, err) require.Equal(t, len(v), 0) @@ -64,7 +64,7 @@ func TestState_ChildMergeFunctionality(t *testing.T) { require.NoError(t, err) // read key3 on parent - v, err = st.Get("address", "controller", key, true) + v, err = st.Get("address", key, true) require.NoError(t, err) require.Equal(t, v, value) }) @@ -73,11 +73,11 @@ func TestState_ChildMergeFunctionality(t *testing.T) { key := "key4" value := createByteArray(4) // set key4 on parent - err := 
st.Set("address", "controller", key, value, true) + err := st.Set("address", key, value, true) require.NoError(t, err) // now should be part of the ledger - v, err := view.Get("address", "controller", key) + v, err := view.Get("address", key) require.NoError(t, err) require.Equal(t, v, value) }) @@ -90,8 +90,8 @@ func TestState_InteractionMeasuring(t *testing.T) { key := "key1" value := createByteArray(1) - err := st.Set("address", "controller", key, value, true) - keySize := uint64(len("address") + len("controller") + len(key)) + err := st.Set("address", key, value, true) + keySize := uint64(len("address") + len(key)) size := keySize + uint64(len(value)) require.NoError(t, err) require.Equal(t, uint64(0), st.ReadCounter) @@ -101,7 +101,7 @@ func TestState_InteractionMeasuring(t *testing.T) { // should read from the delta // should not impact totalBytesRead - v, err := st.Get("address", "controller", key, true) + v, err := st.Get("address", key, true) require.NoError(t, err) require.Equal(t, v, value) require.Equal(t, uint64(0), st.TotalBytesRead) @@ -109,7 +109,7 @@ func TestState_InteractionMeasuring(t *testing.T) { // non existing key // should be counted towards reading from the ledger key2 := "key2" - _, err = st.Get("address", "controller", key2, true) + _, err = st.Get("address", key2, true) require.NoError(t, err) require.Equal(t, keySize, st.TotalBytesRead) } @@ -120,33 +120,33 @@ func TestState_MaxValueSize(t *testing.T) { // update should pass value := createByteArray(5) - err := st.Set("address", "controller", "key", value, true) + err := st.Set("address", "key", value, true) require.NoError(t, err) // update shouldn't pass value = createByteArray(7) - err = st.Set("address", "controller", "key", value, true) + err = st.Set("address", "key", value, true) require.Error(t, err) } func TestState_MaxKeySize(t *testing.T) { view := utils.NewSimpleView() - st := state.NewState(view, state.WithMaxKeySizeAllowed(6)) + st := state.NewState(view, 
state.WithMaxKeySizeAllowed(4)) // read - _, err := st.Get("1", "2", "3", true) + _, err := st.Get("1", "2", true) require.NoError(t, err) // read - _, err = st.Get("123", "234", "345", true) + _, err = st.Get("123", "234", true) require.Error(t, err) // update - err = st.Set("1", "2", "3", []byte{}, true) + err = st.Set("1", "2", []byte{}, true) require.NoError(t, err) // read - err = st.Set("123", "234", "345", []byte{}, true) + err = st.Set("123", "234", []byte{}, true) require.Error(t, err) } @@ -155,59 +155,60 @@ func TestState_MaxInteraction(t *testing.T) { view := utils.NewSimpleView() st := state.NewState(view, state.WithMaxInteractionSizeAllowed(12)) - // read - interaction 3 - _, err := st.Get("1", "2", "3", true) - require.Equal(t, st.InteractionUsed(), uint64(3)) + // read - interaction 2 + _, err := st.Get("1", "2", true) + require.Equal(t, st.InteractionUsed(), uint64(2)) require.NoError(t, err) - // read - interaction 12 - _, err = st.Get("123", "234", "345", true) - require.Equal(t, st.InteractionUsed(), uint64(12)) + // read - interaction 8 + _, err = st.Get("123", "234", true) + require.Equal(t, st.InteractionUsed(), uint64(8)) require.NoError(t, err) - // read - interaction 21 - _, err = st.Get("234", "345", "456", true) - require.Equal(t, st.InteractionUsed(), uint64(21)) + // read - interaction 14 + _, err = st.Get("234", "345", true) + require.Equal(t, st.InteractionUsed(), uint64(14)) require.Error(t, err) - st = state.NewState(view, state.WithMaxInteractionSizeAllowed(9)) + st = state.NewState(view, state.WithMaxInteractionSizeAllowed(6)) stChild := st.NewChild() // update - 0 - err = stChild.Set("1", "2", "3", []byte{'A'}, true) + err = stChild.Set("1", "2", []byte{'A'}, true) require.NoError(t, err) require.Equal(t, st.InteractionUsed(), uint64(0)) // commit err = st.MergeState(stChild, true) require.NoError(t, err) - require.Equal(t, st.InteractionUsed(), uint64(4)) + require.Equal(t, st.InteractionUsed(), uint64(3)) - // read - 
interaction 4 (already in read cache) - _, err = st.Get("1", "2", "3", true) + // read - interaction 3 (already in read cache) + _, err = st.Get("1", "2", true) require.NoError(t, err) - require.Equal(t, st.InteractionUsed(), uint64(4)) + require.Equal(t, st.InteractionUsed(), uint64(3)) - // read - interaction 7 - _, err = st.Get("2", "3", "4", true) + // read - interaction 5 + _, err = st.Get("2", "3", true) require.NoError(t, err) - require.Equal(t, st.InteractionUsed(), uint64(7)) + require.Equal(t, st.InteractionUsed(), uint64(5)) - // read - interaction 10 - _, err = st.Get("3", "4", "5", true) + // read - interaction 7 + _, err = st.Get("3", "4", true) require.Error(t, err) + } func TestState_IsFVMStateKey(t *testing.T) { - require.True(t, state.IsFVMStateKey("", "", "uuid")) - require.True(t, state.IsFVMStateKey("Address", "", state.KeyPublicKeyCount)) - require.True(t, state.IsFVMStateKey("Address", "", "public_key_12")) - require.True(t, state.IsFVMStateKey("Address", "", state.KeyContractNames)) - require.True(t, state.IsFVMStateKey("Address", "", "code.MYCODE")) - require.True(t, state.IsFVMStateKey("Address", "", state.KeyAccountStatus)) - require.True(t, state.IsFVMStateKey("Address", "", state.KeyStorageUsed)) - require.True(t, state.IsFVMStateKey("Address", "", state.KeyAccountStatus)) - require.False(t, state.IsFVMStateKey("Address", "", "anything else")) + require.True(t, state.IsFVMStateKey("", "uuid")) + require.True(t, state.IsFVMStateKey("Address", state.KeyPublicKeyCount)) + require.True(t, state.IsFVMStateKey("Address", "public_key_12")) + require.True(t, state.IsFVMStateKey("Address", state.KeyContractNames)) + require.True(t, state.IsFVMStateKey("Address", "code.MYCODE")) + require.True(t, state.IsFVMStateKey("Address", state.KeyAccountStatus)) + require.True(t, state.IsFVMStateKey("Address", state.KeyStorageUsed)) + require.True(t, state.IsFVMStateKey("Address", state.KeyAccountStatus)) + require.False(t, state.IsFVMStateKey("Address", 
"anything else")) } func TestAccounts_PrintableKey(t *testing.T) { diff --git a/fvm/state/uuids.go b/fvm/state/uuids.go index 2f7828fa5cf..1eb54443ec4 100644 --- a/fvm/state/uuids.go +++ b/fvm/state/uuids.go @@ -21,7 +21,7 @@ func NewUUIDGenerator(stateHolder *StateHolder) *UUIDGenerator { // GetUUID reads uint64 byte value for uuid from the state func (u *UUIDGenerator) GetUUID() (uint64, error) { - stateBytes, err := u.stateHolder.State().Get("", "", keyUUID, u.stateHolder.EnforceInteractionLimits()) + stateBytes, err := u.stateHolder.State().Get("", keyUUID, u.stateHolder.EnforceInteractionLimits()) if err != nil { return 0, fmt.Errorf("cannot get uuid byte from state: %w", err) } @@ -34,7 +34,7 @@ func (u *UUIDGenerator) GetUUID() (uint64, error) { func (u *UUIDGenerator) SetUUID(uuid uint64) error { bytes := make([]byte, 8) binary.BigEndian.PutUint64(bytes, uuid) - err := u.stateHolder.State().Set("", "", keyUUID, bytes, u.stateHolder.EnforceInteractionLimits()) + err := u.stateHolder.State().Set("", keyUUID, bytes, u.stateHolder.EnforceInteractionLimits()) if err != nil { return fmt.Errorf("cannot set uuid byte to state: %w", err) } diff --git a/fvm/state/view.go b/fvm/state/view.go index aa862f96969..7af4ef3ee0d 100644 --- a/fvm/state/view.go +++ b/fvm/state/view.go @@ -18,8 +18,8 @@ type View interface { // TODO Rename this to Storage // and remove reference to flow.RegisterValue and use byte[] type Ledger interface { - Set(owner, controller, key string, value flow.RegisterValue) error - Get(owner, controller, key string) (flow.RegisterValue, error) - Touch(owner, controller, key string) error - Delete(owner, controller, key string) error + Set(owner, key string, value flow.RegisterValue) error + Get(owner, key string) (flow.RegisterValue, error) + Touch(owner, key string) error + Delete(owner, key string) error } diff --git a/fvm/transactionInvoker_test.go b/fvm/transactionInvoker_test.go index 9a0b7da6bb5..2c975cee154 100644 --- 
a/fvm/transactionInvoker_test.go +++ b/fvm/transactionInvoker_test.go @@ -67,7 +67,6 @@ func TestSafetyCheck(t *testing.T) { err = view.Set( string(contractAddress.Bytes()), - "", state.KeyAccountStatus, []byte{1}, ) @@ -75,7 +74,6 @@ func TestSafetyCheck(t *testing.T) { err = view.Set( string(contractAddress.Bytes()), - "", "contract_names", encodedName, ) @@ -83,7 +81,6 @@ func TestSafetyCheck(t *testing.T) { err = view.Set( string(contractAddress.Bytes()), - "", "code.TestContract", []byte(contractCode), ) @@ -146,21 +143,18 @@ func TestSafetyCheck(t *testing.T) { err = view.Set( string(contractAddress.Bytes()), - "", state.KeyAccountStatus, []byte{1}, ) require.NoError(t, err) err = view.Set( string(contractAddress.Bytes()), - "", "contract_names", encodedName, ) require.NoError(t, err) err = view.Set( string(contractAddress.Bytes()), - "", "code.TestContract", []byte(contractCode), ) diff --git a/fvm/utils/test.go b/fvm/utils/test.go index e70e4117a34..13e08467cbb 100644 --- a/fvm/utils/test.go +++ b/fvm/utils/test.go @@ -38,7 +38,7 @@ func (v *SimpleView) MergeView(o state.View) error { } for _, item := range other.Ledger.Registers { - err := v.Ledger.Set(item.Key.Owner, item.Key.Controller, item.Key.Key, item.Value) + err := v.Ledger.Set(item.Key.Owner, item.Key.Key, item.Value) if err != nil { return fmt.Errorf("can not merge: %w", err) } @@ -54,12 +54,12 @@ func (v *SimpleView) DropDelta() { v.Ledger.Registers = make(map[string]flow.RegisterEntry) } -func (v *SimpleView) Set(owner, controller, key string, value flow.RegisterValue) error { - return v.Ledger.Set(owner, controller, key, value) +func (v *SimpleView) Set(owner, key string, value flow.RegisterValue) error { + return v.Ledger.Set(owner, key, value) } -func (v *SimpleView) Get(owner, controller, key string) (flow.RegisterValue, error) { - value, err := v.Ledger.Get(owner, controller, key) +func (v *SimpleView) Get(owner, key string) (flow.RegisterValue, error) { + value, err := v.Ledger.Get(owner, 
key) if err != nil { return nil, err } @@ -68,7 +68,7 @@ func (v *SimpleView) Get(owner, controller, key string) (flow.RegisterValue, err } if v.Parent != nil { - return v.Parent.Get(owner, controller, key) + return v.Parent.Get(owner, key) } return nil, nil @@ -93,12 +93,12 @@ func (v *SimpleView) RegisterUpdates() ([]flow.RegisterID, []flow.RegisterValue) return ids, values } -func (v *SimpleView) Touch(owner, controller, key string) error { - return v.Ledger.Touch(owner, controller, key) +func (v *SimpleView) Touch(owner, key string) error { + return v.Ledger.Touch(owner, key) } -func (v *SimpleView) Delete(owner, controller, key string) error { - return v.Ledger.Delete(owner, controller, key) +func (v *SimpleView) Delete(owner, key string) error { + return v.Ledger.Delete(owner, key) } // A MapLedger is a naive ledger storage implementation backed by a simple map. @@ -119,38 +119,37 @@ func NewMapLedger() *MapLedger { } } -func (m *MapLedger) Set(owner, controller, key string, value flow.RegisterValue) error { - k := fullKey(owner, controller, key) +func (m *MapLedger) Set(owner, key string, value flow.RegisterValue) error { + k := fullKey(owner, key) m.RegisterTouches[k] = true m.RegisterUpdated[k] = true m.Registers[k] = flow.RegisterEntry{ Key: flow.RegisterID{ - Owner: owner, - Controller: controller, - Key: key, + Owner: owner, + Key: key, }, Value: value, } return nil } -func (m *MapLedger) Get(owner, controller, key string) (flow.RegisterValue, error) { - k := fullKey(owner, controller, key) +func (m *MapLedger) Get(owner, key string) (flow.RegisterValue, error) { + k := fullKey(owner, key) m.RegisterTouches[k] = true return m.Registers[k].Value, nil } -func (m *MapLedger) Touch(owner, controller, key string) error { - m.RegisterTouches[fullKey(owner, controller, key)] = true +func (m *MapLedger) Touch(owner, key string) error { + m.RegisterTouches[fullKey(owner, key)] = true return nil } -func (m *MapLedger) Delete(owner, controller, key string) error { 
- delete(m.RegisterTouches, fullKey(owner, controller, key)) +func (m *MapLedger) Delete(owner, key string) error { + delete(m.RegisterTouches, fullKey(owner, key)) return nil } -func fullKey(owner, controller, key string) string { +func fullKey(owner, key string) string { // https://en.wikipedia.org/wiki/C0_and_C1_control_codes#Field_separators - return strings.Join([]string{owner, controller, key}, "\x1F") + return strings.Join([]string{owner, key}, "\x1F") } diff --git a/go.mod b/go.mod index cfd100b2752..4a9982d8fa7 100644 --- a/go.mod +++ b/go.mod @@ -55,7 +55,7 @@ require ( github.com/multiformats/go-multihash v0.1.0 github.com/onflow/atree v0.4.0 github.com/onflow/cadence v0.24.2-0.20220627202951-5a06fec82b4a - github.com/onflow/flow v0.3.1 + github.com/onflow/flow v0.3.2 github.com/onflow/flow-core-contracts/lib/go/contracts v0.11.2-0.20220628221832-3d206c1d4790 github.com/onflow/flow-core-contracts/lib/go/templates v0.11.2-0.20220628221832-3d206c1d4790 github.com/onflow/flow-go-sdk v0.26.5-0.20220629191626-900f9f91bffc diff --git a/go.sum b/go.sum index d62211e5b4e..dffb6418855 100644 --- a/go.sum +++ b/go.sum @@ -1337,8 +1337,8 @@ github.com/onflow/atree v0.4.0/go.mod h1:7Qe1xaW0YewvouLXrugzMFUYXNoRQ8MT/UsVAWx github.com/onflow/cadence v0.15.0/go.mod h1:KMzDF6cIv6nb5PJW9aITaqazbmJX8MMeibFcpPP385M= github.com/onflow/cadence v0.24.2-0.20220627202951-5a06fec82b4a h1:Wr7+zfFj7ehr3nwNtQ9LXGpwS3MWDsUvnlX78aOnnZY= github.com/onflow/cadence v0.24.2-0.20220627202951-5a06fec82b4a/go.mod h1:g19FlFrcQsiegiZDe6wYtUCBO8O1hM1x/+l68aVO07k= -github.com/onflow/flow v0.3.1 h1:kL/tNvCXeBw4yCVPys/m9rxvKxrO7Ck/mVNqHFtkTrI= -github.com/onflow/flow v0.3.1/go.mod h1:lzyAYmbu1HfkZ9cfnL5/sjrrsnJiUU8fRL26CqLP7+c= +github.com/onflow/flow v0.3.2 h1:z3IuKOjM9Tvf0pXfloTbrLxM5nTunI47cklsDd+wxBE= +github.com/onflow/flow v0.3.2/go.mod h1:lzyAYmbu1HfkZ9cfnL5/sjrrsnJiUU8fRL26CqLP7+c= github.com/onflow/flow-core-contracts/lib/go/contracts v0.11.2-0.20220628221832-3d206c1d4790 
h1:gt/bIw4IO1zhvmoUnwHR2DcGaLDfxKQHzPEotjaDIWM= github.com/onflow/flow-core-contracts/lib/go/contracts v0.11.2-0.20220628221832-3d206c1d4790/go.mod h1:T6yhM+kWrFxiP6F3hh8lh9DcocHfmv48P4ITnjLhKSk= github.com/onflow/flow-core-contracts/lib/go/templates v0.11.2-0.20220628221832-3d206c1d4790 h1:/v6qnkavS6FcxQyZ0JgUSZWDmfCzY51FjNn+VDFxJrw= diff --git a/integration/go.mod b/integration/go.mod index 3184dff0e2e..088ed86989e 100644 --- a/integration/go.mod +++ b/integration/go.mod @@ -17,9 +17,9 @@ require ( github.com/onflow/cadence v0.24.2-0.20220627202951-5a06fec82b4a github.com/onflow/flow-core-contracts/lib/go/contracts v0.11.2-0.20220628221832-3d206c1d4790 github.com/onflow/flow-core-contracts/lib/go/templates v0.11.2-0.20220628221832-3d206c1d4790 - github.com/onflow/flow-emulator v0.33.2 + github.com/onflow/flow-emulator v0.33.4-0.20220708171627-a9c955cd26c6 github.com/onflow/flow-ft/lib/go/templates v0.2.0 - github.com/onflow/flow-go v0.25.13-0.20220609230330-ac8d2d78c212 + github.com/onflow/flow-go v0.25.13-0.20220708171206-7015f054a69f github.com/onflow/flow-go-sdk v0.26.5-0.20220629191626-900f9f91bffc github.com/onflow/flow-go/crypto v0.24.4 github.com/onflow/flow/protobuf/go/flow v0.3.1 diff --git a/integration/go.sum b/integration/go.sum index 08262a6c56f..f7f7aab0bb8 100644 --- a/integration/go.sum +++ b/integration/go.sum @@ -1442,8 +1442,8 @@ github.com/onflow/flow-core-contracts/lib/go/contracts v0.11.2-0.20220628221832- github.com/onflow/flow-core-contracts/lib/go/contracts v0.11.2-0.20220628221832-3d206c1d4790/go.mod h1:T6yhM+kWrFxiP6F3hh8lh9DcocHfmv48P4ITnjLhKSk= github.com/onflow/flow-core-contracts/lib/go/templates v0.11.2-0.20220628221832-3d206c1d4790 h1:/v6qnkavS6FcxQyZ0JgUSZWDmfCzY51FjNn+VDFxJrw= github.com/onflow/flow-core-contracts/lib/go/templates v0.11.2-0.20220628221832-3d206c1d4790/go.mod h1:JB2hWVxUjhMshUDNVQKfn4dzhhawOO+i3XjlhLMV5MM= -github.com/onflow/flow-emulator v0.33.2 h1:yBv0yK50qjhQgzcAsew3EoZEmHvJ7cys/21fS2nARNw= 
-github.com/onflow/flow-emulator v0.33.2/go.mod h1:rngaPAH/lFm0wHJDYnrRRS8hO8YUr0c86SGKZ9z4cUg= +github.com/onflow/flow-emulator v0.33.4-0.20220708171627-a9c955cd26c6 h1:6b4eW5O/2Az1a1XUFaYkemjGNFP+1HOSragLjh2NV/c= +github.com/onflow/flow-emulator v0.33.4-0.20220708171627-a9c955cd26c6/go.mod h1:BR7dTN3WLs5jzPLwB9PeqXk5v8cA4hoInoh3bZeo6WM= github.com/onflow/flow-ft/lib/go/contracts v0.5.0 h1:Cg4gHGVblxcejfNNG5Mfj98Wf4zbY76O0Y28QB0766A= github.com/onflow/flow-ft/lib/go/contracts v0.5.0/go.mod h1:1zoTjp1KzNnOPkyqKmWKerUyf0gciw+e6tAEt0Ks3JE= github.com/onflow/flow-ft/lib/go/templates v0.2.0 h1:oQQk5UthLS9KfKLkZVJg/XAVq8CXW7HAxSTu4HwBJkU= diff --git a/ledger/partial/ledger_test.go b/ledger/partial/ledger_test.go index 0f1485f0bd0..0e30ebb7565 100644 --- a/ledger/partial/ledger_test.go +++ b/ledger/partial/ledger_test.go @@ -111,9 +111,9 @@ func TestProofsForEmptyRegisters(t *testing.T) { view := delta.NewView(executionState.LedgerGetRegister(l, flow.StateCommitment(emptyState))) - registerID := flow.NewRegisterID("b", "o", "nk") + registerID := flow.NewRegisterID("b", "nk") - v, err := view.Get(registerID.Owner, registerID.Controller, registerID.Key) + v, err := view.Get(registerID.Owner, registerID.Key) require.NoError(t, err) require.Empty(t, v) diff --git a/model/flow/ledger.go b/model/flow/ledger.go index bb005fdeb5e..2361824c0f4 100644 --- a/model/flow/ledger.go +++ b/model/flow/ledger.go @@ -10,18 +10,16 @@ import ( ) type RegisterID struct { - Owner string - Controller string - Key string + Owner string + Key string } -// this function returns a string format of a RegisterID in the form '%x/%x/%x' +// this function returns a string format of a RegisterID in the form '%x/%x' // it has been optimized to avoid the memory allocations inside Sprintf func (r *RegisterID) String() string { ownerLen := len(r.Owner) - controllerLen := len(r.Controller) - requiredLen := ((ownerLen + controllerLen + len(r.Key)) * 2) + 2 + requiredLen := ((ownerLen + len(r.Key)) * 2) + 1 arr 
:= make([]byte, requiredLen) @@ -29,11 +27,7 @@ func (r *RegisterID) String() string { arr[2*ownerLen] = byte('/') - hex.Encode(arr[(2*ownerLen)+1:], []byte(r.Controller)) - - arr[2*(ownerLen+controllerLen)+1] = byte('/') - - hex.Encode(arr[2*(ownerLen+controllerLen+1):], []byte(r.Key)) + hex.Encode(arr[(2*ownerLen)+1:], []byte(r.Key)) return string(arr) } @@ -45,11 +39,10 @@ func (r *RegisterID) Bytes() []byte { return fingerprint.Fingerprint(r) } -func NewRegisterID(owner, controller, key string) RegisterID { +func NewRegisterID(owner, key string) RegisterID { return RegisterID{ - Owner: owner, - Controller: controller, - Key: key, + Owner: owner, + Key: key, } } @@ -71,8 +64,6 @@ func (d RegisterEntries) Len() int { func (d RegisterEntries) Less(i, j int) bool { if d[i].Key.Owner != d[j].Key.Owner { return d[i].Key.Owner < d[j].Key.Owner - } else if d[i].Key.Controller != d[j].Key.Controller { - return d[i].Key.Controller < d[j].Key.Controller } return d[i].Key.Key < d[j].Key.Key } diff --git a/model/flow/ledger_test.go b/model/flow/ledger_test.go index ec612b0d86f..c5a615399ee 100644 --- a/model/flow/ledger_test.go +++ b/model/flow/ledger_test.go @@ -16,15 +16,13 @@ var length int func BenchmarkString(b *testing.B) { var r = RegisterID{ - Owner: "theowner", - Controller: "thecontroller", - Key: "123412341234", + Owner: "theowner", + Key: "123412341234", } ownerLen := len(r.Owner) - controllerLen := len(r.Controller) - requiredLen := ((ownerLen + controllerLen + len(r.Key)) * 2) + 2 + requiredLen := ((ownerLen + len(r.Key)) * 2) + 1 arr := make([]byte, requiredLen) @@ -32,11 +30,7 @@ func BenchmarkString(b *testing.B) { arr[2*ownerLen] = byte('/') - hex.Encode(arr[(2*ownerLen)+1:], []byte(r.Controller)) - - arr[2*(ownerLen+controllerLen)+1] = byte('/') - - hex.Encode(arr[2*(ownerLen+controllerLen+1):], []byte(r.Key)) + hex.Encode(arr[(2*ownerLen)+1:], []byte(r.Key)) s := string(arr) length = len(s) @@ -45,12 +39,11 @@ func BenchmarkString(b *testing.B) { func 
BenchmarkOriginalString(b *testing.B) { var r = RegisterID{ - Owner: "theowner", - Controller: "thecontroller", - Key: "123412341234", + Owner: "theowner", + Key: "123412341234", } - ret := fmt.Sprintf("%x/%x/%x", r.Owner, r.Controller, r.Key) + ret := fmt.Sprintf("%x/%x", r.Owner, r.Key) length = len(ret) } diff --git a/module/chunks/chunkVerifier.go b/module/chunks/chunkVerifier.go index 2f249eb7155..64fe46b7f86 100644 --- a/module/chunks/chunkVerifier.go +++ b/module/chunks/chunkVerifier.go @@ -127,9 +127,9 @@ func (fcv *ChunkVerifier) verifyTransactionsInContext(context fvm.Context, chunk // are not expanded and values are unknown. unknownRegTouch := make(map[string]*ledger.Key) var problematicTx flow.Identifier - getRegister := func(owner, controller, key string) (flow.RegisterValue, error) { + getRegister := func(owner, key string) (flow.RegisterValue, error) { // check if register has been provided in the chunk data pack - registerID := flow.NewRegisterID(owner, controller, key) + registerID := flow.NewRegisterID(owner, key) registerKey := executionState.RegisterIDToKey(registerID) diff --git a/module/chunks/chunkVerifier_test.go b/module/chunks/chunkVerifier_test.go index db40e0a13a6..79faf6a364b 100644 --- a/module/chunks/chunkVerifier_test.go +++ b/module/chunks/chunkVerifier_test.go @@ -251,12 +251,12 @@ func GetBaselineVerifiableChunk(t *testing.T, script string, system bool) *verif blockID := block.ID() // registerTouch and State setup - id1 := flow.NewRegisterID("00", "", "") + id1 := flow.NewRegisterID("00", "") value1 := []byte{'a'} id2Bytes := make([]byte, 32) id2Bytes[0] = byte(5) - id2 := flow.NewRegisterID("05", "", "") + id2 := flow.NewRegisterID("05", "") value2 := []byte{'b'} UpdatedValue2 := []byte{'B'} @@ -375,12 +375,12 @@ func (vm *vmMock) Run(ctx fvm.Context, proc fvm.Procedure, led state.View, progr switch string(tx.Transaction.Script) { case "wrongEndState": // add updates to the ledger - _ = led.Set("00", "", "", []byte{'F'}) + _ = 
led.Set("00", "", []byte{'F'}) tx.Logs = []string{"log1", "log2"} tx.Events = eventsList case "failedTx": // add updates to the ledger - _ = led.Set("05", "", "", []byte{'B'}) + _ = led.Set("05", "", []byte{'B'}) tx.Err = &fvmErrors.CadenceRuntimeError{} // inside the runtime (e.g. div by zero, access account) case "eventsMismatch": tx.Events = append(eventsList, flow.Event{ @@ -391,9 +391,9 @@ func (vm *vmMock) Run(ctx fvm.Context, proc fvm.Procedure, led state.View, progr Payload: []byte{88}, }) default: - _, _ = led.Get("00", "", "") - _, _ = led.Get("05", "", "") - _ = led.Set("05", "", "", []byte{'B'}) + _, _ = led.Get("00", "") + _, _ = led.Get("05", "") + _ = led.Set("05", "", []byte{'B'}) tx.Logs = []string{"log1", "log2"} tx.Events = eventsList } @@ -412,9 +412,9 @@ func (vm *vmSystemOkMock) Run(ctx fvm.Context, proc fvm.Procedure, led state.Vie tx.ServiceEvents = []flow.Event{epochSetupEvent} // add "default" interaction expected in tests - _, _ = led.Get("00", "", "") - _, _ = led.Get("05", "", "") - _ = led.Set("05", "", "", []byte{'B'}) + _, _ = led.Get("00", "") + _, _ = led.Get("05", "") + _ = led.Set("05", "", []byte{'B'}) tx.Logs = []string{"log1", "log2"} return nil diff --git a/storage/badger/operation/interactions_test.go b/storage/badger/operation/interactions_test.go index ac7b7fb3e92..d5c95c8968d 100644 --- a/storage/badger/operation/interactions_test.go +++ b/storage/badger/operation/interactions_test.go @@ -17,25 +17,25 @@ import ( func TestStateInteractionsInsertCheckRetrieve(t *testing.T) { unittest.RunWithBadgerDB(t, func(db *badger.DB) { - d1 := delta.NewView(func(owner, controller, key string) (value flow.RegisterValue, err error) { + d1 := delta.NewView(func(owner, key string) (value flow.RegisterValue, err error) { return nil, nil }) - d2 := delta.NewView(func(owner, controller, key string) (value flow.RegisterValue, err error) { + d2 := delta.NewView(func(owner, key string) (value flow.RegisterValue, err error) { return nil, nil }) 
// some set and reads - err := d1.Set(string([]byte("\x89krg\u007fBN\x1d\xf5\xfb\xb8r\xbc4\xbd\x98ռ\xf1\xd0twU\xbf\x16N\xb4?,\xa0&;")), "", "", []byte("zażółć gęślą jaźń")) + err := d1.Set(string([]byte("\x89krg\u007fBN\x1d\xf5\xfb\xb8r\xbc4\xbd\x98ռ\xf1\xd0twU\xbf\x16N\xb4?,\xa0&;")), "", []byte("zażółć gęślą jaźń")) require.NoError(t, err) - err = d1.Set(string([]byte{2}), "", "", []byte("b")) + err = d1.Set(string([]byte{2}), "", []byte("b")) require.NoError(t, err) - err = d1.Set(string([]byte{2}), "", "", []byte("c")) + err = d1.Set(string([]byte{2}), "", []byte("c")) require.NoError(t, err) - _, err = d1.Get(string([]byte{2}), "", "") + _, err = d1.Get(string([]byte{2}), "") require.NoError(t, err) - _, err = d1.Get(string([]byte{3}), "", "") + _, err = d1.Get(string([]byte{3}), "") require.NoError(t, err) interactions := []*delta.Snapshot{&d1.Interactions().Snapshot, &d2.Interactions().Snapshot} diff --git a/utils/debug/registerCache.go b/utils/debug/registerCache.go index 267dcf9aad4..086be61b250 100644 --- a/utils/debug/registerCache.go +++ b/utils/debug/registerCache.go @@ -12,8 +12,8 @@ import ( ) type registerCache interface { - Get(owner, controller, key string) (value []byte, found bool) - Set(owner, controller, key string, value []byte) + Get(owner, key string) (value []byte, found bool) + Set(owner, key string, value []byte) Persist() error } @@ -25,13 +25,13 @@ func newMemRegisterCache() *memRegisterCache { return &memRegisterCache{data: make(map[string]flow.RegisterValue)} } -func (c *memRegisterCache) Get(owner, controller, key string) ([]byte, bool) { - v, found := c.data[owner+"~"+controller+"~"+key] +func (c *memRegisterCache) Get(owner, key string) ([]byte, bool) { + v, found := c.data[owner+"~"+key] return v, found } -func (c *memRegisterCache) Set(owner, controller, key string, value []byte) { - c.data[owner+"~"+controller+"~"+key] = value +func (c *memRegisterCache) Set(owner, key string, value []byte) { + c.data[owner+"~"+key] = value } 
func (c *memRegisterCache) Persist() error { // No-op @@ -70,15 +70,11 @@ func newFileRegisterCache(filePath string) *fileRegisterCache { if err != nil { panic(err) } - controller, err := hex.DecodeString(d.Key.Controller) - if err != nil { - panic(err) - } keyCopy, err := hex.DecodeString(d.Key.Key) if err != nil { panic(err) } - data[string(owner)+"~"+string(controller)+"~"+string(keyCopy)] = d + data[string(owner)+"~"+string(keyCopy)] = d } if err != nil { break @@ -90,21 +86,20 @@ func newFileRegisterCache(filePath string) *fileRegisterCache { return cache } -func (f *fileRegisterCache) Get(owner, controller, key string) ([]byte, bool) { - v, found := f.data[owner+"~"+controller+"~"+key] +func (f *fileRegisterCache) Get(owner, key string) ([]byte, bool) { + v, found := f.data[owner+"~"+key] if found { return v.Value, found } return nil, found } -func (f *fileRegisterCache) Set(owner, controller, key string, value []byte) { +func (f *fileRegisterCache) Set(owner, key string, value []byte) { valueCopy := make([]byte, len(value)) copy(valueCopy, value) fmt.Println(hex.EncodeToString([]byte(owner)), hex.EncodeToString([]byte(key)), len(value)) - f.data[owner+"~"+controller+"~"+key] = flow.RegisterEntry{ + f.data[owner+"~"+key] = flow.RegisterEntry{ Key: flow.NewRegisterID(hex.EncodeToString([]byte(owner)), - hex.EncodeToString([]byte(controller)), hex.EncodeToString([]byte(key))), Value: flow.RegisterValue(valueCopy), } diff --git a/utils/debug/remoteView.go b/utils/debug/remoteView.go index 0c3ce8ab2cb..b7e31a9185a 100644 --- a/utils/debug/remoteView.go +++ b/utils/debug/remoteView.go @@ -146,36 +146,35 @@ func (v *RemoteView) DropDelta() { v.Delta = make(map[string]flow.RegisterValue) } -func (v *RemoteView) Set(owner, controller, key string, value flow.RegisterValue) error { - v.Delta[owner+"~"+controller+"~"+key] = value +func (v *RemoteView) Set(owner, key string, value flow.RegisterValue) error { + v.Delta[owner+"~"+key] = value return nil } -func (v 
*RemoteView) Get(owner, controller, key string) (flow.RegisterValue, error) { +func (v *RemoteView) Get(owner, key string) (flow.RegisterValue, error) { // first check the delta - value, found := v.Delta[owner+"~"+controller+"~"+key] + value, found := v.Delta[owner+"~"+key] if found { return value, nil } // then check the read cache - value, found = v.Cache.Get(owner, controller, key) + value, found = v.Cache.Get(owner, key) if found { return value, nil } // then call the parent (if exist) if v.Parent != nil { - return v.Parent.Get(owner, controller, key) + return v.Parent.Get(owner, key) } // last use the grpc api the req := &execution.GetRegisterAtBlockIDRequest{ - BlockId: []byte(v.BlockID), - RegisterOwner: []byte(owner), - RegisterController: []byte(controller), - RegisterKey: []byte(key), + BlockId: []byte(v.BlockID), + RegisterOwner: []byte(owner), + RegisterKey: []byte(key), } // TODO use a proper context for timeouts @@ -184,7 +183,7 @@ func (v *RemoteView) Get(owner, controller, key string) (flow.RegisterValue, err return nil, err } - v.Cache.Set(owner, controller, key, resp.Value) + v.Cache.Set(owner, key, resp.Value) // append value to the file cache @@ -200,12 +199,12 @@ func (v *RemoteView) RegisterUpdates() ([]flow.RegisterID, []flow.RegisterValue) panic("Not implemented yet") } -func (v *RemoteView) Touch(owner, controller, key string) error { +func (v *RemoteView) Touch(owner, key string) error { // no-op for now return nil } -func (v *RemoteView) Delete(owner, controller, key string) error { - v.Delta[owner+"~"+controller+"~"+key] = nil +func (v *RemoteView) Delete(owner, key string) error { + v.Delta[owner+"~"+key] = nil return nil } diff --git a/utils/unittest/execution_state.go b/utils/unittest/execution_state.go index b6974a4e955..c88b56e6aa3 100644 --- a/utils/unittest/execution_state.go +++ b/utils/unittest/execution_state.go @@ -24,7 +24,7 @@ const ServiceAccountPrivateKeySignAlgo = crypto.ECDSAP256 const ServiceAccountPrivateKeyHashAlgo = 
hash.SHA2_256 // Pre-calculated state commitment with root account with the above private key -const GenesisStateCommitmentHex = "3e84f2892fae21fc922c322b4a86c10c330012fdda678437baedad0c09e05e0b" +const GenesisStateCommitmentHex = "1fbd661d3e7833a92544b4a81253074e8c1b6509a732b584e7c9d1b5384421d1" var GenesisStateCommitment flow.StateCommitment From 096143ebdc0cac3879d53d7d1f2f2305baef7a55 Mon Sep 17 00:00:00 2001 From: lolpuddle Date: Mon, 18 Jul 2022 14:31:05 -0400 Subject: [PATCH 206/223] add more metrics --- .../access/rpc/backend/connection_factory.go | 56 ++++++++++++------- .../rpc/backend/connection_factory_test.go | 24 ++++---- engine/access/rpc/engine.go | 9 +-- module/metrics.go | 16 +++++- module/metrics/access.go | 54 ++++++++++++++++-- module/metrics/noop.go | 6 +- module/mock/access_metrics.go | 29 +++++++++- 7 files changed, 148 insertions(+), 46 deletions(-) diff --git a/engine/access/rpc/backend/connection_factory.go b/engine/access/rpc/backend/connection_factory.go index 1342e34ffbb..4c411b20f9d 100644 --- a/engine/access/rpc/backend/connection_factory.go +++ b/engine/access/rpc/backend/connection_factory.go @@ -56,10 +56,11 @@ type ConnectionFactoryImpl struct { mutex sync.Mutex } -type ConnectionCacheStore struct { +type CachedClient struct { ClientConn *grpc.ClientConn Address string - mutex *sync.Mutex + mutex sync.Mutex + timeout time.Duration } // createConnection creates new gRPC connections to remote node @@ -94,24 +95,24 @@ func (cf *ConnectionFactoryImpl) createConnection(address string, timeout time.D func (cf *ConnectionFactoryImpl) retrieveConnection(grpcAddress string, timeout time.Duration) (*grpc.ClientConn, error) { var conn *grpc.ClientConn - var store *ConnectionCacheStore - + var store *CachedClient + cacheHit := false cf.mutex.Lock() if res, ok := cf.ConnectionsCache.Get(grpcAddress); ok { - store = res.(*ConnectionCacheStore) + cacheHit = true + store = res.(*CachedClient) conn = store.ClientConn - if cf.AccessMetrics != nil 
{ - cf.AccessMetrics.ConnectionFromPoolRetrieved() - } } else { - store = &ConnectionCacheStore{ + store = &CachedClient{ ClientConn: nil, Address: grpcAddress, - mutex: new(sync.Mutex), + timeout: timeout, } - cf.Log.Debug().Str("grpc_conn_added", grpcAddress).Msg("adding grpc connection to pool") + cf.Log.Debug().Str("cached_client_added", grpcAddress).Msg("adding new cached client to pool") cf.ConnectionsCache.Add(grpcAddress, store) - cf.AccessMetrics.ConnectionAddedToPool() + if cf.AccessMetrics != nil { + cf.AccessMetrics.ConnectionAddedToPool() + } } cf.mutex.Unlock() store.mutex.Lock() @@ -125,8 +126,14 @@ func (cf *ConnectionFactoryImpl) retrieveConnection(grpcAddress string, timeout } store.ClientConn = conn if cf.AccessMetrics != nil { + if cacheHit { + cf.AccessMetrics.ConnectionFromPoolUpdated() + } + cf.AccessMetrics.NewConnectionEstablished() cf.AccessMetrics.TotalConnectionsInPool(uint(cf.ConnectionsCache.Len()), cf.CacheSize) } + } else if cf.AccessMetrics != nil { + cf.AccessMetrics.ConnectionFromPoolReused() } return conn, nil } @@ -148,6 +155,7 @@ func (cf *ConnectionFactoryImpl) GetAccessAPIClient(address string) (access.Acce } func (cf *ConnectionFactoryImpl) InvalidateAccessAPIClient(address string) { + cf.Log.Debug().Str("cached_access_client_invalidated", address).Msg("invalidating cached access client") cf.invalidateAPIClient(address, cf.CollectionGRPCPort) } @@ -168,23 +176,31 @@ func (cf *ConnectionFactoryImpl) GetExecutionAPIClient(address string) (executio } func (cf *ConnectionFactoryImpl) InvalidateExecutionAPIClient(address string) { + cf.Log.Debug().Str("cached_execution_client_invalidated", address).Msg("invalidating cached execution client") cf.invalidateAPIClient(address, cf.ExecutionGRPCPort) } func (cf *ConnectionFactoryImpl) invalidateAPIClient(address string, port uint) { grpcAddress, _ := getGRPCAddress(address, port) if res, ok := cf.ConnectionsCache.Get(grpcAddress); ok { - store := res.(*ConnectionCacheStore) - conn := 
store.ClientConn - store.mutex.Lock() - store.ClientConn = nil - store.mutex.Unlock() - // allow time for any existing requests to finish before closing the connection - time.Sleep(DefaultClientTimeout) - conn.Close() + store := res.(CachedClient) + store.Close() + if cf.AccessMetrics != nil { + cf.AccessMetrics.ConnectionFromPoolInvalidated() + } } } +func (s *CachedClient) Close() { + s.mutex.Lock() + conn := s.ClientConn + s.ClientConn = nil + s.mutex.Unlock() + // allow time for any existing requests to finish before closing the connection + time.Sleep(s.timeout) + conn.Close() +} + // getExecutionNodeAddress translates flow.Identity address to the GRPC address of the node by switching the port to the // GRPC port from the libp2p port func getGRPCAddress(address string, grpcPort uint) (string, error) { diff --git a/engine/access/rpc/backend/connection_factory_test.go b/engine/access/rpc/backend/connection_factory_test.go index 22e7a6f64c0..ce11a19abf2 100644 --- a/engine/access/rpc/backend/connection_factory_test.go +++ b/engine/access/rpc/backend/connection_factory_test.go @@ -38,7 +38,7 @@ func TestProxyAccessAPI(t *testing.T) { connectionFactory.CollectionGRPCPort = cn.port // set the connection pool cache size cache, _ := lru.NewWithEvict(5, func(_, evictedValue interface{}) { - evictedValue.(*ConnectionCacheStore).ClientConn.Close() + evictedValue.(*CachedClient).ClientConn.Close() }) connectionFactory.ConnectionsCache = cache // set metrics reporting @@ -76,7 +76,7 @@ func TestProxyExecutionAPI(t *testing.T) { connectionFactory.ExecutionGRPCPort = en.port // set the connection pool cache size cache, _ := lru.NewWithEvict(5, func(_, evictedValue interface{}) { - evictedValue.(*ConnectionCacheStore).ClientConn.Close() + evictedValue.(*CachedClient).ClientConn.Close() }) connectionFactory.ConnectionsCache = cache // set metrics reporting @@ -114,7 +114,7 @@ func TestProxyAccessAPIConnectionReuse(t *testing.T) { connectionFactory.CollectionGRPCPort = cn.port 
// set the connection pool cache size cache, _ := lru.NewWithEvict(5, func(_, evictedValue interface{}) { - evictedValue.(*ConnectionCacheStore).ClientConn.Close() + evictedValue.(*CachedClient).ClientConn.Close() }) connectionFactory.ConnectionsCache = cache // set metrics reporting @@ -133,7 +133,7 @@ func TestProxyAccessAPIConnectionReuse(t *testing.T) { var conn *grpc.ClientConn res, ok := connectionFactory.ConnectionsCache.Get(proxyConnectionFactory.targetAddress) assert.True(t, ok) - conn = res.(*ConnectionCacheStore).ClientConn + conn = res.(*CachedClient).ClientConn // check if api client can be rebuilt with retrieved connection accessAPIClient := access.NewAccessAPIClient(conn) @@ -159,7 +159,7 @@ func TestProxyExecutionAPIConnectionReuse(t *testing.T) { connectionFactory.ExecutionGRPCPort = en.port // set the connection pool cache size cache, _ := lru.NewWithEvict(5, func(_, evictedValue interface{}) { - evictedValue.(*ConnectionCacheStore).ClientConn.Close() + evictedValue.(*CachedClient).ClientConn.Close() }) connectionFactory.ConnectionsCache = cache // set metrics reporting @@ -178,7 +178,7 @@ func TestProxyExecutionAPIConnectionReuse(t *testing.T) { var conn *grpc.ClientConn res, ok := connectionFactory.ConnectionsCache.Get(proxyConnectionFactory.targetAddress) assert.True(t, ok) - conn = res.(*ConnectionCacheStore).ClientConn + conn = res.(*CachedClient).ClientConn // check if api client can be rebuilt with retrieved connection executionAPIClient := execution.NewExecutionAPIClient(conn) @@ -211,7 +211,7 @@ func TestExecutionNodeClientTimeout(t *testing.T) { connectionFactory.ExecutionNodeGRPCTimeout = timeout // set the connection pool cache size cache, _ := lru.NewWithEvict(5, func(_, evictedValue interface{}) { - evictedValue.(*ConnectionCacheStore).ClientConn.Close() + evictedValue.(*CachedClient).ClientConn.Close() }) connectionFactory.ConnectionsCache = cache // set metrics reporting @@ -252,7 +252,7 @@ func TestCollectionNodeClientTimeout(t 
*testing.T) { connectionFactory.CollectionNodeGRPCTimeout = timeout // set the connection pool cache size cache, _ := lru.NewWithEvict(5, func(_, evictedValue interface{}) { - evictedValue.(*ConnectionCacheStore).ClientConn.Close() + evictedValue.(*CachedClient).ClientConn.Close() }) connectionFactory.ConnectionsCache = cache // set metrics reporting @@ -293,7 +293,7 @@ func TestConnectionPoolFull(t *testing.T) { connectionFactory.CollectionGRPCPort = cn1.port // set the connection pool cache size cache, _ := lru.NewWithEvict(2, func(_, evictedValue interface{}) { - evictedValue.(*ConnectionCacheStore).ClientConn.Close() + evictedValue.(*CachedClient).ClientConn.Close() }) connectionFactory.ConnectionsCache = cache // set metrics reporting @@ -359,7 +359,7 @@ func TestConnectionPoolStale(t *testing.T) { // set the connection pool cache size cache, _ := lru.NewWithEvict(5, func(_, evictedValue interface{}) { - evictedValue.(*ConnectionCacheStore).ClientConn.Close() + evictedValue.(*CachedClient).ClientConn.Close() }) connectionFactory.ConnectionsCache = cache // set metrics reporting @@ -377,7 +377,7 @@ func TestConnectionPoolStale(t *testing.T) { // close connection to simulate something "going wrong" with our stored connection res, _ := connectionFactory.ConnectionsCache.Get(proxyConnectionFactory.targetAddress) - res.(*ConnectionCacheStore).ClientConn.Close() + res.(*CachedClient).ClientConn.Close() ctx := context.Background() // make the call to the collection node (should fail, connection closed) @@ -391,7 +391,7 @@ func TestConnectionPoolStale(t *testing.T) { var conn *grpc.ClientConn res, ok := connectionFactory.ConnectionsCache.Get(proxyConnectionFactory.targetAddress) assert.True(t, ok) - conn = res.(*ConnectionCacheStore).ClientConn + conn = res.(*CachedClient).ClientConn // check if api client can be rebuilt with retrieved connection accessAPIClient := access.NewAccessAPIClient(conn) diff --git a/engine/access/rpc/engine.go b/engine/access/rpc/engine.go 
index 2936a6564b9..15219f88dfe 100644 --- a/engine/access/rpc/engine.go +++ b/engine/access/rpc/engine.go @@ -137,11 +137,12 @@ func NewBuilder(log zerolog.Logger, cacheSize = backend.DefaultConnectionPoolSize } cache, err := lru.NewWithEvict(int(cacheSize), func(_, evictedValue interface{}) { - // allow time for any existing requests to finish before closing the connection - time.Sleep(backend.DefaultClientTimeout) - store := evictedValue.(*backend.ConnectionCacheStore) + store := evictedValue.(*backend.CachedClient) + store.Close() log.Debug().Str("grpc_conn_evicted", store.Address).Msg("closing grpc connection evicted from pool") - store.ClientConn.Close() + if accessMetrics != nil { + accessMetrics.ConnectionFromPoolEvicted() + } }) if err != nil { return nil, fmt.Errorf("could not initialize connection pool cache: %w", err) diff --git a/module/metrics.go b/module/metrics.go index 03e26b870f6..2f8482963d4 100644 --- a/module/metrics.go +++ b/module/metrics.go @@ -369,11 +369,23 @@ type AccessMetrics interface { // TotalConnectionsInPool updates the number connections to collection/execution nodes stored in the pool, and the size of the pool TotalConnectionsInPool(connectionCount uint, connectionPoolSize uint) - // ConnectionFromPoolRetrieved tracks the number of times a connection to a collection/execution node is retrieved from the connection pool - ConnectionFromPoolRetrieved() + // ConnectionFromPoolReused tracks the number of times a connection to a collection/execution node is reused from the connection pool + ConnectionFromPoolReused() // ConnectionAddedToPool tracks the number of times a collection/execution node is added to the connection pool ConnectionAddedToPool() + + // NewConnectionEstablished tracks the number of times a new grpc connection is established + NewConnectionEstablished() + + // ConnectionFromPoolInvalidated tracks the number of times a cached grpc connection is invalidated and closed + ConnectionFromPoolInvalidated() + + // 
ConnectionFromPoolUpdated tracks the number of times a cached connection is updated + ConnectionFromPoolUpdated() + + // ConnectionFromPoolEvicted tracks the number of times a cached connection is evicted from the cache + ConnectionFromPoolEvicted() } type ExecutionMetrics interface { diff --git a/module/metrics/access.go b/module/metrics/access.go index 9f8d978cb46..4dcfc6e6f38 100644 --- a/module/metrics/access.go +++ b/module/metrics/access.go @@ -6,9 +6,13 @@ import ( ) type AccessCollector struct { - connectionReused prometheus.Counter - connectionsInPool *prometheus.GaugeVec - connectionAdded prometheus.Counter + connectionReused prometheus.Counter + connectionsInPool *prometheus.GaugeVec + connectionAdded prometheus.Counter + connectionEstablished prometheus.Counter + connectionInvalidated prometheus.Counter + connectionUpdated prometheus.Counter + connectionEvicted prometheus.Counter } func NewAccessCollector() *AccessCollector { @@ -29,14 +33,38 @@ func NewAccessCollector() *AccessCollector { Name: "connection_added", Namespace: namespaceAccess, Subsystem: subsystemConnectionPool, - Help: "counter for the number of times connections are added", + Help: "counter for the number of times connections are added to the pool", + }), + connectionEstablished: promauto.NewCounter(prometheus.CounterOpts{ + Name: "connection_established", + Namespace: namespaceAccess, + Subsystem: subsystemConnectionPool, + Help: "counter for the number of times connections are established", + }), + connectionInvalidated: promauto.NewCounter(prometheus.CounterOpts{ + Name: "connection_invalidated", + Namespace: namespaceAccess, + Subsystem: subsystemConnectionPool, + Help: "counter for the number of times connections are invalidated", + }), + connectionUpdated: promauto.NewCounter(prometheus.CounterOpts{ + Name: "connection_updated", + Namespace: namespaceAccess, + Subsystem: subsystemConnectionPool, + Help: "counter for the number of times existing connections from the pool are 
updated", + }), + connectionEvicted: promauto.NewCounter(prometheus.CounterOpts{ + Name: "connection_evicted", + Namespace: namespaceAccess, + Subsystem: subsystemConnectionPool, + Help: "counter for the number of times a cached connection is evicted from the connection pool", }), } return ac } -func (ac *AccessCollector) ConnectionFromPoolRetrieved() { +func (ac *AccessCollector) ConnectionFromPoolReused() { ac.connectionReused.Inc() } @@ -48,3 +76,19 @@ func (ac *AccessCollector) TotalConnectionsInPool(connectionCount uint, connecti func (ac *AccessCollector) ConnectionAddedToPool() { ac.connectionAdded.Inc() } + +func (ac *AccessCollector) NewConnectionEstablished() { + ac.connectionEstablished.Inc() +} + +func (ac *AccessCollector) ConnectionFromPoolInvalidated() { + ac.connectionInvalidated.Inc() +} + +func (ac *AccessCollector) ConnectionFromPoolUpdated() { + ac.connectionUpdated.Inc() +} + +func (ac *AccessCollector) ConnectionFromPoolEvicted() { + ac.connectionEvicted.Inc() +} diff --git a/module/metrics/noop.go b/module/metrics/noop.go index 12d4e58986b..c3c8027b033 100644 --- a/module/metrics/noop.go +++ b/module/metrics/noop.go @@ -111,8 +111,12 @@ func (nc *NoopCollector) OnBlockConsumerJobDone(uint64) func (nc *NoopCollector) OnChunkConsumerJobDone(uint64) {} func (nc *NoopCollector) OnChunkDataPackResponseReceivedFromNetworkByRequester() {} func (nc *NoopCollector) TotalConnectionsInPool(connectionCount uint, connectionPoolSize uint) {} -func (nc *NoopCollector) ConnectionFromPoolRetrieved() {} +func (nc *NoopCollector) ConnectionFromPoolReused() {} func (nc *NoopCollector) ConnectionAddedToPool() {} +func (nc *NoopCollector) NewConnectionEstablished() {} +func (nc *NoopCollector) ConnectionFromPoolInvalidated() {} +func (nc *NoopCollector) ConnectionFromPoolUpdated() {} +func (nc *NoopCollector) ConnectionFromPoolEvicted() {} func (nc *NoopCollector) StartBlockReceivedToExecuted(blockID flow.Identifier) {} func (nc *NoopCollector) 
FinishBlockReceivedToExecuted(blockID flow.Identifier) {} func (nc *NoopCollector) ExecutionComputationUsedPerBlock(computation uint64) {} diff --git a/module/mock/access_metrics.go b/module/mock/access_metrics.go index 3eaa4d457f4..7ecb2f8e6a2 100644 --- a/module/mock/access_metrics.go +++ b/module/mock/access_metrics.go @@ -9,8 +9,33 @@ type AccessMetrics struct { mock.Mock } -// ConnectionFromPoolRetrieved provides a mock function with given fields: -func (_m *AccessMetrics) ConnectionFromPoolRetrieved() { +// ConnectionAddedToPool provides a mock function with given fields: +func (_m *AccessMetrics) ConnectionAddedToPool() { + _m.Called() +} + +// ConnectionFromPoolEvicted provides a mock function with given fields: +func (_m *AccessMetrics) ConnectionFromPoolEvicted() { + _m.Called() +} + +// ConnectionFromPoolInvalidated provides a mock function with given fields: +func (_m *AccessMetrics) ConnectionFromPoolInvalidated() { + _m.Called() +} + +// ConnectionFromPoolReused provides a mock function with given fields: +func (_m *AccessMetrics) ConnectionFromPoolReused() { + _m.Called() +} + +// ConnectionFromPoolUpdated provides a mock function with given fields: +func (_m *AccessMetrics) ConnectionFromPoolUpdated() { + _m.Called() +} + +// NewConnectionEstablished provides a mock function with given fields: +func (_m *AccessMetrics) NewConnectionEstablished() { _m.Called() } From 68e1a6c4562a24aa5fa3201f4ff5ce3a9e449a9b Mon Sep 17 00:00:00 2001 From: lolpuddle Date: Mon, 18 Jul 2022 14:52:10 -0400 Subject: [PATCH 207/223] lint fix --- engine/access/rpc/backend/connection_factory.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/engine/access/rpc/backend/connection_factory.go b/engine/access/rpc/backend/connection_factory.go index 4c411b20f9d..a28f3e4aad7 100644 --- a/engine/access/rpc/backend/connection_factory.go +++ b/engine/access/rpc/backend/connection_factory.go @@ -183,7 +183,7 @@ func (cf *ConnectionFactoryImpl) 
InvalidateExecutionAPIClient(address string) { func (cf *ConnectionFactoryImpl) invalidateAPIClient(address string, port uint) { grpcAddress, _ := getGRPCAddress(address, port) if res, ok := cf.ConnectionsCache.Get(grpcAddress); ok { - store := res.(CachedClient) + store := res.(*CachedClient) store.Close() if cf.AccessMetrics != nil { cf.AccessMetrics.ConnectionFromPoolInvalidated() From 3897ba25eeccd458f0e7d4540e4b1c433528d487 Mon Sep 17 00:00:00 2001 From: Tony Z Date: Mon, 18 Jul 2022 11:43:45 -0700 Subject: [PATCH 208/223] [Integration] splitting Cadence files from go source --- integration/utils/scripts.go | 210 ++---------------- .../scripts/addKeyToAccountTransaction.cdc | 15 ++ .../utils/scripts/compHeavyTransaction.cdc | 8 + .../scripts/createAccountsTransaction.cdc | 31 +++ .../deployingMyFavContractTransaction.cdc | 5 + .../utils/scripts/eventHeavyTransaction.cdc | 8 + .../utils/scripts/ledgerHeavyTransaction.cdc | 8 + integration/utils/scripts/myFavContract.cdc | 80 +++++++ .../scripts/tokenTransferTransaction.cdc | 20 ++ 9 files changed, 194 insertions(+), 191 deletions(-) create mode 100644 integration/utils/scripts/addKeyToAccountTransaction.cdc create mode 100644 integration/utils/scripts/compHeavyTransaction.cdc create mode 100644 integration/utils/scripts/createAccountsTransaction.cdc create mode 100644 integration/utils/scripts/deployingMyFavContractTransaction.cdc create mode 100644 integration/utils/scripts/eventHeavyTransaction.cdc create mode 100644 integration/utils/scripts/ledgerHeavyTransaction.cdc create mode 100644 integration/utils/scripts/myFavContract.cdc create mode 100644 integration/utils/scripts/tokenTransferTransaction.cdc diff --git a/integration/utils/scripts.go b/integration/utils/scripts.go index 689ed3e3ce3..5ab94947801 100644 --- a/integration/utils/scripts.go +++ b/integration/utils/scripts.go @@ -1,6 +1,7 @@ package utils import ( + _ "embed" "encoding/hex" "fmt" "strings" @@ -9,28 +10,8 @@ import ( flowsdk 
"github.com/onflow/flow-go-sdk" ) -const tokenTransferTransactionTemplate = ` -import FungibleToken from 0xFUNGIBLETOKENADDRESS -import FlowToken from 0xTOKENADDRESS - -transaction(amount: UFix64, to: Address) { - let sentVault: @FungibleToken.Vault - - prepare(signer: AuthAccount) { - let vaultRef = signer.borrow<&FlowToken.Vault>(from: /storage/flowTokenVault) - ?? panic("Could not borrow reference to the owner's Vault!") - self.sentVault <- vaultRef.withdraw(amount: amount) - } - - execute { - let receiverRef = getAccount(to) - .getCapability(/public/flowTokenReceiver) - .borrow<&{FungibleToken.Receiver}>() - ?? panic("Could not borrow receiver reference to the recipient's Vault") - receiverRef.deposit(from: <-self.sentVault) - } -} -` +//go:embed scripts/tokenTransferTransaction.cdc +var tokenTransferTransactionTemplate string // TokenTransferTransaction returns a transaction script for transferring `amount` flow tokens to `toAddr` address func TokenTransferTransaction(ftAddr, flowToken, toAddr *flowsdk.Address, amount float64) (*flowsdk.Transaction, error) { @@ -58,202 +39,49 @@ func TokenTransferTransaction(ftAddr, flowToken, toAddr *flowsdk.Address, amount return tx, nil } +//go:embed scripts/addKeyToAccountTransaction.cdc +var addKeyToAccountTransactionTemplate string + // AddKeyToAccountScript returns a transaction script to add keys to an account func AddKeyToAccountScript() ([]byte, error) { - return []byte(` - transaction(keys: [[UInt8]]) { - prepare(signer: AuthAccount) { - for key in keys { - let publicKey = PublicKey( - publicKey: key, - signatureAlgorithm: SignatureAlgorithm.ECDSA_P256 - ) - signer.keys.add( - publicKey: publicKey, - hashAlgorithm: HashAlgorithm.SHA3_256, - weight: 1000.0 - ) - } - } - } - `), nil + return []byte(addKeyToAccountTransactionTemplate), nil } -const createAccountsScriptTemplate = ` -import FungibleToken from 0x%s -import FlowToken from 0x%s - -transaction(publicKey: [UInt8], count: Int, initialTokenAmount: UFix64) { - 
prepare(signer: AuthAccount) { - let vault = signer.borrow<&FlowToken.Vault>(from: /storage/flowTokenVault) - ?? panic("Could not borrow reference to the owner's Vault") - - var i = 0 - while i < count { - let account = AuthAccount(payer: signer) - let publicKey2 = PublicKey( - publicKey: publicKey, - signatureAlgorithm: SignatureAlgorithm.ECDSA_P256 - ) - account.keys.add( - publicKey: publicKey2, - hashAlgorithm: HashAlgorithm.SHA3_256, - weight: 1000.0 - ) - - let receiver = account.getCapability(/public/flowTokenReceiver) - .borrow<&{FungibleToken.Receiver}>() - ?? panic("Could not borrow receiver reference to the recipient's Vault") - - receiver.deposit(from: <-vault.withdraw(amount: initialTokenAmount)) - - i = i + 1 - } - } -} -` +//go:embed scripts/createAccountsTransaction.cdc +var createAccountsScriptTemplate string // CreateAccountsScript returns a transaction script for creating an account func CreateAccountsScript(fungibleToken, flowToken flowsdk.Address) []byte { return []byte(fmt.Sprintf(createAccountsScriptTemplate, fungibleToken, flowToken)) } -const myFavContract = ` -access(all) contract MyFavContract { - - init() { - self.itemCounter = UInt32(0) - self.items = [] - } - - // items - access(all) event NewItemAddedEvent(id: UInt32, metadata: {String: String}) - - access(self) var itemCounter: UInt32 - - access(all) struct Item { - - pub let itemID: UInt32 - - pub let metadata: {String: String} - - init(_ metadata: {String: String}) { - self.itemID = MyFavContract.itemCounter - self.metadata = metadata +//go:embed scripts/myFavContract.cdc +var myFavContract string - // inc the counter - MyFavContract.itemCounter = MyFavContract.itemCounter + UInt32(1) - - // emit event - emit NewItemAddedEvent(id: self.itemID, metadata: self.metadata) - } - } - - access(self) var items: [Item] - - access(all) fun AddItem(_ metadata: {String: String}){ - let item = Item(metadata) - self.items.append(item) - } - - access(all) fun AddManyRandomItems(_ n: Int){ - var i 
= 0 - while i < n { - MyFavContract.AddItem({"data": "ABCDEFGHIJKLMNOP"}) - i = i + 1 - } - } - - // heavy operations - // computation heavy function - access(all) fun ComputationHeavy() { - var s: Int256 = 1024102410241024 - var i = 0 - var a = Int256(7) - var b = Int256(5) - var c = Int256(2) - while i < 15000 { - s = s * a - s = s / b - s = s / c - i = i + 1 - } - log(i) - } - - access(all) event LargeEvent(value: Int256, str: String, list: [UInt256], dic: {String: String}) - - // event heavy function - access(all) fun EventHeavy() { - var s: Int256 = 1024102410241024 - var i = 0 - - while i < 220 { - emit LargeEvent(value: s, str: s.toString(), list:[], dic:{s.toString():s.toString()}) - i = i + 1 - } - log(i) - } - - access(all) fun LedgerInteractionHeavy() { - MyFavContract.AddManyRandomItems(700) - } -} -` - -const deployingMyFavContractScriptTemplate = ` -transaction { - prepare(signer: AuthAccount) { - signer.contracts.add(name: "%s", code: "%s".decodeHex()) - } -} -` +//go:embed scripts/deployingMyFavContractTransaction.cdc +var deployingMyFavContractScriptTemplate string func DeployingMyFavContractScript() []byte { return []byte(fmt.Sprintf(deployingMyFavContractScriptTemplate, "MyFavContract", hex.EncodeToString([]byte(myFavContract)))) } -const eventHeavyScriptTemplate = ` -import MyFavContract from 0x%s - -transaction { - prepare(acct: AuthAccount) {} - execute { - MyFavContract.EventHeavy() - } -} -` +//go:embed scripts/eventHeavyTransaction.cdc +var eventHeavyScriptTemplate string func EventHeavyScript(favContractAddress flowsdk.Address) []byte { return []byte(fmt.Sprintf(eventHeavyScriptTemplate, favContractAddress)) } -const compHeavyScriptTemplate = ` -import MyFavContract from 0x%s - -transaction { - prepare(acct: AuthAccount) {} - execute { - MyFavContract.ComputationHeavy() - } -} -` +//go:embed scripts/compHeavyTransaction.cdc +var compHeavyScriptTemplate string func ComputationHeavyScript(favContractAddress flowsdk.Address) []byte { return 
[]byte(fmt.Sprintf(compHeavyScriptTemplate, favContractAddress)) } -const ledgerHeavyScriptTemplate = ` -import MyFavContract from 0x%s - -transaction { - prepare(acct: AuthAccount) {} - execute { - MyFavContract.LedgerInteractionHeavy() - } -} -` +//go:embed scripts/ledgerHeavyTransaction.cdc +var ledgerHeavyScriptTemplate string func LedgerHeavyScript(favContractAddress flowsdk.Address) []byte { return []byte(fmt.Sprintf(ledgerHeavyScriptTemplate, favContractAddress)) diff --git a/integration/utils/scripts/addKeyToAccountTransaction.cdc b/integration/utils/scripts/addKeyToAccountTransaction.cdc new file mode 100644 index 00000000000..dae8ccb9b47 --- /dev/null +++ b/integration/utils/scripts/addKeyToAccountTransaction.cdc @@ -0,0 +1,15 @@ +transaction(keys: [[UInt8]]) { + prepare(signer: AuthAccount) { + for key in keys { + let publicKey = PublicKey( + publicKey: key, + signatureAlgorithm: SignatureAlgorithm.ECDSA_P256 + ) + signer.keys.add( + publicKey: publicKey, + hashAlgorithm: HashAlgorithm.SHA3_256, + weight: 1000.0 + ) + } + } +} diff --git a/integration/utils/scripts/compHeavyTransaction.cdc b/integration/utils/scripts/compHeavyTransaction.cdc new file mode 100644 index 00000000000..00215f27b8f --- /dev/null +++ b/integration/utils/scripts/compHeavyTransaction.cdc @@ -0,0 +1,8 @@ +import MyFavContract from 0x%s + +transaction { + prepare(acct: AuthAccount) {} + execute { + MyFavContract.ComputationHeavy() + } +} diff --git a/integration/utils/scripts/createAccountsTransaction.cdc b/integration/utils/scripts/createAccountsTransaction.cdc new file mode 100644 index 00000000000..8f925b996e2 --- /dev/null +++ b/integration/utils/scripts/createAccountsTransaction.cdc @@ -0,0 +1,31 @@ +import FungibleToken from 0x%s +import FlowToken from 0x%s + +transaction(publicKey: [UInt8], count: Int, initialTokenAmount: UFix64) { + prepare(signer: AuthAccount) { + let vault = signer.borrow<&FlowToken.Vault>(from: /storage/flowTokenVault) + ?? 
panic("Could not borrow reference to the owner's Vault") + + var i = 0 + while i < count { + let account = AuthAccount(payer: signer) + let publicKey2 = PublicKey( + publicKey: publicKey, + signatureAlgorithm: SignatureAlgorithm.ECDSA_P256 + ) + account.keys.add( + publicKey: publicKey2, + hashAlgorithm: HashAlgorithm.SHA3_256, + weight: 1000.0 + ) + + let receiver = account.getCapability(/public/flowTokenReceiver) + .borrow<&{FungibleToken.Receiver}>() + ?? panic("Could not borrow receiver reference to the recipient's Vault") + + receiver.deposit(from: <-vault.withdraw(amount: initialTokenAmount)) + + i = i + 1 + } + } +} diff --git a/integration/utils/scripts/deployingMyFavContractTransaction.cdc b/integration/utils/scripts/deployingMyFavContractTransaction.cdc new file mode 100644 index 00000000000..921ec76e26d --- /dev/null +++ b/integration/utils/scripts/deployingMyFavContractTransaction.cdc @@ -0,0 +1,5 @@ +transaction { + prepare(signer: AuthAccount) { + signer.contracts.add(name: "%s", code: "%s".decodeHex()) + } +} diff --git a/integration/utils/scripts/eventHeavyTransaction.cdc b/integration/utils/scripts/eventHeavyTransaction.cdc new file mode 100644 index 00000000000..b9df85b2192 --- /dev/null +++ b/integration/utils/scripts/eventHeavyTransaction.cdc @@ -0,0 +1,8 @@ +import MyFavContract from 0x%s + +transaction { + prepare(acct: AuthAccount) {} + execute { + MyFavContract.EventHeavy() + } +} diff --git a/integration/utils/scripts/ledgerHeavyTransaction.cdc b/integration/utils/scripts/ledgerHeavyTransaction.cdc new file mode 100644 index 00000000000..0b07e590c8a --- /dev/null +++ b/integration/utils/scripts/ledgerHeavyTransaction.cdc @@ -0,0 +1,8 @@ +import MyFavContract from 0x%s + +transaction { + prepare(acct: AuthAccount) {} + execute { + MyFavContract.LedgerInteractionHeavy() + } +} diff --git a/integration/utils/scripts/myFavContract.cdc b/integration/utils/scripts/myFavContract.cdc new file mode 100644 index 00000000000..48182a11431 --- /dev/null 
+++ b/integration/utils/scripts/myFavContract.cdc @@ -0,0 +1,80 @@ +access(all) contract MyFavContract { + + init() { + self.itemCounter = UInt32(0) + self.items = [] + } + + // items + access(all) event NewItemAddedEvent(id: UInt32, metadata: {String: String}) + + access(self) var itemCounter: UInt32 + + access(all) struct Item { + + pub let itemID: UInt32 + + pub let metadata: {String: String} + + init(_ metadata: {String: String}) { + self.itemID = MyFavContract.itemCounter + self.metadata = metadata + + // inc the counter + MyFavContract.itemCounter = MyFavContract.itemCounter + UInt32(1) + + // emit event + emit NewItemAddedEvent(id: self.itemID, metadata: self.metadata) + } + } + + access(self) var items: [Item] + + access(all) fun AddItem(_ metadata: {String: String}){ + let item = Item(metadata) + self.items.append(item) + } + + access(all) fun AddManyRandomItems(_ n: Int){ + var i = 0 + while i < n { + MyFavContract.AddItem({"data": "ABCDEFGHIJKLMNOP"}) + i = i + 1 + } + } + + // heavy operations + // computation heavy function + access(all) fun ComputationHeavy() { + var s: Int256 = 1024102410241024 + var i = 0 + var a = Int256(7) + var b = Int256(5) + var c = Int256(2) + while i < 15000 { + s = s * a + s = s / b + s = s / c + i = i + 1 + } + log(i) + } + + access(all) event LargeEvent(value: Int256, str: String, list: [UInt256], dic: {String: String}) + + // event heavy function + access(all) fun EventHeavy() { + var s: Int256 = 1024102410241024 + var i = 0 + + while i < 220 { + emit LargeEvent(value: s, str: s.toString(), list:[], dic:{s.toString():s.toString()}) + i = i + 1 + } + log(i) + } + + access(all) fun LedgerInteractionHeavy() { + MyFavContract.AddManyRandomItems(700) + } +} diff --git a/integration/utils/scripts/tokenTransferTransaction.cdc b/integration/utils/scripts/tokenTransferTransaction.cdc new file mode 100644 index 00000000000..31057f6bf25 --- /dev/null +++ b/integration/utils/scripts/tokenTransferTransaction.cdc @@ -0,0 +1,20 @@ 
+import FungibleToken from 0xFUNGIBLETOKENADDRESS +import FlowToken from 0xTOKENADDRESS + +transaction(amount: UFix64, to: Address) { + let sentVault: @FungibleToken.Vault + + prepare(signer: AuthAccount) { + let vaultRef = signer.borrow<&FlowToken.Vault>(from: /storage/flowTokenVault) + ?? panic("Could not borrow reference to the owner's Vault!") + self.sentVault <- vaultRef.withdraw(amount: amount) + } + + execute { + let receiverRef = getAccount(to) + .getCapability(/public/flowTokenReceiver) + .borrow<&{FungibleToken.Receiver}>() + ?? panic("Could not borrow receiver reference to the recipient's Vault") + receiverRef.deposit(from: <-self.sentVault) + } +} From 2c8fd1e6cb3d8690a0d4fc6afa24c12c99e30d48 Mon Sep 17 00:00:00 2001 From: lolpuddle Date: Mon, 18 Jul 2022 17:11:05 -0400 Subject: [PATCH 209/223] update tests --- .../access/rpc/backend/connection_factory.go | 4 +++- .../rpc/backend/connection_factory_test.go | 18 +++++++++--------- 2 files changed, 12 insertions(+), 10 deletions(-) diff --git a/engine/access/rpc/backend/connection_factory.go b/engine/access/rpc/backend/connection_factory.go index a28f3e4aad7..25652f0f883 100644 --- a/engine/access/rpc/backend/connection_factory.go +++ b/engine/access/rpc/backend/connection_factory.go @@ -198,7 +198,9 @@ func (s *CachedClient) Close() { s.mutex.Unlock() // allow time for any existing requests to finish before closing the connection time.Sleep(s.timeout) - conn.Close() + if conn != nil { + conn.Close() + } } // getExecutionNodeAddress translates flow.Identity address to the GRPC address of the node by switching the port to the diff --git a/engine/access/rpc/backend/connection_factory_test.go b/engine/access/rpc/backend/connection_factory_test.go index ce11a19abf2..2caaca3c6c4 100644 --- a/engine/access/rpc/backend/connection_factory_test.go +++ b/engine/access/rpc/backend/connection_factory_test.go @@ -38,7 +38,7 @@ func TestProxyAccessAPI(t *testing.T) { connectionFactory.CollectionGRPCPort = cn.port // 
set the connection pool cache size cache, _ := lru.NewWithEvict(5, func(_, evictedValue interface{}) { - evictedValue.(*CachedClient).ClientConn.Close() + evictedValue.(*CachedClient).Close() }) connectionFactory.ConnectionsCache = cache // set metrics reporting @@ -76,7 +76,7 @@ func TestProxyExecutionAPI(t *testing.T) { connectionFactory.ExecutionGRPCPort = en.port // set the connection pool cache size cache, _ := lru.NewWithEvict(5, func(_, evictedValue interface{}) { - evictedValue.(*CachedClient).ClientConn.Close() + evictedValue.(*CachedClient).Close() }) connectionFactory.ConnectionsCache = cache // set metrics reporting @@ -114,7 +114,7 @@ func TestProxyAccessAPIConnectionReuse(t *testing.T) { connectionFactory.CollectionGRPCPort = cn.port // set the connection pool cache size cache, _ := lru.NewWithEvict(5, func(_, evictedValue interface{}) { - evictedValue.(*CachedClient).ClientConn.Close() + evictedValue.(*CachedClient).Close() }) connectionFactory.ConnectionsCache = cache // set metrics reporting @@ -159,7 +159,7 @@ func TestProxyExecutionAPIConnectionReuse(t *testing.T) { connectionFactory.ExecutionGRPCPort = en.port // set the connection pool cache size cache, _ := lru.NewWithEvict(5, func(_, evictedValue interface{}) { - evictedValue.(*CachedClient).ClientConn.Close() + evictedValue.(*CachedClient).Close() }) connectionFactory.ConnectionsCache = cache // set metrics reporting @@ -211,7 +211,7 @@ func TestExecutionNodeClientTimeout(t *testing.T) { connectionFactory.ExecutionNodeGRPCTimeout = timeout // set the connection pool cache size cache, _ := lru.NewWithEvict(5, func(_, evictedValue interface{}) { - evictedValue.(*CachedClient).ClientConn.Close() + evictedValue.(*CachedClient).Close() }) connectionFactory.ConnectionsCache = cache // set metrics reporting @@ -252,7 +252,7 @@ func TestCollectionNodeClientTimeout(t *testing.T) { connectionFactory.CollectionNodeGRPCTimeout = timeout // set the connection pool cache size cache, _ := 
lru.NewWithEvict(5, func(_, evictedValue interface{}) { - evictedValue.(*CachedClient).ClientConn.Close() + evictedValue.(*CachedClient).Close() }) connectionFactory.ConnectionsCache = cache // set metrics reporting @@ -293,7 +293,7 @@ func TestConnectionPoolFull(t *testing.T) { connectionFactory.CollectionGRPCPort = cn1.port // set the connection pool cache size cache, _ := lru.NewWithEvict(2, func(_, evictedValue interface{}) { - evictedValue.(*CachedClient).ClientConn.Close() + evictedValue.(*CachedClient).Close() }) connectionFactory.ConnectionsCache = cache // set metrics reporting @@ -359,7 +359,7 @@ func TestConnectionPoolStale(t *testing.T) { // set the connection pool cache size cache, _ := lru.NewWithEvict(5, func(_, evictedValue interface{}) { - evictedValue.(*CachedClient).ClientConn.Close() + evictedValue.(*CachedClient).Close() }) connectionFactory.ConnectionsCache = cache // set metrics reporting @@ -377,7 +377,7 @@ func TestConnectionPoolStale(t *testing.T) { // close connection to simulate something "going wrong" with our stored connection res, _ := connectionFactory.ConnectionsCache.Get(proxyConnectionFactory.targetAddress) - res.(*CachedClient).ClientConn.Close() + res.(*CachedClient).Close() ctx := context.Background() // make the call to the collection node (should fail, connection closed) From d207018fde8cd190df4fbf6b3890b2911b6aef27 Mon Sep 17 00:00:00 2001 From: lolpuddle Date: Mon, 18 Jul 2022 17:18:37 -0400 Subject: [PATCH 210/223] sleep only when close needed --- engine/access/rpc/backend/connection_factory.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/engine/access/rpc/backend/connection_factory.go b/engine/access/rpc/backend/connection_factory.go index 25652f0f883..39a329057d5 100644 --- a/engine/access/rpc/backend/connection_factory.go +++ b/engine/access/rpc/backend/connection_factory.go @@ -196,11 +196,12 @@ func (s *CachedClient) Close() { conn := s.ClientConn s.ClientConn = nil s.mutex.Unlock() - // 
allow time for any existing requests to finish before closing the connection - time.Sleep(s.timeout) if conn != nil { - conn.Close() + return } + // allow time for any existing requests to finish before closing the connection + time.Sleep(s.timeout) + conn.Close() } // getExecutionNodeAddress translates flow.Identity address to the GRPC address of the node by switching the port to the From 7b6b244217fe9e8456886fca69d64c60d5ecfae9 Mon Sep 17 00:00:00 2001 From: lolpuddle Date: Mon, 18 Jul 2022 17:39:26 -0400 Subject: [PATCH 211/223] configurable cache size --- cmd/access/node_builder/access_node_builder.go | 5 +++++ engine/access/rpc/backend/connection_factory.go | 2 +- engine/access/rpc/engine.go | 1 + 3 files changed, 7 insertions(+), 1 deletion(-) diff --git a/cmd/access/node_builder/access_node_builder.go b/cmd/access/node_builder/access_node_builder.go index 1d84856914b..aa9be529cd9 100644 --- a/cmd/access/node_builder/access_node_builder.go +++ b/cmd/access/node_builder/access_node_builder.go @@ -137,6 +137,7 @@ func DefaultAccessNodeConfig() *AccessNodeConfig { HistoricalAccessAddrs: "", CollectionClientTimeout: 3 * time.Second, ExecutionClientTimeout: 3 * time.Second, + ConnectionPoolSize: backend.DefaultConnectionPoolSize, MaxHeightRange: backend.DefaultMaxHeightRange, PreferredExecutionNodeIDs: nil, FixedExecutionNodeIDs: nil, @@ -545,6 +546,7 @@ func (builder *FlowAccessNodeBuilder) extraFlags() { flags.StringVarP(&builder.rpcConf.HistoricalAccessAddrs, "historical-access-addr", "", defaultConfig.rpcConf.HistoricalAccessAddrs, "comma separated rpc addresses for historical access nodes") flags.DurationVar(&builder.rpcConf.CollectionClientTimeout, "collection-client-timeout", defaultConfig.rpcConf.CollectionClientTimeout, "grpc client timeout for a collection node") flags.DurationVar(&builder.rpcConf.ExecutionClientTimeout, "execution-client-timeout", defaultConfig.rpcConf.ExecutionClientTimeout, "grpc client timeout for an execution node") + 
flags.UintVar(&builder.rpcConf.ConnectionPoolSize, "connection-pool-size", defaultConfig.rpcConf.ConnectionPoolSize, "maximum number of connections allowed in the connection pool") flags.UintVar(&builder.rpcConf.MaxHeightRange, "rpc-max-height-range", defaultConfig.rpcConf.MaxHeightRange, "maximum size for height range requests") flags.StringSliceVar(&builder.rpcConf.PreferredExecutionNodeIDs, "preferred-execution-node-ids", defaultConfig.rpcConf.PreferredExecutionNodeIDs, "comma separated list of execution nodes ids to choose from when making an upstream call e.g. b4a4dbdcd443d...,fb386a6a... etc.") flags.StringSliceVar(&builder.rpcConf.FixedExecutionNodeIDs, "fixed-execution-node-ids", defaultConfig.rpcConf.FixedExecutionNodeIDs, "comma separated list of execution nodes ids to choose from when making an upstream call if no matching preferred execution id is found e.g. b4a4dbdcd443d...,fb386a6a... etc.") @@ -589,6 +591,9 @@ func (builder *FlowAccessNodeBuilder) extraFlags() { return errors.New("execution-data-max-search-ahead must be greater than 0") } } + if builder.rpcConf.ConnectionPoolSize == 0 { + return errors.New("connection-pool-size must be greater than 0") + } return nil }) diff --git a/engine/access/rpc/backend/connection_factory.go b/engine/access/rpc/backend/connection_factory.go index 39a329057d5..1bf386d131d 100644 --- a/engine/access/rpc/backend/connection_factory.go +++ b/engine/access/rpc/backend/connection_factory.go @@ -196,7 +196,7 @@ func (s *CachedClient) Close() { conn := s.ClientConn s.ClientConn = nil s.mutex.Unlock() - if conn != nil { + if conn == nil { return } // allow time for any existing requests to finish before closing the connection diff --git a/engine/access/rpc/engine.go b/engine/access/rpc/engine.go index 15219f88dfe..64521ff3585 100644 --- a/engine/access/rpc/engine.go +++ b/engine/access/rpc/engine.go @@ -132,6 +132,7 @@ func NewBuilder(log zerolog.Logger, // wrap the unsecured server with an HTTP proxy server to serve HTTP 
clients httpServer := NewHTTPServer(unsecureGrpcServer, config.HTTPListenAddr) + // TODO: when cache size is set to 0, handle case where we do not use cache cacheSize := config.ConnectionPoolSize if cacheSize == 0 { cacheSize = backend.DefaultConnectionPoolSize From c50c5e9bc2452b92dcb3a348f2046fce917f1c3a Mon Sep 17 00:00:00 2001 From: Alexey Ivanov Date: Mon, 18 Jul 2022 16:13:00 -0700 Subject: [PATCH 212/223] Remove GO111MODULE=on everywhere --- Makefile | 100 ++++++++++++------------- cmd/Dockerfile | 4 +- crypto/Makefile | 10 +-- integration/Makefile | 26 +++---- integration/loader/Dockerfile | 2 +- utils/binstat/README.md | 2 +- utils/binstat/binstat_external_test.go | 2 +- 7 files changed, 73 insertions(+), 73 deletions(-) diff --git a/Makefile b/Makefile index af8a5e8962c..e7672d8e1c4 100644 --- a/Makefile +++ b/Makefile @@ -53,13 +53,13 @@ cmd/util/util: .PHONY: unittest-main unittest-main: # test all packages with Relic library enabled - GO111MODULE=on go test -coverprofile=$(COVER_PROFILE) -covermode=atomic $(if $(JSON_OUTPUT),-json,) $(if $(NUM_RUNS),-count $(NUM_RUNS),) --tags relic ./... + go test -coverprofile=$(COVER_PROFILE) -covermode=atomic $(if $(JSON_OUTPUT),-json,) $(if $(NUM_RUNS),-count $(NUM_RUNS),) --tags relic ./... 
.PHONY: install-mock-generators install-mock-generators: cd ${GOPATH}; \ - GO111MODULE=on go install github.com/vektra/mockery/v2@v2.13.0; \ - GO111MODULE=on go install github.com/golang/mock/mockgen@v1.3.1; + go install github.com/vektra/mockery/v2@v2.13.0; \ + go install github.com/golang/mock/mockgen@v1.3.1; ############################################################################################ @@ -67,10 +67,10 @@ install-mock-generators: install-tools: crypto_setup_tests crypto_setup_gopath check-go-version install-mock-generators curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b ${GOPATH}/bin v1.46.2; \ cd ${GOPATH}; \ - GO111MODULE=on go install github.com/golang/protobuf/protoc-gen-go@v1.3.2; \ - GO111MODULE=on go install github.com/uber/prototool/cmd/prototool@v1.9.0; \ - GO111MODULE=on go install github.com/gogo/protobuf/protoc-gen-gofast@latest; \ - GO111MODULE=on go install golang.org/x/tools/cmd/stringer@master; + go install github.com/golang/protobuf/protoc-gen-go@v1.3.2; \ + go install github.com/uber/prototool/cmd/prototool@v1.9.0; \ + go install github.com/gogo/protobuf/protoc-gen-gofast@latest; \ + go install golang.org/x/tools/cmd/stringer@master; .PHONY: unittest unittest: unittest-main @@ -80,7 +80,7 @@ unittest: unittest-main .PHONY: emulator-build emulator-build: # test the fvm package compiles with Relic library disabled (required for the emulator build) - cd ./fvm && GO111MODULE=on go test ./... -run=NoTestHasThisPrefix + cd ./fvm && go test ./... 
-run=NoTestHasThisPrefix .PHONY: test test: generate-mocks emulator-build unittest @@ -118,46 +118,46 @@ generate-proto: .PHONY: generate-mocks generate-mocks: - GO111MODULE=on mockery --name '(Connector|PingInfoProvider)' --dir=network/p2p --case=underscore --output="./network/mocknetwork" --outpkg="mocknetwork" - GO111MODULE=on mockgen -destination=storage/mocks/storage.go -package=mocks github.com/onflow/flow-go/storage Blocks,Headers,Payloads,Collections,Commits,Events,ServiceEvents,TransactionResults - GO111MODULE=on mockgen -destination=module/mocks/network.go -package=mocks github.com/onflow/flow-go/module Local,Requester - GO111MODULE=on mockgen -destination=network/mocknetwork/engine.go -package=mocknetwork github.com/onflow/flow-go/network Engine - GO111MODULE=on mockgen -destination=network/mocknetwork/mock_network.go -package=mocknetwork github.com/onflow/flow-go/network Network - GO111MODULE=on mockery --name '(ExecutionDataService|ExecutionDataCIDCache|ExecutionDataRequester)' --dir=module/state_synchronization --case=underscore --output="./module/state_synchronization/mock" --outpkg="state_synchronization" - GO111MODULE=on mockery --name 'ExecutionState' --dir=engine/execution/state --case=underscore --output="engine/execution/state/mock" --outpkg="mock" - GO111MODULE=on mockery --name 'BlockComputer' --dir=engine/execution/computation/computer --case=underscore --output="engine/execution/computation/computer/mock" --outpkg="mock" - GO111MODULE=on mockery --name 'ComputationManager' --dir=engine/execution/computation --case=underscore --output="engine/execution/computation/mock" --outpkg="mock" - GO111MODULE=on mockery --name 'EpochComponentsFactory' --dir=engine/collection/epochmgr --case=underscore --output="engine/collection/epochmgr/mock" --outpkg="mock" - GO111MODULE=on mockery --name 'Backend' --dir=engine/collection/rpc --case=underscore --output="engine/collection/rpc/mock" --outpkg="mock" - GO111MODULE=on mockery --name 'ProviderEngine' 
--dir=engine/execution/provider --case=underscore --output="engine/execution/provider/mock" --outpkg="mock" - (cd ./crypto && GO111MODULE=on mockery --name 'PublicKey' --case=underscore --output="../module/mock" --outpkg="mock") - GO111MODULE=on mockery --name '.*' --dir=state/cluster --case=underscore --output="state/cluster/mock" --outpkg="mock" - GO111MODULE=on mockery --name '.*' --dir=module --case=underscore --tags="relic" --output="./module/mock" --outpkg="mock" - GO111MODULE=on mockery --name '.*' --dir=module/mempool --case=underscore --output="./module/mempool/mock" --outpkg="mempool" - GO111MODULE=on mockery --name '.*' --dir=module/component --case=underscore --output="./module/component/mock" --outpkg="component" - GO111MODULE=on mockery --name '.*' --dir=network --case=underscore --output="./network/mocknetwork" --outpkg="mocknetwork" - GO111MODULE=on mockery --name '.*' --dir=storage --case=underscore --output="./storage/mock" --outpkg="mock" - GO111MODULE=on mockery --name '.*' --dir="state/protocol" --case=underscore --output="state/protocol/mock" --outpkg="mock" - GO111MODULE=on mockery --name '.*' --dir="state/protocol/events" --case=underscore --output="./state/protocol/events/mock" --outpkg="mock" - GO111MODULE=on mockery --name '.*' --dir=engine/execution/computation/computer --case=underscore --output="./engine/execution/computation/computer/mock" --outpkg="mock" - GO111MODULE=on mockery --name '.*' --dir=engine/execution/state --case=underscore --output="./engine/execution/state/mock" --outpkg="mock" - GO111MODULE=on mockery --name '.*' --dir=engine/consensus --case=underscore --output="./engine/consensus/mock" --outpkg="mock" - GO111MODULE=on mockery --name '.*' --dir=engine/consensus/approvals --case=underscore --output="./engine/consensus/approvals/mock" --outpkg="mock" - GO111MODULE=on mockery --name '.*' --dir=fvm --case=underscore --output="./fvm/mock" --outpkg="mock" - GO111MODULE=on mockery --name '.*' --dir=fvm/state 
--case=underscore --output="./fvm/mock/state" --outpkg="mock" - GO111MODULE=on mockery --name '.*' --dir=ledger --case=underscore --output="./ledger/mock" --outpkg="mock" - GO111MODULE=on mockery --name 'SubscriptionManager' --dir=network/ --case=underscore --output="./network/mocknetwork" --outpkg="mocknetwork" - GO111MODULE=on mockery --name 'Vertex' --dir="./module/forest" --case=underscore --output="./module/forest/mock" --outpkg="mock" - GO111MODULE=on mockery --name '.*' --dir="./consensus/hotstuff" --case=underscore --output="./consensus/hotstuff/mocks" --outpkg="mocks" - GO111MODULE=on mockery --name '.*' --dir="./engine/access/wrapper" --case=underscore --output="./engine/access/mock" --outpkg="mock" - GO111MODULE=on mockery --name 'API' --dir="./access" --case=underscore --output="./access/mock" --outpkg="mock" - GO111MODULE=on mockery --name 'ConnectionFactory' --dir="./engine/access/rpc/backend" --case=underscore --output="./engine/access/rpc/backend/mock" --outpkg="mock" - GO111MODULE=on mockery --name 'IngestRPC' --dir="./engine/execution/ingestion" --case=underscore --tags relic --output="./engine/execution/ingestion/mock" --outpkg="mock" - GO111MODULE=on mockery --name '.*' --dir=model/fingerprint --case=underscore --output="./model/fingerprint/mock" --outpkg="mock" - GO111MODULE=on mockery --name 'ExecForkActor' --structname 'ExecForkActorMock' --dir=module/mempool/consensus/mock/ --case=underscore --output="./module/mempool/consensus/mock/" --outpkg="mock" - GO111MODULE=on mockery --name '.*' --dir=engine/verification/fetcher/ --case=underscore --output="./engine/verification/fetcher/mock" --outpkg="mockfetcher" - GO111MODULE=on mockery --name '.*' --dir=insecure/ --case=underscore --output="./insecure/mock" --outpkg="mockinsecure" - GO111MODULE=on mockery --name '.*' --dir=./cmd/util/ledger/reporters --case=underscore --output="./cmd/util/ledger/reporters/mock" --outpkg="mock" + mockery --name '(Connector|PingInfoProvider)' --dir=network/p2p 
--case=underscore --output="./network/mocknetwork" --outpkg="mocknetwork" + mockgen -destination=storage/mocks/storage.go -package=mocks github.com/onflow/flow-go/storage Blocks,Headers,Payloads,Collections,Commits,Events,ServiceEvents,TransactionResults + mockgen -destination=module/mocks/network.go -package=mocks github.com/onflow/flow-go/module Local,Requester + mockgen -destination=network/mocknetwork/engine.go -package=mocknetwork github.com/onflow/flow-go/network Engine + mockgen -destination=network/mocknetwork/mock_network.go -package=mocknetwork github.com/onflow/flow-go/network Network + mockery --name '(ExecutionDataService|ExecutionDataCIDCache|ExecutionDataRequester)' --dir=module/state_synchronization --case=underscore --output="./module/state_synchronization/mock" --outpkg="state_synchronization" + mockery --name 'ExecutionState' --dir=engine/execution/state --case=underscore --output="engine/execution/state/mock" --outpkg="mock" + mockery --name 'BlockComputer' --dir=engine/execution/computation/computer --case=underscore --output="engine/execution/computation/computer/mock" --outpkg="mock" + mockery --name 'ComputationManager' --dir=engine/execution/computation --case=underscore --output="engine/execution/computation/mock" --outpkg="mock" + mockery --name 'EpochComponentsFactory' --dir=engine/collection/epochmgr --case=underscore --output="engine/collection/epochmgr/mock" --outpkg="mock" + mockery --name 'Backend' --dir=engine/collection/rpc --case=underscore --output="engine/collection/rpc/mock" --outpkg="mock" + mockery --name 'ProviderEngine' --dir=engine/execution/provider --case=underscore --output="engine/execution/provider/mock" --outpkg="mock" + (cd ./crypto && mockery --name 'PublicKey' --case=underscore --output="../module/mock" --outpkg="mock") + mockery --name '.*' --dir=state/cluster --case=underscore --output="state/cluster/mock" --outpkg="mock" + mockery --name '.*' --dir=module --case=underscore --tags="relic" 
--output="./module/mock" --outpkg="mock" + mockery --name '.*' --dir=module/mempool --case=underscore --output="./module/mempool/mock" --outpkg="mempool" + mockery --name '.*' --dir=module/component --case=underscore --output="./module/component/mock" --outpkg="component" + mockery --name '.*' --dir=network --case=underscore --output="./network/mocknetwork" --outpkg="mocknetwork" + mockery --name '.*' --dir=storage --case=underscore --output="./storage/mock" --outpkg="mock" + mockery --name '.*' --dir="state/protocol" --case=underscore --output="state/protocol/mock" --outpkg="mock" + mockery --name '.*' --dir="state/protocol/events" --case=underscore --output="./state/protocol/events/mock" --outpkg="mock" + mockery --name '.*' --dir=engine/execution/computation/computer --case=underscore --output="./engine/execution/computation/computer/mock" --outpkg="mock" + mockery --name '.*' --dir=engine/execution/state --case=underscore --output="./engine/execution/state/mock" --outpkg="mock" + mockery --name '.*' --dir=engine/consensus --case=underscore --output="./engine/consensus/mock" --outpkg="mock" + mockery --name '.*' --dir=engine/consensus/approvals --case=underscore --output="./engine/consensus/approvals/mock" --outpkg="mock" + mockery --name '.*' --dir=fvm --case=underscore --output="./fvm/mock" --outpkg="mock" + mockery --name '.*' --dir=fvm/state --case=underscore --output="./fvm/mock/state" --outpkg="mock" + mockery --name '.*' --dir=ledger --case=underscore --output="./ledger/mock" --outpkg="mock" + mockery --name 'SubscriptionManager' --dir=network/ --case=underscore --output="./network/mocknetwork" --outpkg="mocknetwork" + mockery --name 'Vertex' --dir="./module/forest" --case=underscore --output="./module/forest/mock" --outpkg="mock" + mockery --name '.*' --dir="./consensus/hotstuff" --case=underscore --output="./consensus/hotstuff/mocks" --outpkg="mocks" + mockery --name '.*' --dir="./engine/access/wrapper" --case=underscore --output="./engine/access/mock" 
--outpkg="mock" + mockery --name 'API' --dir="./access" --case=underscore --output="./access/mock" --outpkg="mock" + mockery --name 'ConnectionFactory' --dir="./engine/access/rpc/backend" --case=underscore --output="./engine/access/rpc/backend/mock" --outpkg="mock" + mockery --name 'IngestRPC' --dir="./engine/execution/ingestion" --case=underscore --tags relic --output="./engine/execution/ingestion/mock" --outpkg="mock" + mockery --name '.*' --dir=model/fingerprint --case=underscore --output="./model/fingerprint/mock" --outpkg="mock" + mockery --name 'ExecForkActor' --structname 'ExecForkActorMock' --dir=module/mempool/consensus/mock/ --case=underscore --output="./module/mempool/consensus/mock/" --outpkg="mock" + mockery --name '.*' --dir=engine/verification/fetcher/ --case=underscore --output="./engine/verification/fetcher/mock" --outpkg="mockfetcher" + mockery --name '.*' --dir=insecure/ --case=underscore --output="./insecure/mock" --outpkg="mockinsecure" + mockery --name '.*' --dir=./cmd/util/ledger/reporters --case=underscore --output="./cmd/util/ledger/reporters/mock" --outpkg="mock" # this ensures there is no unused dependency being added by accident .PHONY: tidy @@ -170,12 +170,12 @@ tidy: .PHONY: lint lint: tidy - # GO111MODULE=on revive -config revive.toml -exclude storage/ledger/trie ./... + # revive -config revive.toml -exclude storage/ledger/trie ./... golangci-lint run -v --build-tags relic ./... .PHONY: fix-lint fix-lint: - # GO111MODULE=on revive -config revive.toml -exclude storage/ledger/trie ./... + # revive -config revive.toml -exclude storage/ledger/trie ./... golangci-lint run -v --build-tags relic --fix ./... 
# Runs unit tests, SKIP FOR NOW linter, coverage diff --git a/cmd/Dockerfile b/cmd/Dockerfile index 3079f0f2a17..b377a0db4a9 100644 --- a/cmd/Dockerfile +++ b/cmd/Dockerfile @@ -30,7 +30,7 @@ ARG GOARCH=amd64 # https://github.com/golang/go/issues/27719#issuecomment-514747274 RUN --mount=type=cache,sharing=locked,target=/go/pkg/mod \ --mount=type=cache,target=/root/.cache/go-build \ - GO111MODULE=on CGO_ENABLED=1 GOOS=linux go build --tags "relic,netgo" -ldflags "-extldflags -static \ + CGO_ENABLED=1 GOOS=linux go build --tags "relic,netgo" -ldflags "-extldflags -static \ -X 'github.com/onflow/flow-go/cmd/build.commit=${COMMIT}' -X 'github.com/onflow/flow-go/cmd/build.semver=${VERSION}'" \ -o ./app ${TARGET} @@ -50,7 +50,7 @@ ARG GOARCH=amd64 RUN --mount=type=ssh \ --mount=type=cache,sharing=locked,target=/go/pkg/mod \ --mount=type=cache,target=/root/.cache/go-build \ - GO111MODULE=on CGO_ENABLED=1 GOOS=linux go build --tags "relic,netgo" -ldflags "-extldflags -static \ + CGO_ENABLED=1 GOOS=linux go build --tags "relic,netgo" -ldflags "-extldflags -static \ -X 'github.com/onflow/flow-go/cmd/build.commit=${COMMIT}' -X 'github.com/onflow/flow-go/cmd/build.semver=${VERSION}'" \ -gcflags="all=-N -l" -o ./app ${TARGET} diff --git a/crypto/Makefile b/crypto/Makefile index b788a459dfd..c80e783c870 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -13,19 +13,19 @@ setup: .PHONY: relic_tests relic_tests: ifeq ($(ADX_SUPPORT), 1) - GO111MODULE=on go test -coverprofile=$(COVER_PROFILE) $(if $(JSON_OUTPUT),-json,) $(if $(NUM_RUNS),-count $(NUM_RUNS),) --tags relic $(if $(VERBOSE),-v,) + go test -coverprofile=$(COVER_PROFILE) $(if $(JSON_OUTPUT),-json,) $(if $(NUM_RUNS),-count $(NUM_RUNS),) --tags relic $(if $(VERBOSE),-v,) else - CGO_CFLAGS="-D__BLST_PORTABLE__" GO111MODULE=on go test -coverprofile=$(COVER_PROFILE) $(if $(JSON_OUTPUT),-json,) $(if $(NUM_RUNS),-count $(NUM_RUNS),) --tags relic $(if $(VERBOSE),-v,) + CGO_CFLAGS="-D__BLST_PORTABLE__" go test 
-coverprofile=$(COVER_PROFILE) $(if $(JSON_OUTPUT),-json,) $(if $(NUM_RUNS),-count $(NUM_RUNS),) --tags relic $(if $(VERBOSE),-v,) endif # test all packages that do not require Relic library (all functionalities except the BLS-related ones) .PHONY: non_relic_tests non_relic_tests: # root package without relic - GO111MODULE=on go test -coverprofile=$(COVER_PROFILE) $(if $(JSON_OUTPUT),-json,) $(if $(NUM_RUNS),-count $(NUM_RUNS),) $(if $(VERBOSE),-v,) + go test -coverprofile=$(COVER_PROFILE) $(if $(JSON_OUTPUT),-json,) $(if $(NUM_RUNS),-count $(NUM_RUNS),) $(if $(VERBOSE),-v,) # sub packages - GO111MODULE=on go test -coverprofile=$(COVER_PROFILE) $(if $(JSON_OUTPUT),-json,) $(if $(NUM_RUNS),-count $(NUM_RUNS),) $(if $(VERBOSE),-v,) ./hash - GO111MODULE=on go test -coverprofile=$(COVER_PROFILE) $(if $(JSON_OUTPUT),-json,) $(if $(NUM_RUNS),-count $(NUM_RUNS),) $(if $(VERBOSE),-v,) ./random + go test -coverprofile=$(COVER_PROFILE) $(if $(JSON_OUTPUT),-json,) $(if $(NUM_RUNS),-count $(NUM_RUNS),) $(if $(VERBOSE),-v,) ./hash + go test -coverprofile=$(COVER_PROFILE) $(if $(JSON_OUTPUT),-json,) $(if $(NUM_RUNS),-count $(NUM_RUNS),) $(if $(VERBOSE),-v,) ./random ############################################################################################ # CAUTION: DO NOT MODIFY THIS TARGET! 
DOING SO WILL BREAK THE FLAKY TEST MONITOR diff --git a/integration/Makefile b/integration/Makefile index 92dc24a39d9..ccfe9a1352e 100644 --- a/integration/Makefile +++ b/integration/Makefile @@ -10,11 +10,11 @@ ci-integration-test: access-tests ghost-tests mvp-tests epochs-tests consensus-t .PHONY: benchmark benchmark: - GO111MODULE=on go test -v -tags relic -count=1 -timeout 30m ./benchmark -run Benchmark + go test -v -tags relic -count=1 -timeout 30m ./benchmark -run Benchmark .PHONY: ci-benchmark ci-benchmark: - GO111MODULE=on ENV=TEAMCITY go test -v -tags relic -count=1 -timeout 15m ./benchmark -run Benchmark + ENV=TEAMCITY go test -v -tags relic -count=1 -timeout 15m ./benchmark -run Benchmark ############################################################################################ # CAUTION: DO NOT MODIFY THE TARGETS BELOW! DOING SO WILL BREAK THE FLAKY TEST MONITOR @@ -23,48 +23,48 @@ ci-benchmark: # Run unit tests for test utilities in this module .PHONY: test test: - GO111MODULE=on go test $(if $(VERBOSE),-v,) -tags relic -coverprofile=$(COVER_PROFILE) $(if $(JSON_OUTPUT),-json,) $(if $(NUM_RUNS),-count $(NUM_RUNS),) `go list ./... | grep -v -e integration/tests -e integration/benchmark` + go test $(if $(VERBOSE),-v,) -tags relic -coverprofile=$(COVER_PROFILE) $(if $(JSON_OUTPUT),-json,) $(if $(NUM_RUNS),-count $(NUM_RUNS),) `go list ./... | grep -v -e integration/tests -e integration/benchmark` .PHONY: access-tests access-tests: - GO111MODULE=on go test $(if $(VERBOSE),-v,) $(if $(JSON_OUTPUT),-json,) $(if $(NUM_RUNS),-count $(NUM_RUNS),) -tags relic ./tests/access/... + go test $(if $(VERBOSE),-v,) $(if $(JSON_OUTPUT),-json,) $(if $(NUM_RUNS),-count $(NUM_RUNS),) -tags relic ./tests/access/... .PHONY: collection-tests collection-tests: - GO111MODULE=on go test $(if $(VERBOSE),-v,) $(if $(JSON_OUTPUT),-json,) $(if $(NUM_RUNS),-count $(NUM_RUNS),) -tags relic ./tests/collection/... 
+ go test $(if $(VERBOSE),-v,) $(if $(JSON_OUTPUT),-json,) $(if $(NUM_RUNS),-count $(NUM_RUNS),) -tags relic ./tests/collection/... .PHONY: consensus-tests consensus-tests: - GO111MODULE=on go test $(if $(VERBOSE),-v,) $(if $(JSON_OUTPUT),-json,) $(if $(NUM_RUNS),-count $(NUM_RUNS),) -tags relic ./tests/consensus/... + go test $(if $(VERBOSE),-v,) $(if $(JSON_OUTPUT),-json,) $(if $(NUM_RUNS),-count $(NUM_RUNS),) -tags relic ./tests/consensus/... .PHONY: epochs-tests epochs-tests: - GO111MODULE=on go test $(if $(VERBOSE),-v,) $(if $(JSON_OUTPUT),-json,) $(if $(NUM_RUNS),-count $(NUM_RUNS),) -tags relic ./tests/epochs/... + go test $(if $(VERBOSE),-v,) $(if $(JSON_OUTPUT),-json,) $(if $(NUM_RUNS),-count $(NUM_RUNS),) -tags relic ./tests/epochs/... .PHONY: ghost-tests ghost-tests: - GO111MODULE=on go test $(if $(VERBOSE),-v,) $(if $(JSON_OUTPUT),-json,) $(if $(NUM_RUNS),-count $(NUM_RUNS),) -tags relic ./tests/ghost/... + go test $(if $(VERBOSE),-v,) $(if $(JSON_OUTPUT),-json,) $(if $(NUM_RUNS),-count $(NUM_RUNS),) -tags relic ./tests/ghost/... .PHONY: mvp-tests mvp-tests: - GO111MODULE=on go test $(if $(VERBOSE),-v,) $(if $(JSON_OUTPUT),-json,) $(if $(NUM_RUNS),-count $(NUM_RUNS),) -tags relic ./tests/mvp/... + go test $(if $(VERBOSE),-v,) $(if $(JSON_OUTPUT),-json,) $(if $(NUM_RUNS),-count $(NUM_RUNS),) -tags relic ./tests/mvp/... .PHONY: execution-tests execution-tests: - GO111MODULE=on go test $(if $(VERBOSE),-v,) $(if $(JSON_OUTPUT),-json,) $(if $(NUM_RUNS),-count $(NUM_RUNS),) -tags relic ./tests/execution/... + go test $(if $(VERBOSE),-v,) $(if $(JSON_OUTPUT),-json,) $(if $(NUM_RUNS),-count $(NUM_RUNS),) -tags relic ./tests/execution/... .PHONY: verification-tests verification-tests: - GO111MODULE=on go test $(if $(VERBOSE),-v,) $(if $(JSON_OUTPUT),-json,) $(if $(NUM_RUNS),-count $(NUM_RUNS),) -tags relic ./tests/verification/... + go test $(if $(VERBOSE),-v,) $(if $(JSON_OUTPUT),-json,) $(if $(NUM_RUNS),-count $(NUM_RUNS),) -tags relic ./tests/verification/... 
.PHONY: network-tests network-tests: - GO111MODULE=on go test $(if $(VERBOSE),-v,) $(if $(JSON_OUTPUT),-json,) $(if $(NUM_RUNS),-count $(NUM_RUNS),) -tags relic ./tests/network/... + go test $(if $(VERBOSE),-v,) $(if $(JSON_OUTPUT),-json,) $(if $(NUM_RUNS),-count $(NUM_RUNS),) -tags relic ./tests/network/... # BFT tests need to be run sequentially (-p 1) due to interference between different Docker networks when tests are run in parallel .PHONY: bft-tests bft-tests: - GO111MODULE=on go test $(if $(VERBOSE),-v,) $(if $(JSON_OUTPUT),-json,) $(if $(NUM_RUNS),-count $(NUM_RUNS),) -tags relic ./tests/bft/... -p 1 + go test $(if $(VERBOSE),-v,) $(if $(JSON_OUTPUT),-json,) $(if $(NUM_RUNS),-count $(NUM_RUNS),) -tags relic ./tests/bft/... -p 1 ############################################################################################ diff --git a/integration/loader/Dockerfile b/integration/loader/Dockerfile index d48e9aec1ae..c3bc9fded17 100644 --- a/integration/loader/Dockerfile +++ b/integration/loader/Dockerfile @@ -48,7 +48,7 @@ RUN --mount=type=cache,sharing=locked,target=/go/pkg/mod \ --mount=type=cache,target=/root/.cache/go-build \ --mount=type=ssh \ cd integration && \ - GO111MODULE=on CGO_ENABLED=1 go build --tags relic -ldflags "-extldflags -static" -o ./app ./${TARGET} + CGO_ENABLED=1 go build --tags relic -ldflags "-extldflags -static" -o ./app ./${TARGET} RUN mv /app/integration/app /app/app diff --git a/utils/binstat/README.md b/utils/binstat/README.md index 3c71f9cb1ef..aa74bb21071 100644 --- a/utils/binstat/README.md +++ b/utils/binstat/README.md @@ -94,7 +94,7 @@ import ( * This example -- on a GCP Linux box -- launches 6 identical go-routines, 3 times, with `gomaxprocs=1` & then `=8`. ``` -$ pushd binstat ; GO111MODULE=on go test -v -vv ./... 2>&1 | perl -lane 's~\\n~\n~g; s~"time".*?,~~g; print;' ; popd +$ pushd binstat ; go test -v -vv ./... 2>&1 | perl -lane 's~\\n~\n~g; s~"time".*?,~~g; print;' ; popd ... === RUN TestWithPprof ... 
diff --git a/utils/binstat/binstat_external_test.go b/utils/binstat/binstat_external_test.go index 6678297a08c..9ffa7b23065 100644 --- a/utils/binstat/binstat_external_test.go +++ b/utils/binstat/binstat_external_test.go @@ -28,7 +28,7 @@ import ( * 5. Strip "time" field from JSON log line output for shorter read, and * 6. Show the amount of code coverage from the tests. * - * pushd utils/binstat ; go fmt ./*.go ; golangci-lint run && GO111MODULE=on go test -v -vv -coverprofile=coverage.txt -covermode=atomic --tags relic ./... | perl -lane 's~\\n~\n~g; s~"time".*?,~~g; print;' ; go tool cover -func=coverage.txt ; popd + * pushd utils/binstat ; go fmt ./*.go ; golangci-lint run && go test -v -vv -coverprofile=coverage.txt -covermode=atomic --tags relic ./... | perl -lane 's~\\n~\n~g; s~"time".*?,~~g; print;' ; go tool cover -func=coverage.txt ; popd */ /* From 2ead3f04079f2f3a2f434a427222bc4a56107961 Mon Sep 17 00:00:00 2001 From: Alexey Ivanov Date: Fri, 15 Jul 2022 12:37:50 -0700 Subject: [PATCH 213/223] chore(comment): fix IsSampled description --- model/flow/identifier.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/model/flow/identifier.go b/model/flow/identifier.go index 57041040363..5fc5bb33c2a 100644 --- a/model/flow/identifier.go +++ b/model/flow/identifier.go @@ -77,8 +77,11 @@ func (id Identifier) Format(state fmt.State, verb rune) { // IsSampled is a utility method to sample entities based on their ids // the range is from [0, 64]. // 0 is 100% (all data will be collected) -// 32 is ~50% -// 64 is ~0% (no data will be collected) +// 1 is ~50% +// 2 is ~25% +// 3 is ~12.5% +// ... 
+// >64 is 0% (no data will be collected) func (id Identifier) IsSampled(sensitivity uint) bool { if sensitivity > 64 { return false From 48bd511c33e90eed3b424f59062e2e752983c4d6 Mon Sep 17 00:00:00 2001 From: Alexey Ivanov Date: Mon, 18 Jul 2022 22:54:13 -0700 Subject: [PATCH 214/223] chore(tests): remove unused mock --- state/protocol/mock/mutation_context.go | 29 ------------------------- 1 file changed, 29 deletions(-) delete mode 100644 state/protocol/mock/mutation_context.go diff --git a/state/protocol/mock/mutation_context.go b/state/protocol/mock/mutation_context.go deleted file mode 100644 index 0bafd8f690c..00000000000 --- a/state/protocol/mock/mutation_context.go +++ /dev/null @@ -1,29 +0,0 @@ -// Code generated by mockery v1.0.0. DO NOT EDIT. - -package mock - -import ( - opentracing "github.com/opentracing/opentracing-go" - mock "github.com/stretchr/testify/mock" -) - -// MutationContext is an autogenerated mock type for the MutationContext type -type MutationContext struct { - mock.Mock -} - -// ParentTraceSpan provides a mock function with given fields: -func (_m *MutationContext) ParentTraceSpan() opentracing.Span { - ret := _m.Called() - - var r0 opentracing.Span - if rf, ok := ret.Get(0).(func() opentracing.Span); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(opentracing.Span) - } - } - - return r0 -} From d892f073f25325890736c6b949bc1d4eedef22b9 Mon Sep 17 00:00:00 2001 From: Peter Argue <89119817+peterargue@users.noreply.github.com> Date: Thu, 16 Jun 2022 13:46:52 -0700 Subject: [PATCH 215/223] Add a separate channel name for push-blocks on the public network --- engine/common/follower/engine.go | 7 ++++++- module/local.go | 3 +++ module/local/me.go | 4 ++++ module/local/me_nokey.go | 4 ++++ network/channels/channels.go | 2 ++ network/relay/network.go | 10 ++++++---- .../validator/pubsub/authorized_sender_validator.go | 5 +++++ 7 files changed, 30 insertions(+), 5 deletions(-) diff --git 
a/engine/common/follower/engine.go b/engine/common/follower/engine.go index 9688edc37aa..a4ec08b8766 100644 --- a/engine/common/follower/engine.go +++ b/engine/common/follower/engine.go @@ -81,7 +81,12 @@ func New( tracer: tracer, } - con, err := net.Register(channels.ReceiveBlocks, e) + channel := channels.ReceiveBlocks + if !me.StakedNode() { + channel = channels.PublicPushBlocks + } + + con, err := net.Register(channel, e) if err != nil { return nil, fmt.Errorf("could not register engine to network: %w", err) } diff --git a/module/local.go b/module/local.go index e1fa2f5fa45..70fd34ea8d1 100644 --- a/module/local.go +++ b/module/local.go @@ -17,6 +17,9 @@ type Local interface { // Address returns the (listen) address of the local node. Address() string + // StakedNode returns true if the local node is a staked node. (i.e. not an observer) + StakedNode() bool + // Sign provides a signature oracle that given a message and hasher, it // generates and returns a signature over the message using the node's private key // as well as the input hasher diff --git a/module/local/me.go b/module/local/me.go index 6a2f1ce117a..1c1b1c34d04 100644 --- a/module/local/me.go +++ b/module/local/me.go @@ -37,6 +37,10 @@ func (l *Local) Address() string { return l.me.Address } +func (l *Local) StakedNode() bool { + return true +} + func (l *Local) Sign(msg []byte, hasher hash.Hasher) (crypto.Signature, error) { return l.sk.Sign(msg, hasher) } diff --git a/module/local/me_nokey.go b/module/local/me_nokey.go index d9de4348dc1..53efce93b27 100644 --- a/module/local/me_nokey.go +++ b/module/local/me_nokey.go @@ -28,6 +28,10 @@ func (l *LocalNoKey) Address() string { return l.me.Address } +func (l *LocalNoKey) StakedNode() bool { + return false +} + func (l *LocalNoKey) Sign(msg []byte, hasher hash.Hasher) (crypto.Signature, error) { return nil, fmt.Errorf("no private key") } diff --git a/network/channels/channels.go b/network/channels/channels.go index a71cf612ca7..43cc0439b29 100644 --- 
a/network/channels/channels.go +++ b/network/channels/channels.go @@ -101,6 +101,7 @@ func Channels() ChannelList { func PublicChannels() ChannelList { return ChannelList{ PublicSyncCommittee, + PublicPushBlocks, } } @@ -149,6 +150,7 @@ const ( ProvideApprovalsByChunk = RequestApprovalsByChunk // Public network channels + PublicPushBlocks = Channel("public-push-blocks") PublicSyncCommittee = Channel("public-sync-committee") // Execution data service diff --git a/network/relay/network.go b/network/relay/network.go index 7fc155d55f6..63abd6682a4 100644 --- a/network/relay/network.go +++ b/network/relay/network.go @@ -17,7 +17,7 @@ type RelayNetwork struct { originNet network.Network destinationNet network.Network logger zerolog.Logger - channels channels.ChannelList + channels map[channels.Channel]channels.Channel } var _ network.Network = (*RelayNetwork)(nil) @@ -26,7 +26,7 @@ func NewRelayNetwork( originNetwork network.Network, destinationNetwork network.Network, logger zerolog.Logger, - channels []channels.Channel, + channels map[channels.Channel]channels.Channel, ) *RelayNetwork { return &RelayNetwork{ originNet: originNetwork, @@ -37,11 +37,13 @@ func NewRelayNetwork( } func (r *RelayNetwork) Register(channel channels.Channel, messageProcessor network.MessageProcessor) (network.Conduit, error) { - if !r.channels.Contains(channel) { + // Only relay configured channels + dstChannel, ok := r.channels[channel] + if !ok { return r.originNet.Register(channel, messageProcessor) } - relayer, err := NewRelayer(r.destinationNet, channel, messageProcessor) + relayer, err := NewRelayer(r.destinationNet, dstChannel, messageProcessor) if err != nil { return nil, fmt.Errorf("failed to register relayer on origin network: %w", err) diff --git a/network/validator/pubsub/authorized_sender_validator.go b/network/validator/pubsub/authorized_sender_validator.go index 1043fb3d611..bcdfdffdc2e 100644 --- a/network/validator/pubsub/authorized_sender_validator.go +++ 
b/network/validator/pubsub/authorized_sender_validator.go @@ -64,6 +64,7 @@ func AuthorizedSenderValidator(log zerolog.Logger, channel channels.Channel, get log.Error(). Err(err). Str("peer_id", from.String()). + Str("peer_node_id", identity.NodeID.String()). Str("role", identity.Role.String()). Str("peer_node_id", identity.NodeID.String()). Str("message_type", msgType). @@ -109,6 +110,10 @@ func isAuthorizedSender(identity *flow.Identity, channel channels.Channel, msg i return "", err } + if network.PublicChannels().Contains(channel) { + return conf.String, nil + } + // handle special case for cluster prefixed channels if prefix, ok := channels.ClusterChannelPrefix(channel); ok { channel = channels.Channel(prefix) From ca4057c8fb3c4e16cc41e2edf696142f0cdb8d12 Mon Sep 17 00:00:00 2001 From: Peter Argue <89119817+peterargue@users.noreply.github.com> Date: Thu, 16 Jun 2022 17:06:00 -0700 Subject: [PATCH 216/223] Add channel override via an option --- .../node_builder/access_node_builder.go | 2 +- cmd/collection/main.go | 2 +- cmd/execution_builder.go | 2 +- cmd/verification_builder.go | 2 +- engine/common/follower/engine.go | 35 ++++++++++++------- module/local.go | 3 -- module/local/me.go | 4 --- module/local/me_nokey.go | 4 --- .../pubsub/authorized_sender_validator.go | 4 --- 9 files changed, 27 insertions(+), 31 deletions(-) diff --git a/cmd/access/node_builder/access_node_builder.go b/cmd/access/node_builder/access_node_builder.go index dfa7bccb5af..1bf087ef238 100644 --- a/cmd/access/node_builder/access_node_builder.go +++ b/cmd/access/node_builder/access_node_builder.go @@ -326,7 +326,7 @@ func (builder *FlowAccessNodeBuilder) buildFollowerEngine() *FlowAccessNodeBuild builder.FollowerCore, builder.SyncCore, node.Tracer, - compliance.WithSkipNewProposalsThreshold(builder.ComplianceConfig.SkipNewProposalsThreshold), + follower.WithComplianceOptions(compliance.WithSkipNewProposalsThreshold(builder.ComplianceConfig.SkipNewProposalsThreshold)), ) if err != nil { 
return nil, fmt.Errorf("could not create follower engine: %w", err) diff --git a/cmd/collection/main.go b/cmd/collection/main.go index fd671fb5454..18cbeda00ca 100644 --- a/cmd/collection/main.go +++ b/cmd/collection/main.go @@ -311,7 +311,7 @@ func main() { followerCore, mainChainSyncCore, node.Tracer, - modulecompliance.WithSkipNewProposalsThreshold(node.ComplianceConfig.SkipNewProposalsThreshold), + followereng.WithComplianceOptions(modulecompliance.WithSkipNewProposalsThreshold(node.ComplianceConfig.SkipNewProposalsThreshold)), ) if err != nil { return nil, fmt.Errorf("could not create follower engine: %w", err) diff --git a/cmd/execution_builder.go b/cmd/execution_builder.go index 35e1d590dbf..e299740dfb7 100644 --- a/cmd/execution_builder.go +++ b/cmd/execution_builder.go @@ -722,7 +722,7 @@ func (e *ExecutionNodeBuilder) LoadComponentsAndModules() { followerCore, syncCore, node.Tracer, - compliance.WithSkipNewProposalsThreshold(node.ComplianceConfig.SkipNewProposalsThreshold), + followereng.WithComplianceOptions(compliance.WithSkipNewProposalsThreshold(node.ComplianceConfig.SkipNewProposalsThreshold)), ) if err != nil { return nil, fmt.Errorf("could not create follower engine: %w", err) diff --git a/cmd/verification_builder.go b/cmd/verification_builder.go index 3b063124290..a1c460f29e6 100644 --- a/cmd/verification_builder.go +++ b/cmd/verification_builder.go @@ -359,7 +359,7 @@ func (v *VerificationNodeBuilder) LoadComponentsAndModules() { followerCore, syncCore, node.Tracer, - compliance.WithSkipNewProposalsThreshold(node.ComplianceConfig.SkipNewProposalsThreshold), + follower.WithComplianceOptions(compliance.WithSkipNewProposalsThreshold(node.ComplianceConfig.SkipNewProposalsThreshold)), ) if err != nil { return nil, fmt.Errorf("could not create follower engine: %w", err) diff --git a/engine/common/follower/engine.go b/engine/common/follower/engine.go index a4ec08b8766..ae1ba920f41 100644 --- a/engine/common/follower/engine.go +++ 
b/engine/common/follower/engine.go @@ -40,6 +40,23 @@ type Engine struct { con network.Conduit sync module.BlockRequester tracer module.Tracer + channel network.Channel +} + +type Option func(*Engine) + +func WithComplianceOptions(opts ...compliance.Opt) Option { + return func(e *Engine) { + for _, apply := range opts { + apply(&e.config) + } + } +} + +func WithChannel(channel network.Channel) Option { + return func(e *Engine) { + e.channel = channel + } } func New( @@ -56,18 +73,12 @@ func New( follower module.HotStuffFollower, sync module.BlockRequester, tracer module.Tracer, - opts ...compliance.Opt, + opts ...Option, ) (*Engine, error) { - - config := compliance.DefaultConfig() - for _, apply := range opts { - apply(&config) - } - e := &Engine{ unit: engine.NewUnit(), log: log.With().Str("engine", "follower").Logger(), - config: config, + config: compliance.DefaultConfig(), me: me, engMetrics: engMetrics, mempoolMetrics: mempoolMetrics, @@ -79,14 +90,14 @@ func New( follower: follower, sync: sync, tracer: tracer, + channel: channels.ReceiveBlocks, } - channel := channels.ReceiveBlocks - if !me.StakedNode() { - channel = channels.PublicPushBlocks + for _, apply := range opts { + apply(e) } - con, err := net.Register(channel, e) + con, err := net.Register(e.channel, e) if err != nil { return nil, fmt.Errorf("could not register engine to network: %w", err) } diff --git a/module/local.go b/module/local.go index 70fd34ea8d1..e1fa2f5fa45 100644 --- a/module/local.go +++ b/module/local.go @@ -17,9 +17,6 @@ type Local interface { // Address returns the (listen) address of the local node. Address() string - // StakedNode returns true if the local node is a staked node. (i.e. 
not an observer) - StakedNode() bool - // Sign provides a signature oracle that given a message and hasher, it // generates and returns a signature over the message using the node's private key // as well as the input hasher diff --git a/module/local/me.go b/module/local/me.go index 1c1b1c34d04..6a2f1ce117a 100644 --- a/module/local/me.go +++ b/module/local/me.go @@ -37,10 +37,6 @@ func (l *Local) Address() string { return l.me.Address } -func (l *Local) StakedNode() bool { - return true -} - func (l *Local) Sign(msg []byte, hasher hash.Hasher) (crypto.Signature, error) { return l.sk.Sign(msg, hasher) } diff --git a/module/local/me_nokey.go b/module/local/me_nokey.go index 53efce93b27..d9de4348dc1 100644 --- a/module/local/me_nokey.go +++ b/module/local/me_nokey.go @@ -28,10 +28,6 @@ func (l *LocalNoKey) Address() string { return l.me.Address } -func (l *LocalNoKey) StakedNode() bool { - return false -} - func (l *LocalNoKey) Sign(msg []byte, hasher hash.Hasher) (crypto.Signature, error) { return nil, fmt.Errorf("no private key") } diff --git a/network/validator/pubsub/authorized_sender_validator.go b/network/validator/pubsub/authorized_sender_validator.go index bcdfdffdc2e..22ef7e3b1c9 100644 --- a/network/validator/pubsub/authorized_sender_validator.go +++ b/network/validator/pubsub/authorized_sender_validator.go @@ -110,10 +110,6 @@ func isAuthorizedSender(identity *flow.Identity, channel channels.Channel, msg i return "", err } - if network.PublicChannels().Contains(channel) { - return conf.String, nil - } - // handle special case for cluster prefixed channels if prefix, ok := channels.ClusterChannelPrefix(channel); ok { channel = channels.Channel(prefix) From bc9bd34afcd59e899ba3230cc6d3e6875358d0ad Mon Sep 17 00:00:00 2001 From: Peter Argue <89119817+peterargue@users.noreply.github.com> Date: Wed, 6 Jul 2022 17:06:46 -0700 Subject: [PATCH 217/223] integrate into consensus follower and observer --- cmd/access/node_builder/access_node_builder.go | 4 +++- 
cmd/observer/node_builder/observer_builder.go | 3 ++- follower/follower_builder.go | 3 ++- network/channels/channels.go | 3 ++- 4 files changed, 9 insertions(+), 4 deletions(-) diff --git a/cmd/access/node_builder/access_node_builder.go b/cmd/access/node_builder/access_node_builder.go index 1bf087ef238..37da0946612 100644 --- a/cmd/access/node_builder/access_node_builder.go +++ b/cmd/access/node_builder/access_node_builder.go @@ -711,7 +711,9 @@ func (builder *FlowAccessNodeBuilder) enqueueRelayNetwork() { node.Network, builder.AccessNodeConfig.PublicNetworkConfig.Network, node.Logger, - []channels.Channel{channels.ReceiveBlocks}, + map[channels.Channel]channels.Channel{ + channels.PushBlocks: channels.PublicPushBlocks, + }, ) node.Network = relayNet return relayNet, nil diff --git a/cmd/observer/node_builder/observer_builder.go b/cmd/observer/node_builder/observer_builder.go index d8f16d4d7f4..62f61947099 100644 --- a/cmd/observer/node_builder/observer_builder.go +++ b/cmd/observer/node_builder/observer_builder.go @@ -362,7 +362,8 @@ func (builder *ObserverServiceBuilder) buildFollowerEngine() *ObserverServiceBui builder.FollowerCore, builder.SyncCore, node.Tracer, - compliance.WithSkipNewProposalsThreshold(builder.ComplianceConfig.SkipNewProposalsThreshold), + follower.WithComplianceOptions(compliance.WithSkipNewProposalsThreshold(builder.ComplianceConfig.SkipNewProposalsThreshold)), + follower.WithChannel(network.PublicReceiveBlocks), ) if err != nil { return nil, fmt.Errorf("could not create follower engine: %w", err) diff --git a/follower/follower_builder.go b/follower/follower_builder.go index e601df65354..d2708edc111 100644 --- a/follower/follower_builder.go +++ b/follower/follower_builder.go @@ -243,7 +243,8 @@ func (builder *FollowerServiceBuilder) buildFollowerEngine() *FollowerServiceBui builder.FollowerCore, builder.SyncCore, node.Tracer, - compliance.WithSkipNewProposalsThreshold(builder.ComplianceConfig.SkipNewProposalsThreshold), + 
follower.WithComplianceOptions(compliance.WithSkipNewProposalsThreshold(builder.ComplianceConfig.SkipNewProposalsThreshold)), + follower.WithChannel(network.PublicReceiveBlocks), ) if err != nil { return nil, fmt.Errorf("could not create follower engine: %w", err) diff --git a/network/channels/channels.go b/network/channels/channels.go index 43cc0439b29..0e3b98f7950 100644 --- a/network/channels/channels.go +++ b/network/channels/channels.go @@ -101,7 +101,7 @@ func Channels() ChannelList { func PublicChannels() ChannelList { return ChannelList{ PublicSyncCommittee, - PublicPushBlocks, + PublicReceiveBlocks, } } @@ -151,6 +151,7 @@ const ( // Public network channels PublicPushBlocks = Channel("public-push-blocks") + PublicReceiveBlocks = PublicPushBlocks PublicSyncCommittee = Channel("public-sync-committee") // Execution data service From 0e0f995b13a8c2606c05e26e058ce5be47d438f0 Mon Sep 17 00:00:00 2001 From: Peter Argue <89119817+peterargue@users.noreply.github.com> Date: Wed, 6 Jul 2022 20:43:07 -0700 Subject: [PATCH 218/223] add comments and switch channel varable name --- cmd/access/node_builder/access_node_builder.go | 2 +- engine/common/follower/engine.go | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/cmd/access/node_builder/access_node_builder.go b/cmd/access/node_builder/access_node_builder.go index 37da0946612..16d0aa0731a 100644 --- a/cmd/access/node_builder/access_node_builder.go +++ b/cmd/access/node_builder/access_node_builder.go @@ -712,7 +712,7 @@ func (builder *FlowAccessNodeBuilder) enqueueRelayNetwork() { builder.AccessNodeConfig.PublicNetworkConfig.Network, node.Logger, map[channels.Channel]channels.Channel{ - channels.PushBlocks: channels.PublicPushBlocks, + channels.ReceiveBlocks: channels.PublicReceiveBlocks, }, ) node.Network = relayNet diff --git a/engine/common/follower/engine.go b/engine/common/follower/engine.go index ae1ba920f41..a66a82b21a9 100644 --- a/engine/common/follower/engine.go +++ 
b/engine/common/follower/engine.go @@ -45,6 +45,7 @@ type Engine struct { type Option func(*Engine) +// WithComplianceOptions sets options for the engine's compliance config func WithComplianceOptions(opts ...compliance.Opt) Option { return func(e *Engine) { for _, apply := range opts { @@ -53,6 +54,7 @@ func WithComplianceOptions(opts ...compliance.Opt) Option { } } +// WithChannel sets the channel the follower engine will use to receive blocks. func WithChannel(channel network.Channel) Option { return func(e *Engine) { e.channel = channel From 3fff835748fcf6bcb360cb3cf8ec369c8afa44fd Mon Sep 17 00:00:00 2001 From: Peter Argue <89119817+peterargue@users.noreply.github.com> Date: Tue, 19 Jul 2022 15:00:16 -0700 Subject: [PATCH 219/223] fixes after rebasing master --- cmd/observer/node_builder/observer_builder.go | 2 +- engine/common/follower/engine.go | 4 ++-- follower/follower_builder.go | 2 +- network/validator/pubsub/authorized_sender_validator.go | 1 - 4 files changed, 4 insertions(+), 5 deletions(-) diff --git a/cmd/observer/node_builder/observer_builder.go b/cmd/observer/node_builder/observer_builder.go index 62f61947099..79885b6d7b6 100644 --- a/cmd/observer/node_builder/observer_builder.go +++ b/cmd/observer/node_builder/observer_builder.go @@ -363,7 +363,7 @@ func (builder *ObserverServiceBuilder) buildFollowerEngine() *ObserverServiceBui builder.SyncCore, node.Tracer, follower.WithComplianceOptions(compliance.WithSkipNewProposalsThreshold(builder.ComplianceConfig.SkipNewProposalsThreshold)), - follower.WithChannel(network.PublicReceiveBlocks), + follower.WithChannel(channels.PublicReceiveBlocks), ) if err != nil { return nil, fmt.Errorf("could not create follower engine: %w", err) diff --git a/engine/common/follower/engine.go b/engine/common/follower/engine.go index a66a82b21a9..4b090383140 100644 --- a/engine/common/follower/engine.go +++ b/engine/common/follower/engine.go @@ -40,7 +40,7 @@ type Engine struct { con network.Conduit sync 
module.BlockRequester tracer module.Tracer - channel network.Channel + channel channels.Channel } type Option func(*Engine) @@ -55,7 +55,7 @@ func WithComplianceOptions(opts ...compliance.Opt) Option { } // WithChannel sets the channel the follower engine will use to receive blocks. -func WithChannel(channel network.Channel) Option { +func WithChannel(channel channels.Channel) Option { return func(e *Engine) { e.channel = channel } diff --git a/follower/follower_builder.go b/follower/follower_builder.go index d2708edc111..0675da7a914 100644 --- a/follower/follower_builder.go +++ b/follower/follower_builder.go @@ -244,7 +244,7 @@ func (builder *FollowerServiceBuilder) buildFollowerEngine() *FollowerServiceBui builder.SyncCore, node.Tracer, follower.WithComplianceOptions(compliance.WithSkipNewProposalsThreshold(builder.ComplianceConfig.SkipNewProposalsThreshold)), - follower.WithChannel(network.PublicReceiveBlocks), + follower.WithChannel(channels.PublicReceiveBlocks), ) if err != nil { return nil, fmt.Errorf("could not create follower engine: %w", err) diff --git a/network/validator/pubsub/authorized_sender_validator.go b/network/validator/pubsub/authorized_sender_validator.go index 22ef7e3b1c9..1043fb3d611 100644 --- a/network/validator/pubsub/authorized_sender_validator.go +++ b/network/validator/pubsub/authorized_sender_validator.go @@ -64,7 +64,6 @@ func AuthorizedSenderValidator(log zerolog.Logger, channel channels.Channel, get log.Error(). Err(err). Str("peer_id", from.String()). - Str("peer_node_id", identity.NodeID.String()). Str("role", identity.Role.String()). Str("peer_node_id", identity.NodeID.String()). Str("message_type", msgType). From 1a221f161b68d42a1cfc672a905243bbb1e9c861 Mon Sep 17 00:00:00 2001 From: "Ramtin M. 
Seraj" Date: Tue, 19 Jul 2022 15:50:44 -0700 Subject: [PATCH 220/223] [FVM] merge constant size account registers in account status (#2799) --- .../execution_state_extract.go | 2 +- .../migrations/account_status_migration.go | 123 +++++++++++-- .../account_status_migration_test.go | 33 ++-- .../migrations/legacy_controller_migration.go | 2 +- .../legacy_controller_migration_test.go | 8 +- .../storage_used_update_migration.go | 26 ++- .../storage_used_update_migration_test.go | 29 +-- .../computation/computer/computer_test.go | 4 +- .../state/bootstrap/bootstrap_test.go | 2 +- fvm/accounts_test.go | 2 +- fvm/errors/accounts.go | 41 +++-- fvm/errors/codes.go | 1 + fvm/fvm_test.go | 8 +- fvm/state/accounts.go | 168 +++++++++--------- fvm/state/accounts_status.go | 115 +++++++++--- fvm/state/accounts_status_test.go | 55 ++++-- fvm/state/accounts_test.go | 18 +- fvm/state/state.go | 16 +- fvm/state/state_test.go | 3 - fvm/transactionInvoker_test.go | 4 +- module/mock/hot_stuff_follower.go | 17 +- utils/unittest/execution_state.go | 2 +- 22 files changed, 433 insertions(+), 246 deletions(-) diff --git a/cmd/util/cmd/execution-state-extract/execution_state_extract.go b/cmd/util/cmd/execution-state-extract/execution_state_extract.go index 499a2b0ec27..bae6425e36e 100644 --- a/cmd/util/cmd/execution-state-extract/execution_state_extract.go +++ b/cmd/util/cmd/execution-state-extract/execution_state_extract.go @@ -66,7 +66,7 @@ func extractExecutionState( Log: log, OutputDir: outputDir, } - accountStatusMigration := mgr.AccountStatusMigration{Logger: log} + accountStatusMigration := mgr.NewAccountStatusMigration(log) legacyControllerMigration := mgr.LegacyControllerMigration{Logger: log} migrations = []ledger.Migration{ diff --git a/cmd/util/ledger/migrations/account_status_migration.go b/cmd/util/ledger/migrations/account_status_migration.go index c1b26aecc11..1eaf9cc55c0 100644 --- a/cmd/util/ledger/migrations/account_status_migration.go +++ 
b/cmd/util/ledger/migrations/account_status_migration.go @@ -1,17 +1,27 @@ package migrations import ( + "encoding/binary" "encoding/hex" + "fmt" + "math/big" + "strings" + "github.com/onflow/atree" "github.com/rs/zerolog" - "github.com/onflow/flow-go/fvm/state" + "github.com/onflow/flow-go/engine/execution/state" + fvmState "github.com/onflow/flow-go/fvm/state" "github.com/onflow/flow-go/ledger" ) const ( - KeyExists = "exists" - KeyAccountFrozen = "frozen" + KeyExists = "exists" + KeyAccountFrozen = "frozen" + KeyPublicKeyCount = "public_key_count" + KeyStorageUsed = "storage_used" + KeyStorageIndex = "storage_index" + KeyPrefixPublicKey = "public_key_" ) // AccountStatusMigration migrates previous registers under @@ -23,28 +33,109 @@ const ( // This migration assumes no account has been frozen until now, and would warn if // find any account with frozen flags. type AccountStatusMigration struct { - Logger zerolog.Logger + logger zerolog.Logger + statuses map[string]*fvmState.AccountStatus + keyCounts map[string]uint64 +} + +func NewAccountStatusMigration(logger zerolog.Logger) *AccountStatusMigration { + return &AccountStatusMigration{ + logger: logger, + statuses: make(map[string]*fvmState.AccountStatus), + keyCounts: make(map[string]uint64), + } +} + +func (as *AccountStatusMigration) getOrInitStatus(owner []byte) *fvmState.AccountStatus { + st, exist := as.statuses[string(owner)] + if !exist { + return fvmState.NewAccountStatus() + } + return st +} + +func (as *AccountStatusMigration) setStatus(owner []byte, st *fvmState.AccountStatus) { + as.statuses[string(owner)] = st +} + +func (as *AccountStatusMigration) getKeyCount(owner []byte) uint64 { + return as.keyCounts[string(owner)] +} + +func (as *AccountStatusMigration) setKeyCount(owner []byte, count uint64) { + as.keyCounts[string(owner)] = count } func (as *AccountStatusMigration) Migrate(payload []ledger.Payload) ([]ledger.Payload, error) { newPayloads := make([]ledger.Payload, 0, len(payload)) + for _, p 
:= range payload { owner := p.Key.KeyParts[0].Value - controller := p.Key.KeyParts[1].Value key := p.Key.KeyParts[2].Value - if len(controller) == 0 && string(key) == KeyExists { - newPayload := p.DeepCopy() - newPayload.Key.KeyParts[2].Value = []byte(state.KeyAccountStatus) - newPayload.Value = state.NewAccountStatus().ToBytes() - newPayloads = append(newPayloads, *newPayload) - continue + + switch string(key) { + case KeyExists: + // in case an account doesn't have other registers + // we need to consturct an status anyway + st := as.getOrInitStatus(owner) + as.setStatus(owner, st) + case KeyPublicKeyCount: + // follow the original way of decoding the value + countInt := new(big.Int).SetBytes(p.Value) + count := countInt.Uint64() + // update status + status := as.getOrInitStatus(owner) + status.SetPublicKeyCount(count) + as.setStatus(owner, status) + case KeyStorageUsed: + // follow the original way of decoding the value + if len(p.Value) < 8 { + return nil, fmt.Errorf("malsized storage used, owner: %s value: %s", hex.EncodeToString(owner), hex.EncodeToString(p.Value)) + } + used := binary.BigEndian.Uint64(p.Value[:8]) + // update status + status := as.getOrInitStatus(owner) + status.SetStorageUsed(used) + as.setStatus(owner, status) + case KeyStorageIndex: + // follow the original way of decoding the value + if len(p.Value) < 8 { + return nil, fmt.Errorf("malsized storage index, owner: %s value: %s", hex.EncodeToString(owner), hex.EncodeToString(p.Value)) + } + var index atree.StorageIndex + copy(index[:], p.Value[:8]) + // update status + status := as.getOrInitStatus(owner) + status.SetStorageIndex(index) + as.setStatus(owner, status) + case KeyAccountFrozen: + status := as.getOrInitStatus(owner) + status.SetFrozenFlag(true) + as.setStatus(owner, status) + default: // else just append and continue + // collect actual public keys per accounts for a final sanity check + if strings.HasPrefix(string(key), KeyPrefixPublicKey) { + as.setKeyCount(owner, 
as.getKeyCount(owner)+1) + } + newPayloads = append(newPayloads, p) } - if len(controller) == 0 && string(key) == KeyAccountFrozen { - as.Logger.Warn().Msgf("frozen account found: %s", hex.EncodeToString(owner)) - continue + } + + // instead of removed registers add status to accounts + for owner, status := range as.statuses { + // sanity check on key counts (if it doesn't match, it could cause real issues (e.g. key override)) + if status.PublicKeyCount() != as.getKeyCount([]byte(owner)) { + return nil, fmt.Errorf("public key count doesn't match, owner: %s count from status: %d, count from registers: %d", + hex.EncodeToString([]byte(owner)), + status.PublicKeyCount(), + as.getKeyCount([]byte(owner))) } - // else just append and continue - newPayloads = append(newPayloads, p) + newKey := ledger.NewKey([]ledger.KeyPart{ + ledger.NewKeyPart(state.KeyPartOwner, []byte(owner)), + ledger.NewKeyPart(1, []byte("")), // legacy controller + ledger.NewKeyPart(state.KeyPartKey, []byte(fvmState.KeyAccountStatus)), + }) + newPayloads = append(newPayloads, *ledger.NewPayload(newKey, status.ToBytes())) } return newPayloads, nil } diff --git a/cmd/util/ledger/migrations/account_status_migration_test.go b/cmd/util/ledger/migrations/account_status_migration_test.go index 942dc65731f..d79d94a8468 100644 --- a/cmd/util/ledger/migrations/account_status_migration_test.go +++ b/cmd/util/ledger/migrations/account_status_migration_test.go @@ -14,32 +14,43 @@ import ( ) func TestAccountStatusMigration(t *testing.T) { - mig := AccountStatusMigration{ - Logger: zerolog.Logger{}, - } + mig := NewAccountStatusMigration(zerolog.Logger{}) address1 := flow.HexToAddress("0x1") address2 := flow.HexToAddress("0x2") payloads := []ledger.Payload{ - {Key: createPayloadKeyWithLegacyController(address1, state.KeyStorageUsed, true), Value: utils.Uint64ToBinary(1)}, + {Key: createPayloadKeyWithLegacyController(address1, KeyStorageUsed, true), Value: utils.Uint64ToBinary(12)}, {Key: 
createPayloadKeyWithLegacyController(address1, "other registers", true), Value: utils.Uint64ToBinary(2)}, {Key: createPayloadKeyWithLegacyController(address2, "other registers2", true), Value: utils.Uint64ToBinary(3)}, {Key: createPayloadKeyWithLegacyController(address1, KeyExists, true), Value: []byte{1}}, {Key: createPayloadKeyWithLegacyController(address1, KeyAccountFrozen, true), Value: []byte{1}}, + {Key: createPayloadKeyWithLegacyController(address1, KeyPublicKeyCount, true), Value: utils.Uint64ToBinary(2)}, + {Key: createPayloadKeyWithLegacyController(address1, KeyPrefixPublicKey+"0", true), Value: []byte{1}}, + {Key: createPayloadKeyWithLegacyController(address1, KeyPrefixPublicKey+"1", true), Value: []byte{2}}, + {Key: createPayloadKeyWithLegacyController(address1, KeyStorageIndex, true), Value: []byte{1, 0, 0, 0, 0, 0, 0, 0}}, } newPayloads, err := mig.Migrate(payloads) require.NoError(t, err) - require.Equal(t, 4, len(newPayloads)) // no more frozen register - - require.True(t, newPayloads[0].Equals(&payloads[0])) - require.True(t, newPayloads[1].Equals(&payloads[1])) - require.True(t, newPayloads[2].Equals(&payloads[2])) + require.Equal(t, 5, len(newPayloads)) + require.True(t, newPayloads[0].Equals(&payloads[1])) + require.True(t, newPayloads[1].Equals(&payloads[2])) + require.True(t, newPayloads[2].Equals(&payloads[6])) + require.True(t, newPayloads[3].Equals(&payloads[7])) + + // check address one status + expectedStatus := state.NewAccountStatus() + expectedStatus.SetFrozenFlag(true) + expectedStatus.SetPublicKeyCount(2) + expectedStatus.SetStorageUsed(12) + expectedStatus.SetStorageIndex([8]byte{1, 0, 0, 0, 0, 0, 0, 0}) expectedPayload := &ledger.Payload{ Key: createPayloadKeyWithLegacyController(address1, state.KeyAccountStatus, true), - Value: state.NewAccountStatus().ToBytes(), + Value: expectedStatus.ToBytes(), } - require.True(t, newPayloads[3].Equals(expectedPayload)) + + // check address two status + require.True(t, 
newPayloads[4].Equals(expectedPayload)) } diff --git a/cmd/util/ledger/migrations/legacy_controller_migration.go b/cmd/util/ledger/migrations/legacy_controller_migration.go index daa4834570b..849e1bc94ee 100644 --- a/cmd/util/ledger/migrations/legacy_controller_migration.go +++ b/cmd/util/ledger/migrations/legacy_controller_migration.go @@ -28,7 +28,7 @@ func (lc *LegacyControllerMigration) Migrate(payload []ledger.Payload) ([]ledger if len(controller) > 0 { if bytes.Equal(owner, controller) && - string(key) != fvmState.KeyPublicKeyCount && // case - public key count + string(key) != KeyPublicKeyCount && // case - public key count !bytes.HasPrefix(key, []byte("public_key_")) && // case - public keys string(key) != fvmState.KeyContractNames && // case - contract names !bytes.HasPrefix(key, []byte(fvmState.KeyCode)) { // case - contracts diff --git a/cmd/util/ledger/migrations/legacy_controller_migration_test.go b/cmd/util/ledger/migrations/legacy_controller_migration_test.go index d5ebe1975dd..646a7e1c9d7 100644 --- a/cmd/util/ledger/migrations/legacy_controller_migration_test.go +++ b/cmd/util/ledger/migrations/legacy_controller_migration_test.go @@ -53,19 +53,19 @@ func TestLegacyControllerMigration(t *testing.T) { address2 := flow.HexToAddress("0x2") payloads := []ledger.Payload{ - {Key: createPayloadKeyWithLegacyController(address1, fvmstate.KeyStorageUsed, false), Value: utils.Uint64ToBinary(1)}, + {Key: createPayloadKeyWithLegacyController(address1, KeyStorageUsed, false), Value: utils.Uint64ToBinary(1)}, {Key: createPayloadKeyWithLegacyController(address1, fvmstate.ContractKey("CoreContract"), true), Value: utils.Uint64ToBinary(2)}, {Key: createPayloadKeyWithLegacyController(address1, fvmstate.KeyContractNames, true), Value: utils.Uint64ToBinary(3)}, {Key: createPayloadKeyWithLegacyController(address2, fvmstate.KeyPublicKey(1), true), Value: utils.Uint64ToBinary(4)}, - {Key: createPayloadKeyWithLegacyController(address2, fvmstate.KeyPublicKeyCount, true), 
Value: utils.Uint64ToBinary(4)}, + {Key: createPayloadKeyWithLegacyController(address2, KeyPublicKeyCount, true), Value: utils.Uint64ToBinary(4)}, } expectedKeys := []ledger.Key{ - createMigratedPayloadKey(address1, fvmstate.KeyStorageUsed), + createMigratedPayloadKey(address1, KeyStorageUsed), createMigratedPayloadKey(address1, fvmstate.ContractKey("CoreContract")), createMigratedPayloadKey(address1, fvmstate.KeyContractNames), createMigratedPayloadKey(address2, fvmstate.KeyPublicKey(1)), - createMigratedPayloadKey(address2, fvmstate.KeyPublicKeyCount), + createMigratedPayloadKey(address2, KeyPublicKeyCount), } newPayloads, err := mig.Migrate(payloads) diff --git a/cmd/util/ledger/migrations/storage_used_update_migration.go b/cmd/util/ledger/migrations/storage_used_update_migration.go index 1afb47f4f51..7206cda92da 100644 --- a/cmd/util/ledger/migrations/storage_used_update_migration.go +++ b/cmd/util/ledger/migrations/storage_used_update_migration.go @@ -12,9 +12,9 @@ import ( "golang.org/x/sync/errgroup" + "github.com/onflow/flow-go/fvm/state" fvm "github.com/onflow/flow-go/fvm/state" "github.com/onflow/flow-go/ledger" - "github.com/onflow/flow-go/ledger/common/utils" "github.com/onflow/flow-go/model/flow" "github.com/rs/zerolog" @@ -118,7 +118,7 @@ func (m *StorageUsedUpdateMigration) Migrate(payload []ledger.Payload) ([]ledger // not an address continue } - if id.Key == fvm.KeyStorageUsed { + if id.Key == fvm.KeyAccountStatus { storageUsedPayloadChan <- accountStorageUsedPayload{ Address: id.Owner, Index: p.Index, @@ -177,22 +177,16 @@ Loop: log.Error().Err(err).Msg("error converting key to register ID") return nil, err } - if id.Key != fvm.KeyStorageUsed { - return nil, fmt.Errorf("this is not a storage used register") + if id.Key != fvm.KeyAccountStatus { + return nil, fmt.Errorf("this is not a status register") } - oldUsed, _, err := utils.ReadUint64(p.Value) + status, err := state.AccountStatusFromBytes(p.Value) if err != nil { - errStr := "cannot decode 
storage used by address" - log.Error(). - Str("address", flow.BytesToAddress([]byte(a)).Hex()). - Hex("storageUsed", p.Value). - Hex("storageUsedKey", p.Key.CanonicalForm()). - Err(err). - Msg(errStr) - return nil, fmt.Errorf(errStr) + log.Error().Err(err).Msg("error getting status") + return nil, err } - + oldUsed := status.StorageUsed() if oldUsed > used { storageDecreaseCount += 1 change = -int64(oldUsed - used) @@ -207,8 +201,8 @@ Loop: if err != nil { return nil, err } - - payload[pIndex].Value = utils.Uint64ToBinary(used) + status.SetStorageUsed(used) + payload[pIndex].Value = status.ToBytes() } m.Log.Info(). diff --git a/cmd/util/ledger/migrations/storage_used_update_migration_test.go b/cmd/util/ledger/migrations/storage_used_update_migration_test.go index e00b853c1ae..f1f5f94d05d 100644 --- a/cmd/util/ledger/migrations/storage_used_update_migration_test.go +++ b/cmd/util/ledger/migrations/storage_used_update_migration_test.go @@ -8,7 +8,6 @@ import ( "github.com/onflow/flow-go/engine/execution/state" state2 "github.com/onflow/flow-go/fvm/state" "github.com/onflow/flow-go/ledger" - "github.com/onflow/flow-go/ledger/common/utils" "github.com/rs/zerolog" "github.com/stretchr/testify/require" @@ -24,48 +23,52 @@ func TestStorageUsedUpdateMigrationMigration(t *testing.T) { address1 := flow.HexToAddress("0x1") t.Run("fix storage used", func(t *testing.T) { + status := state2.NewAccountStatus() + status.SetStorageUsed(1) payload := []ledger.Payload{ - {Key: createAccountPayloadKey(address1, state2.KeyAccountStatus), Value: []byte{1}}, - {Key: createAccountPayloadKey(address1, state2.KeyStorageUsed), Value: utils.Uint64ToBinary(1)}, + // TODO (ramtin) add more registers + {Key: createAccountPayloadKey(address1, state2.KeyAccountStatus), Value: status.ToBytes()}, } migratedPayload, err := mig.Migrate(payload) require.NoError(t, err) - migratedSize, _, err := utils.ReadUint64(migratedPayload[1].Value) + migratedStatus, err := 
state2.AccountStatusFromBytes(migratedPayload[0].Value) require.NoError(t, err) require.Equal(t, len(migratedPayload), len(payload)) - require.Equal(t, uint64(48), migratedSize) + require.Equal(t, uint64(40), migratedStatus.StorageUsed()) }) t.Run("fix storage used if used to high", func(t *testing.T) { + status := state2.NewAccountStatus() + status.SetStorageUsed(10000) payload := []ledger.Payload{ - {Key: createAccountPayloadKey(address1, state2.KeyAccountStatus), Value: []byte{1}}, - {Key: createAccountPayloadKey(address1, state2.KeyStorageUsed), Value: utils.Uint64ToBinary(10000)}, + {Key: createAccountPayloadKey(address1, state2.KeyAccountStatus), Value: status.ToBytes()}, } migratedPayload, err := mig.Migrate(payload) require.NoError(t, err) - migratedSize, _, err := utils.ReadUint64(migratedPayload[1].Value) + migratedStatus, err := state2.AccountStatusFromBytes(migratedPayload[0].Value) require.NoError(t, err) require.Equal(t, len(migratedPayload), len(payload)) - require.Equal(t, uint64(48), migratedSize) + require.Equal(t, uint64(40), migratedStatus.StorageUsed()) }) t.Run("do not fix storage used if storage used ok", func(t *testing.T) { + status := state2.NewAccountStatus() + status.SetStorageUsed(40) payload := []ledger.Payload{ - {Key: createAccountPayloadKey(address1, state2.KeyAccountStatus), Value: []byte{1}}, - {Key: createAccountPayloadKey(address1, state2.KeyStorageUsed), Value: utils.Uint64ToBinary(55)}, + {Key: createAccountPayloadKey(address1, state2.KeyAccountStatus), Value: status.ToBytes()}, } migratedPayload, err := mig.Migrate(payload) require.NoError(t, err) - migratedSize, _, err := utils.ReadUint64(migratedPayload[1].Value) + migratedStatus, err := state2.AccountStatusFromBytes(migratedPayload[0].Value) require.NoError(t, err) require.Equal(t, len(migratedPayload), len(payload)) - require.Equal(t, uint64(48), migratedSize) + require.Equal(t, uint64(40), migratedStatus.StorageUsed()) }) t.Run("error is storage used does not exist", 
func(t *testing.T) { diff --git a/engine/execution/computation/computer/computer_test.go b/engine/execution/computation/computer/computer_test.go index c9c00b2fb7e..06bafbac6d7 100644 --- a/engine/execution/computation/computer/computer_test.go +++ b/engine/execution/computation/computer/computer_test.go @@ -451,7 +451,7 @@ func TestBlockExecutor_ExecuteBlock(t *testing.T) { return nil, nil }) - err = view.Set(string(address.Bytes()), state.KeyAccountStatus, []byte{1}) + err = view.Set(string(address.Bytes()), state.KeyAccountStatus, state.NewAccountStatus().ToBytes()) require.NoError(t, err) result, err := exe.ExecuteBlock(context.Background(), block, view, programs.NewEmptyPrograms()) @@ -526,7 +526,7 @@ func TestBlockExecutor_ExecuteBlock(t *testing.T) { return nil, nil }) - err = view.Set(string(address.Bytes()), state.KeyAccountStatus, []byte{1}) + err = view.Set(string(address.Bytes()), state.KeyAccountStatus, state.NewAccountStatus().ToBytes()) require.NoError(t, err) result, err := exe.ExecuteBlock(context.Background(), block, view, programs.NewEmptyPrograms()) diff --git a/engine/execution/state/bootstrap/bootstrap_test.go b/engine/execution/state/bootstrap/bootstrap_test.go index 8752aad8f36..4123b511182 100644 --- a/engine/execution/state/bootstrap/bootstrap_test.go +++ b/engine/execution/state/bootstrap/bootstrap_test.go @@ -47,7 +47,7 @@ func TestBootstrapLedger(t *testing.T) { } func TestBootstrapLedger_ZeroTokenSupply(t *testing.T) { - expectedStateCommitmentBytes, _ := hex.DecodeString("087fe6f4925bd2e74ae17d7e2a37ae6bd1d8b1a0e2b749e6ec5ddc7b7d35af49") + expectedStateCommitmentBytes, _ := hex.DecodeString("a60bf339bc5cfe60c2c70a3a492c3200e600443479098dd60b5613222ba57a70") expectedStateCommitment, err := flow.ToStateCommitment(expectedStateCommitmentBytes) require.NoError(t, err) diff --git a/fvm/accounts_test.go b/fvm/accounts_test.go index 7be886afa04..3df2a9c5d33 100644 --- a/fvm/accounts_test.go +++ b/fvm/accounts_test.go @@ -1336,7 +1336,7 @@ 
func TestAccountBalanceFields(t *testing.T) { assert.NoError(t, err) assert.NoError(t, script.Err) - assert.Equal(t, cadence.UFix64(9999_2710), script.Value) + assert.Equal(t, cadence.UFix64(9999_3120), script.Value) }), ) diff --git a/fvm/errors/accounts.go b/fvm/errors/accounts.go index 51631ed66c9..a49720a7852 100644 --- a/fvm/errors/accounts.go +++ b/fvm/errors/accounts.go @@ -118,31 +118,36 @@ func (e FrozenAccountError) Code() ErrorCode { return ErrCodeFrozenAccountError } -// StorageNotInitialized captures a fatal error when trying to update storage used on a non-initialized account -type StorageNotInitialized struct { - Address string +// AccountPublicKeyLimitError is returned when an account tries to add public keys over the limit +type AccountPublicKeyLimitError struct { + address flow.Address + counts uint64 + limit uint64 } -// NewStorageNotInitialized formats and returns a new StorageNotInitialized -func NewStorageNotInitialized(address string) *StorageNotInitialized { - return &StorageNotInitialized{ - Address: address, +// NewAccountPublicKeyLimitError constructs a new AccountPublicKeyLimitError +func NewAccountPublicKeyLimitError(address flow.Address, counts, limit uint64) error { + return &AccountPublicKeyLimitError{ + address: address, + counts: counts, + limit: limit, } } -func (e *StorageNotInitialized) Error() string { - return fmt.Sprintf("%s account %s storage used is not initialized or not initialized correctly", - e.Code().String(), - e.Address) +// Address returns the address of frozen account +func (e AccountPublicKeyLimitError) Address() flow.Address { + return e.address } -// Code returns the failure code -func (e *StorageNotInitialized) Code() ErrorCode { - return ErrCodeAccountStorageNotInitializedError +func (e AccountPublicKeyLimitError) Error() string { + return fmt.Sprintf("%s account's (%s) public key count (%d) exceeded the limit (%d)", + e.Code().String(), + e.address, + e.counts, + e.limit) } -// IsStorageNotInitializedFailure 
checks if the error is a StorageNotInitializedFailure -func IsStorageNotInitializedFailure(err error) bool { - var t *StorageNotInitialized - return errors.As(err, &t) +// Code returns the error code for this error type +func (e AccountPublicKeyLimitError) Code() ErrorCode { + return ErrCodeAccountPublicKeyLimitError } diff --git a/fvm/errors/codes.go b/fvm/errors/codes.go index 4af512b4551..ad438b75456 100644 --- a/fvm/errors/codes.go +++ b/fvm/errors/codes.go @@ -72,6 +72,7 @@ const ( ErrCodeAccountAlreadyExistsError ErrorCode = 1203 ErrCodeFrozenAccountError ErrorCode = 1204 ErrCodeAccountStorageNotInitializedError ErrorCode = 1205 + ErrCodeAccountPublicKeyLimitError ErrorCode = 1206 // contract errors 1250 - 1300 // ErrCodeContractError ErrorCode = 1250 - reserved diff --git a/fvm/fvm_test.go b/fvm/fvm_test.go index 678758715d3..3d0fa8d9e79 100644 --- a/fvm/fvm_test.go +++ b/fvm/fvm_test.go @@ -2,7 +2,6 @@ package fvm_test import ( "crypto/rand" - "encoding/binary" "encoding/hex" "fmt" "math" @@ -1455,11 +1454,10 @@ func TestStorageUsed(t *testing.T) { address, err := hex.DecodeString("2a3c4c2581cef731") require.NoError(t, err) - storageUsed := make([]byte, 8) - binary.BigEndian.PutUint64(storageUsed, 5) - simpleView := utils.NewSimpleView() - err = simpleView.Set(string(address), state.KeyStorageUsed, storageUsed) + status := state.NewAccountStatus() + status.SetStorageUsed(5) + err = simpleView.Set(string(address), state.KeyAccountStatus, status.ToBytes()) require.NoError(t, err) script := fvm.Script(code) diff --git a/fvm/state/accounts.go b/fvm/state/accounts.go index c541f046b9e..1c90c6e166c 100644 --- a/fvm/state/accounts.go +++ b/fvm/state/accounts.go @@ -2,9 +2,8 @@ package state import ( "bytes" - "encoding/binary" "fmt" - "math/big" + "math" "sort" "github.com/fxamacker/cbor/v2" @@ -19,12 +18,9 @@ import ( const ( AccountKeyPrefix = "a." 
KeyAccountStatus = AccountKeyPrefix + "s" - KeyPublicKeyCount = "public_key_count" KeyCode = "code" KeyContractNames = "contract_names" - KeyStorageUsed = "storage_used" - KeyStorageIndex = "storage_index" - uint64StorageSize = 8 + MaxPublicKeyCount = math.MaxUint64 ) type Accounts interface { @@ -61,20 +57,14 @@ func NewAccounts(stateHolder *StateHolder) *StatefulAccounts { } func (a *StatefulAccounts) AllocateStorageIndex(address flow.Address) (atree.StorageIndex, error) { - indexBytes, err := a.GetValue(address, KeyStorageIndex) + // get status + status, err := a.getAccountStatus(address) if err != nil { return atree.StorageIndex{}, err } - if len(indexBytes) == 0 { - // if not exist for the first time set it to zero and return - indexBytes = []byte{0, 0, 0, 0, 0, 0, 0, 1} - } else if len(indexBytes) != uint64StorageSize { - return atree.StorageIndex{}, fmt.Errorf("invalid storage index byte size (%d != 8)", len(indexBytes)) - } - - var index atree.StorageIndex - copy(index[:], indexBytes[:8]) + // get and increment the index + index := status.StorageIndex() newIndexBytes := index.Next() // store nil so that the setValue for new allocated slabs would be faster @@ -83,13 +73,14 @@ func (a *StatefulAccounts) AllocateStorageIndex(address flow.Address) (atree.Sto key := atree.SlabIndexToLedgerKey(index) err = a.stateHolder.State().Set(string(address.Bytes()), string(key), []byte{}, false) if err != nil { - return atree.StorageIndex{}, fmt.Errorf("failed to store empty value for newly allocated storage index: %w", err) + return atree.StorageIndex{}, fmt.Errorf("failed to allocate an storage index: %w", err) } // update the storageIndex bytes - err = a.SetValue(address, KeyStorageIndex, newIndexBytes[:]) + status.SetStorageIndex(newIndexBytes) + err = a.setAccountStatus(address, status) if err != nil { - return atree.StorageIndex{}, fmt.Errorf("failed to store the key storage index: %w", err) + return atree.StorageIndex{}, fmt.Errorf("failed to allocate an storage 
index: %w", err) } return index, nil } @@ -140,34 +131,36 @@ func (a *StatefulAccounts) Exists(address flow.Address) (bool, error) { return false, err } - accStatus, err := AccountStatusFromBytes(accStatusBytes) + // account doesn't exist if account status doesn't exist + if len(accStatusBytes) == 0 { + return false, nil + } + + // check if we can construct account status from the value of this register + _, err = AccountStatusFromBytes(accStatusBytes) if err != nil { return false, err } - return accStatus.AccountExists(), nil + return true, nil } // Create account sets all required registers on an address. func (a *StatefulAccounts) Create(publicKeys []flow.AccountPublicKey, newAddress flow.Address) error { exists, err := a.Exists(newAddress) if err != nil { - return err + return fmt.Errorf("failed to create a new account: %w", err) } if exists { return errors.NewAccountAlreadyExistsError(newAddress) } - storageUsedByStorageUsed := uint64(RegisterSize(newAddress, KeyStorageUsed, make([]byte, uint64StorageSize))) - err = a.setStorageUsed(newAddress, storageUsedByStorageUsed) - if err != nil { - return err - } + accountStatus := NewAccountStatus() + accountStatus.SetStorageUsed(uint64(RegisterSize(newAddress, KeyAccountStatus, accountStatus.ToBytes()))) - // mark that this account exists - err = a.SetValue(newAddress, KeyAccountStatus, NewAccountStatus().ToBytes()) + err = a.setAccountStatus(newAddress, accountStatus) if err != nil { - return err + return fmt.Errorf("failed to create a new account: %w", err) } return a.SetAllPublicKeys(newAddress, publicKeys) } @@ -191,26 +184,26 @@ func (a *StatefulAccounts) GetPublicKey(address flow.Address, keyIndex uint64) ( } func (a *StatefulAccounts) GetPublicKeyCount(address flow.Address) (uint64, error) { - countBytes, err := a.GetValue(address, KeyPublicKeyCount) + status, err := a.getAccountStatus(address) if err != nil { - return 0, err + return 0, fmt.Errorf("failed to get public key count: %w", err) } - - countInt := 
new(big.Int).SetBytes(countBytes) - if !countInt.IsUint64() { - return 0, fmt.Errorf( - "retrieved public key account count bytes (hex-encoded): %x does not represent valid uint64", - countBytes, - ) - } - - return countInt.Uint64(), nil + return status.PublicKeyCount(), nil } func (a *StatefulAccounts) setPublicKeyCount(address flow.Address, count uint64) error { - newCount := new(big.Int).SetUint64(count) + status, err := a.getAccountStatus(address) + if err != nil { + return fmt.Errorf("failed to set public key count for account (%s): %w", address.String(), err) + } + + status.SetPublicKeyCount(count) - return a.SetValue(address, KeyPublicKeyCount, newCount.Bytes()) + err = a.setAccountStatus(address, status) + if err != nil { + return fmt.Errorf("failed to set public key count for account (%s): %w", address.String(), err) + } + return nil } func (a *StatefulAccounts) GetPublicKeys(address flow.Address) (publicKeys []flow.AccountPublicKey, err error) { @@ -255,6 +248,13 @@ func (a *StatefulAccounts) SetPublicKey( } func (a *StatefulAccounts) SetAllPublicKeys(address flow.Address, publicKeys []flow.AccountPublicKey) error { + + count := uint64(len(publicKeys)) // len returns int and this will not exceed uint64 + + if count >= MaxPublicKeyCount { + return errors.NewAccountPublicKeyLimitError(address, count, MaxPublicKeyCount) + } + for i, publicKey := range publicKeys { _, err := a.SetPublicKey(address, uint64(i), publicKey) if err != nil { @@ -262,8 +262,6 @@ func (a *StatefulAccounts) SetAllPublicKeys(address flow.Address, publicKeys []f } } - count := uint64(len(publicKeys)) // len returns int and this will not exceed uint64 - return a.setPublicKeyCount(address, count) } @@ -282,6 +280,10 @@ func (a *StatefulAccounts) AppendPublicKey(address flow.Address, publicKey flow. 
return err } + if count >= MaxPublicKeyCount { + return errors.NewAccountPublicKeyLimitError(address, count+1, MaxPublicKeyCount) + } + _, err = a.SetPublicKey(address, count, publicKey) if err != nil { return err @@ -385,25 +387,26 @@ func (a *StatefulAccounts) setContractNames(contractNames contractNames, address // GetStorageUsed returns the amount of storage used in bytes by this account func (a *StatefulAccounts) GetStorageUsed(address flow.Address) (uint64, error) { - storageUsedRegister, err := a.GetValue(address, KeyStorageUsed) + status, err := a.getAccountStatus(address) if err != nil { - return 0, err + return 0, fmt.Errorf("failed to get storage used: %w", err) } + return status.StorageUsed(), nil +} - if len(storageUsedRegister) != uint64StorageSize { - return 0, errors.NewStorageNotInitialized(address.Hex()) +func (a *StatefulAccounts) setStorageUsed(address flow.Address, used uint64) error { + status, err := a.getAccountStatus(address) + if err != nil { + return fmt.Errorf("failed to set storage used: %w", err) } - storageUsed, _, err := readUint64(storageUsedRegister) + status.SetStorageUsed(used) + + err = a.setAccountStatus(address, status) if err != nil { - return 0, err + return fmt.Errorf("failed to set storage used: %w", err) } - return storageUsed, nil -} - -func (a *StatefulAccounts) setStorageUsed(address flow.Address, used uint64) error { - usedBinary := uint64ToBinary(used) - return a.SetValue(address, KeyStorageUsed, usedBinary) + return nil } func (a *StatefulAccounts) GetValue(address flow.Address, key string) (flow.RegisterValue, error) { @@ -421,8 +424,8 @@ func (a *StatefulAccounts) SetValue(address flow.Address, key string, value flow } func (a *StatefulAccounts) updateRegisterSizeChange(address flow.Address, key string, value flow.RegisterValue) error { - if key == KeyStorageUsed { - // size of this register is always uint64StorageSize + if key == KeyAccountStatus { + // size of this register is always fixed size // don't double 
check this to save time and prevent recursion return nil } @@ -567,49 +570,38 @@ func (a *StatefulAccounts) DeleteContract(contractName string, address flow.Addr return a.setContractNames(contractNames, address) } -// uint64ToBinary converst a uint64 to a byte slice (big endian) -func uint64ToBinary(integer uint64) []byte { - b := make([]byte, 8) - binary.BigEndian.PutUint64(b, integer) - return b +func (a *StatefulAccounts) getAccountStatus(address flow.Address) (*AccountStatus, error) { + statusBytes, err := a.GetValue(address, KeyAccountStatus) + if err != nil { + return nil, fmt.Errorf("failed to load account status for the account (%s): %w", address.String(), err) + } + + return AccountStatusFromBytes(statusBytes) } -// readUint64 reads a uint64 from the input and returns the rest -func readUint64(input []byte) (value uint64, rest []byte, err error) { - if len(input) < 8 { - return 0, input, fmt.Errorf("input size (%d) is too small to read a uint64", len(input)) +func (a *StatefulAccounts) setAccountStatus(address flow.Address, status *AccountStatus) error { + err := a.SetValue(address, KeyAccountStatus, status.ToBytes()) + if err != nil { + return fmt.Errorf("failed to store the account status for account (%s): %w", address.String(), err) } - return binary.BigEndian.Uint64(input[:8]), input[8:], nil + return nil } func (a *StatefulAccounts) GetAccountFrozen(address flow.Address) (bool, error) { - accStatusBytes, err := a.GetValue(address, KeyAccountStatus) - if err != nil { - return false, err - } - accStatus, err := AccountStatusFromBytes(accStatusBytes) + status, err := a.getAccountStatus(address) if err != nil { return false, err } - // TODO introduce this logic later - // // if account does not exist, frozen is not meaningful - // if !accStatus.AccountExists() { - // return false, errors.NewAccountNotFoundError(address) - // } - return accStatus.IsAccountFrozen(), nil + return status.IsAccountFrozen(), nil } func (a *StatefulAccounts) 
SetAccountFrozen(address flow.Address, frozen bool) error { - accStatusBytes, err := a.GetValue(address, KeyAccountStatus) - if err != nil { - return err - } - accStatus, err := AccountStatusFromBytes(accStatusBytes) + status, err := a.getAccountStatus(address) if err != nil { return err } - accStatus = SetAccountStatusFrozenFlag(accStatus, frozen) - return a.SetValue(address, KeyAccountStatus, accStatus.ToBytes()) + status.SetFrozenFlag(frozen) + return a.setAccountStatus(address, status) } // handy function to error out if account is frozen diff --git a/fvm/state/accounts_status.go b/fvm/state/accounts_status.go index 9ee2442887c..45e8271c9ab 100644 --- a/fvm/state/accounts_status.go +++ b/fvm/state/accounts_status.go @@ -1,51 +1,116 @@ package state import ( + "encoding/binary" "encoding/hex" + "github.com/onflow/atree" + "github.com/onflow/flow-go/fvm/errors" ) -type AccountStatus uint8 +const ( + flagSize = 1 + storageUsedSize = 8 + storageIndexSize = 8 + publicKeyCountsSize = 8 + + accountStatusSize = flagSize + + storageUsedSize + + storageIndexSize + + publicKeyCountsSize + + flagIndex = 0 + storageUsedStartIndex = flagIndex + flagSize + storageIndexStartIndex = storageUsedStartIndex + storageUsedSize + publicKeyCountsStartIndex = storageIndexStartIndex + storageIndexSize +) + +// AccountStatus holds meta information about an account +// +// currently modelled as a byte array with on-demand encoding/decoding of sub arrays +// the first byte captures flags (e.g. 
frozen) +// the next 8 bytes (big-endian) captures storage used by an account +// the next 8 bytes (big-endian) captures the storage index of an account +// and the last 8 bytes (big-endian) captures the number of public keys stored on this account +type AccountStatus [accountStatusSize]byte const ( - maskExist byte = 0b0000_0001 maskFrozen byte = 0b1000_0000 ) -// NewAccountStatus sets exist flag and return an AccountStatus -func NewAccountStatus() AccountStatus { - return AccountStatus(maskExist) +// NewAccountStatus returns a new AccountStatus +// sets the storage index to the init value +func NewAccountStatus() *AccountStatus { + return &AccountStatus{ + 0, // initial empty flags + 0, 0, 0, 0, 0, 0, 0, 0, // init value for storage used + 0, 0, 0, 0, 0, 0, 0, 1, // init value for storage index + 0, 0, 0, 0, 0, 0, 0, 0, // init value for public key counts + } } -func (a AccountStatus) ToBytes() []byte { - b := make([]byte, 1) - b[0] = byte(a) - return b +// ToBytes converts AccountStatus to a byte slice +// +// this has been kept this way in case one day +// we decided to move on to use an struct to represent +// account status. 
+func (a *AccountStatus) ToBytes() []byte { + return a[:] } -func AccountStatusFromBytes(inp []byte) (AccountStatus, error) { - // if len of inp is zero, account does not exist - if len(inp) == 0 { - return 0, nil +// AccountStatusFromBytes constructs an AccountStatus from the given byte slice +func AccountStatusFromBytes(inp []byte) (*AccountStatus, error) { + var as AccountStatus + if len(inp) != accountStatusSize { + return &as, errors.NewValueErrorf(hex.EncodeToString(inp), "invalid account status size") } - if len(inp) > 1 { - return 0, errors.NewValueErrorf(hex.EncodeToString(inp), "invalid account state") + copy(as[:], inp) + return &as, nil +} + +// IsAccountFrozen returns true if account's frozen flag is set +func (a *AccountStatus) IsAccountFrozen() bool { + return a[flagIndex]&maskFrozen > 0 +} + +// SetFrozenFlag sets the frozen flag +func (a *AccountStatus) SetFrozenFlag(frozen bool) { + if frozen { + a[flagIndex] = a[flagIndex] | maskFrozen + return } - return AccountStatus(inp[0]), nil + a[flagIndex] = a[flagIndex] & (0xFF - maskFrozen) } -func (a AccountStatus) AccountExists() bool { - return a > 0 +// SetStorageUsed updates the storage used by the account +func (a *AccountStatus) SetStorageUsed(used uint64) { + binary.BigEndian.PutUint64(a[storageUsedStartIndex:storageUsedStartIndex+storageUsedSize], used) } -func (a AccountStatus) IsAccountFrozen() bool { - return uint8(a)&maskFrozen > 0 +// StorageUsed returns the storage used by the account +func (a *AccountStatus) StorageUsed() uint64 { + return binary.BigEndian.Uint64(a[storageUsedStartIndex : storageUsedStartIndex+storageUsedSize]) } -func SetAccountStatusFrozenFlag(inp AccountStatus, frozen bool) AccountStatus { - if frozen { - return AccountStatus(uint8(inp) | maskFrozen) - } - return AccountStatus(uint8(inp) & (0xFF - maskFrozen)) +// SetStorageIndex updates the storage index of the account +func (a *AccountStatus) SetStorageIndex(index atree.StorageIndex) { + 
copy(a[storageIndexStartIndex:storageIndexStartIndex+storageIndexSize], index[:storageIndexSize]) +} + +// StorageIndex returns the storage index of the account +func (a *AccountStatus) StorageIndex() atree.StorageIndex { + var index atree.StorageIndex + copy(index[:], a[storageIndexStartIndex:storageIndexStartIndex+storageIndexSize]) + return index +} + +// SetPublicKeyCount updates the public key count of the account +func (a *AccountStatus) SetPublicKeyCount(count uint64) { + binary.BigEndian.PutUint64(a[publicKeyCountsStartIndex:publicKeyCountsStartIndex+publicKeyCountsSize], count) +} + +// PublicKeyCount returns the public key count of the account +func (a *AccountStatus) PublicKeyCount() uint64 { + return binary.BigEndian.Uint64(a[publicKeyCountsStartIndex : publicKeyCountsStartIndex+publicKeyCountsSize]) } diff --git a/fvm/state/accounts_status_test.go b/fvm/state/accounts_status_test.go index 2765fa15b72..2a77c28eb79 100644 --- a/fvm/state/accounts_status_test.go +++ b/fvm/state/accounts_status_test.go @@ -1,8 +1,10 @@ package state_test import ( + "bytes" "testing" + "github.com/onflow/atree" "github.com/stretchr/testify/require" "github.com/onflow/flow-go/fvm/state" @@ -11,20 +13,45 @@ import ( func TestAccountStatus(t *testing.T) { s := state.NewAccountStatus() - require.True(t, s.AccountExists()) require.False(t, s.IsAccountFrozen()) - s = state.SetAccountStatusFrozenFlag(s, true) - require.True(t, s.AccountExists()) - require.True(t, s.IsAccountFrozen()) - - s = state.SetAccountStatusFrozenFlag(s, false) - require.True(t, s.AccountExists()) - require.False(t, s.IsAccountFrozen()) - - var err error - s, err = state.AccountStatusFromBytes(s.ToBytes()) - require.NoError(t, err) - require.True(t, s.AccountExists()) - require.False(t, s.IsAccountFrozen()) + t.Run("test frozen flag set/reset", func(t *testing.T) { + s.SetFrozenFlag(true) + require.True(t, s.IsAccountFrozen()) + + s.SetFrozenFlag(false) + require.False(t, s.IsAccountFrozen()) + }) + + 
t.Run("test setting values", func(t *testing.T) { + // set some values for side effect checks + s.SetFrozenFlag(true) + + index := atree.StorageIndex{1, 2, 3, 4, 5, 6, 7, 8} + s.SetStorageIndex(index) + s.SetPublicKeyCount(34) + s.SetStorageUsed(56) + + require.Equal(t, uint64(56), s.StorageUsed()) + returnedIndex := s.StorageIndex() + require.True(t, bytes.Equal(index[:], returnedIndex[:])) + require.Equal(t, uint64(34), s.PublicKeyCount()) + + // check no side effect on flags + require.True(t, s.IsAccountFrozen()) + }) + + t.Run("test serialization", func(t *testing.T) { + b := append([]byte(nil), s.ToBytes()...) + clone, err := state.AccountStatusFromBytes(b) + require.NoError(t, err) + require.Equal(t, s.IsAccountFrozen(), clone.IsAccountFrozen()) + require.Equal(t, s.StorageIndex(), clone.StorageIndex()) + require.Equal(t, s.PublicKeyCount(), clone.PublicKeyCount()) + require.Equal(t, s.StorageUsed(), clone.StorageUsed()) + + // invalid size bytes + _, err = state.AccountStatusFromBytes([]byte{1, 2}) + require.Error(t, err) + }) } diff --git a/fvm/state/accounts_test.go b/fvm/state/accounts_test.go index 93b13b5a794..5d16dfccd3f 100644 --- a/fvm/state/accounts_test.go +++ b/fvm/state/accounts_test.go @@ -23,8 +23,8 @@ func TestAccounts_Create(t *testing.T) { err := accounts.Create(nil, address) require.NoError(t, err) - // storage_used + exists + key count - require.Equal(t, len(view.Ledger.RegisterTouches), 3) + // account status + require.Equal(t, len(view.Ledger.RegisterTouches), 1) }) t.Run("Fails if account exists", func(t *testing.T) { @@ -272,7 +272,7 @@ func TestAccount_StorageUsed(t *testing.T) { storageUsed, err := accounts.GetStorageUsed(address) require.NoError(t, err) - require.Equal(t, uint64(48), storageUsed) + require.Equal(t, uint64(40), storageUsed) }) t.Run("Storage used on register set increases", func(t *testing.T) { @@ -289,7 +289,7 @@ func TestAccount_StorageUsed(t *testing.T) { storageUsed, err := accounts.GetStorageUsed(address) 
require.NoError(t, err) - require.Equal(t, uint64(48+32), storageUsed) + require.Equal(t, uint64(40+32), storageUsed) }) t.Run("Storage used, set twice on same register to same value, stays the same", func(t *testing.T) { @@ -308,7 +308,7 @@ func TestAccount_StorageUsed(t *testing.T) { storageUsed, err := accounts.GetStorageUsed(address) require.NoError(t, err) - require.Equal(t, uint64(48+32), storageUsed) + require.Equal(t, uint64(40+32), storageUsed) }) t.Run("Storage used, set twice on same register to larger value, increases", func(t *testing.T) { @@ -327,7 +327,7 @@ func TestAccount_StorageUsed(t *testing.T) { storageUsed, err := accounts.GetStorageUsed(address) require.NoError(t, err) - require.Equal(t, uint64(48+33), storageUsed) + require.Equal(t, uint64(40+33), storageUsed) }) t.Run("Storage used, set twice on same register to smaller value, decreases", func(t *testing.T) { @@ -346,7 +346,7 @@ func TestAccount_StorageUsed(t *testing.T) { storageUsed, err := accounts.GetStorageUsed(address) require.NoError(t, err) - require.Equal(t, uint64(48+31), storageUsed) + require.Equal(t, uint64(40+31), storageUsed) }) t.Run("Storage used, after register deleted, decreases", func(t *testing.T) { @@ -365,7 +365,7 @@ func TestAccount_StorageUsed(t *testing.T) { storageUsed, err := accounts.GetStorageUsed(address) require.NoError(t, err) - require.Equal(t, uint64(48+0), storageUsed) + require.Equal(t, uint64(40+0), storageUsed) }) t.Run("Storage used on a complex scenario has correct value", func(t *testing.T) { @@ -394,7 +394,7 @@ func TestAccount_StorageUsed(t *testing.T) { storageUsed, err := accounts.GetStorageUsed(address) require.NoError(t, err) - require.Equal(t, uint64(48+33+42), storageUsed) + require.Equal(t, uint64(40+33+42), storageUsed) }) } diff --git a/fvm/state/state.go b/fvm/state/state.go index 8ed98fde388..0a970c3e1bd 100644 --- a/fvm/state/state.go +++ b/fvm/state/state.go @@ -339,17 +339,11 @@ func IsFVMStateKey(owner, key string) bool { } // check 
account level keys // cases: - // - address, "public_key_count" - // - address, "public_key_%d" (index) // - address, "contract_names" // - address, "code.%s" (contract name) - // - address, exists - // - address, "storage_used" - // - address, "frozen" + // - address, "public_key_%d" (index) + // - address, "a.s" (account status) - if key == KeyPublicKeyCount { - return true - } if bytes.HasPrefix([]byte(key), []byte("public_key_")) { return true } @@ -362,12 +356,6 @@ func IsFVMStateKey(owner, key string) bool { if key == KeyAccountStatus { return true } - if key == KeyStorageUsed { - return true - } - if key == KeyStorageIndex { - return true - } return false } diff --git a/fvm/state/state_test.go b/fvm/state/state_test.go index 24b93d2a53e..36e753c42a6 100644 --- a/fvm/state/state_test.go +++ b/fvm/state/state_test.go @@ -201,13 +201,10 @@ func TestState_MaxInteraction(t *testing.T) { func TestState_IsFVMStateKey(t *testing.T) { require.True(t, state.IsFVMStateKey("", "uuid")) - require.True(t, state.IsFVMStateKey("Address", state.KeyPublicKeyCount)) require.True(t, state.IsFVMStateKey("Address", "public_key_12")) require.True(t, state.IsFVMStateKey("Address", state.KeyContractNames)) require.True(t, state.IsFVMStateKey("Address", "code.MYCODE")) require.True(t, state.IsFVMStateKey("Address", state.KeyAccountStatus)) - require.True(t, state.IsFVMStateKey("Address", state.KeyStorageUsed)) - require.True(t, state.IsFVMStateKey("Address", state.KeyAccountStatus)) require.False(t, state.IsFVMStateKey("Address", "anything else")) } diff --git a/fvm/transactionInvoker_test.go b/fvm/transactionInvoker_test.go index 2c975cee154..bf4ea8128ed 100644 --- a/fvm/transactionInvoker_test.go +++ b/fvm/transactionInvoker_test.go @@ -68,7 +68,7 @@ func TestSafetyCheck(t *testing.T) { err = view.Set( string(contractAddress.Bytes()), state.KeyAccountStatus, - []byte{1}, + state.NewAccountStatus().ToBytes(), ) require.NoError(t, err) @@ -144,7 +144,7 @@ func TestSafetyCheck(t 
*testing.T) { err = view.Set( string(contractAddress.Bytes()), state.KeyAccountStatus, - []byte{1}, + state.NewAccountStatus().ToBytes(), ) require.NoError(t, err) err = view.Set( diff --git a/module/mock/hot_stuff_follower.go b/module/mock/hot_stuff_follower.go index 6a779e4e8c7..281bc1eca82 100644 --- a/module/mock/hot_stuff_follower.go +++ b/module/mock/hot_stuff_follower.go @@ -1,4 +1,4 @@ -// Code generated by mockery v1.0.0. DO NOT EDIT. +// Code generated by mockery v2.13.0. DO NOT EDIT. package mock @@ -59,3 +59,18 @@ func (_m *HotStuffFollower) SubmitProposal(proposal *flow.Header, parentView uin return r0 } + +type NewHotStuffFollowerT interface { + mock.TestingT + Cleanup(func()) +} + +// NewHotStuffFollower creates a new instance of HotStuffFollower. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewHotStuffFollower(t NewHotStuffFollowerT) *HotStuffFollower { + mock := &HotStuffFollower{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/utils/unittest/execution_state.go b/utils/unittest/execution_state.go index 12030b97ed5..d5b07c4b809 100644 --- a/utils/unittest/execution_state.go +++ b/utils/unittest/execution_state.go @@ -24,7 +24,7 @@ const ServiceAccountPrivateKeySignAlgo = crypto.ECDSAP256 const ServiceAccountPrivateKeyHashAlgo = hash.SHA2_256 // Pre-calculated state commitment with root account with the above private key -const GenesisStateCommitmentHex = "157856b8fde6760413cbcc87737778407446f192b8602bb7530a9d8c2be4c5da" +const GenesisStateCommitmentHex = "50a9f9bbf5ffcd52e1b786e93125ed0e6c21a126f2eeac20e9269401339c5ee3" var GenesisStateCommitment flow.StateCommitment From 10b0a82f5b87fbe0d6219f21948986d120606b79 Mon Sep 17 00:00:00 2001 From: Alexander Hentschel Date: Tue, 19 Jul 2022 17:09:53 -0700 Subject: [PATCH 221/223] fixed decoding error for signer indices at epoch switchover --- 
consensus/hotstuff/signature/block_signer_decoder.go | 9 +++++---- .../hotstuff/signature/block_signer_decoder_test.go | 8 ++++---- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/consensus/hotstuff/signature/block_signer_decoder.go b/consensus/hotstuff/signature/block_signer_decoder.go index 65d59cb3b45..fa93f00eb83 100644 --- a/consensus/hotstuff/signature/block_signer_decoder.go +++ b/consensus/hotstuff/signature/block_signer_decoder.go @@ -35,15 +35,16 @@ func (b *BlockSignerDecoder) DecodeSignerIDs(header *flow.Header) (flow.Identifi return []flow.Identifier{}, nil } - id := header.ID() - members, err := b.Identities(id) + // The block header contains the signatures for the parents. Hence, we need to get the + // identities that were authorized to sign the parent block, to decode the signer indices. + members, err := b.Identities(header.ParentID) if err != nil { // TODO: this potentially needs to be updated when we implement and document proper error handling for // `hotstuff.Committee` and underlying code (such as `protocol.Snapshot`) if errors.Is(err, storage.ErrNotFound) { - return nil, state.WrapAsUnknownBlockError(id, err) + return nil, state.WrapAsUnknownBlockError(header.ID(), err) } - return nil, fmt.Errorf("fail to retrieve identities for block %v: %w", id, err) + return nil, fmt.Errorf("fail to retrieve identities for block %v: %w", header.ID(), err) } signerIDs, err := signature.DecodeSignerIndicesToIdentifiers(members.NodeIDs(), header.ParentVoterIndices) if err != nil { diff --git a/consensus/hotstuff/signature/block_signer_decoder_test.go b/consensus/hotstuff/signature/block_signer_decoder_test.go index 9bf63e9b4b2..021425a3a0c 100644 --- a/consensus/hotstuff/signature/block_signer_decoder_test.go +++ b/consensus/hotstuff/signature/block_signer_decoder_test.go @@ -34,16 +34,16 @@ func (s *blockSignerDecoderSuite) SetupTest() { // the default header fixture creates signerIDs for a committee of 10 nodes, so we prepare a committee same 
as that s.allConsensus = unittest.IdentityListFixture(40, unittest.WithRole(flow.RoleConsensus)).Sort(order.Canonical) - // mock consensus committee - s.committee = new(hotstuff.Committee) - s.committee.On("Identities", mock.Anything).Return(s.allConsensus, nil) - // prepare valid test block: voterIndices, err := signature.EncodeSignersToIndices(s.allConsensus.NodeIDs(), s.allConsensus.NodeIDs()) require.NoError(s.T(), err) s.block = unittest.BlockFixture() s.block.Header.ParentVoterIndices = voterIndices + // mock consensus committee + s.committee = new(hotstuff.Committee) + s.committee.On("Identities", s.block.Header.ParentID).Return(s.allConsensus, nil) + s.decoder = NewBlockSignerDecoder(s.committee) } From 483f5b5538764af4827d707dbfec9a061f2e8ea6 Mon Sep 17 00:00:00 2001 From: Patrick Lee Date: Fri, 8 Jul 2022 13:28:42 -0700 Subject: [PATCH 222/223] Fix Unit race condition on stopping. Race scenario: goroutine 1 (calls unit.Launch) checked the context has not cancelled context switch goroutine 2 (calls unit.Done) cancels the context finished waiting for the wait group (unit is "Done" at this point) context switch goroutine 1 (remaining half of the original Launch call) add to wait group --- engine/unit.go | 37 ++++++++++++++++++++++++++++--------- 1 file changed, 28 insertions(+), 9 deletions(-) diff --git a/engine/unit.go b/engine/unit.go index 00633e73108..3dc6b4fd4c6 100644 --- a/engine/unit.go +++ b/engine/unit.go @@ -10,6 +10,8 @@ import ( // Unit handles synchronization management, startup, and shutdown for engines. 
type Unit struct { + admitLock sync.Mutex // used for synchronizing context cancellation with work admittance + wg sync.WaitGroup // tracks in-progress functions ctx context.Context // context that is cancelled when the unit is Done cancel context.CancelFunc // cancels the context @@ -27,16 +29,35 @@ func NewUnit() *Unit { return unit } +func (u *Unit) admit() bool { + u.admitLock.Lock() + defer u.admitLock.Unlock() + + select { + case <-u.ctx.Done(): + return false + default: + } + + u.wg.Add(1) + return true +} + +func (u *Unit) stopAdmitting() { + u.admitLock.Lock() + defer u.admitLock.Unlock() + + u.cancel() +} + // Do synchronously executes the input function f unless the unit has shut down. // It returns the result of f. If f is executed, the unit will not shut down // until after f returns. func (u *Unit) Do(f func() error) error { - select { - case <-u.ctx.Done(): + if !u.admit() { return nil - default: } - u.wg.Add(1) + defer u.wg.Done() return f() } @@ -44,12 +65,10 @@ func (u *Unit) Do(f func() error) error { // Launch asynchronously executes the input function unless the unit has shut // down. If f is executed, the unit will not shut down until after f returns. 
func (u *Unit) Launch(f func()) { - select { - case <-u.ctx.Done(): + if !u.admit() { return - default: } - u.wg.Add(1) + go func() { defer u.wg.Done() f() @@ -137,7 +156,7 @@ func (u *Unit) Quit() <-chan struct{} { func (u *Unit) Done(actions ...func()) <-chan struct{} { done := make(chan struct{}) go func() { - u.cancel() + u.stopAdmitting() for _, action := range actions { action() } From fba722d3cfd6b7a457de4eeb33010dbff20031bc Mon Sep 17 00:00:00 2001 From: Peter Argue <89119817+peterargue@users.noreply.github.com> Date: Mon, 18 Jul 2022 17:10:42 -0700 Subject: [PATCH 223/223] Improve error reporting for access api event endpoints --- engine/access/rpc/backend/backend_events.go | 13 +++++++------ engine/access/rpc/backend/backend_transactions.go | 1 + 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/engine/access/rpc/backend/backend_events.go b/engine/access/rpc/backend/backend_events.go index cc047606b3a..a41ccdb3f09 100644 --- a/engine/access/rpc/backend/backend_events.go +++ b/engine/access/rpc/backend/backend_events.go @@ -47,6 +47,7 @@ func (b *backendEvents) GetEventsForHeightRange( // get the latest sealed block header head, err := b.state.Sealed().Head() if err != nil { + // sealed block must be in the store, so return an Internal code even if we got NotFound return nil, status.Errorf(codes.Internal, "failed to get events: %v", err) } @@ -67,7 +68,7 @@ func (b *backendEvents) GetEventsForHeightRange( for i := startHeight; i <= endHeight; i++ { header, err := b.headers.ByHeight(i) if err != nil { - return nil, status.Errorf(codes.Internal, "failed to get events: %v", err) + return nil, convertStorageError(fmt.Errorf("failed to get events: %w", err)) } blockHeaders = append(blockHeaders, header) @@ -84,7 +85,7 @@ func (b *backendEvents) GetEventsForBlockIDs( ) ([]flow.BlockEvents, error) { if uint(len(blockIDs)) > b.maxHeightRange { - return nil, fmt.Errorf("requested block range (%d) exceeded maximum (%d)", len(blockIDs), b.maxHeightRange) 
+ return nil, status.Errorf(codes.InvalidArgument, "requested block range (%d) exceeded maximum (%d)", len(blockIDs), b.maxHeightRange) } // find the block headers for all the block IDs @@ -92,7 +93,7 @@ func (b *backendEvents) GetEventsForBlockIDs( for _, blockID := range blockIDs { header, err := b.headers.ByBlockID(blockID) if err != nil { - return nil, status.Errorf(codes.Internal, "failed to get events: %v", err) + return nil, convertStorageError(fmt.Errorf("failed to get events: %w", err)) } blockHeaders = append(blockHeaders, header) @@ -160,15 +161,15 @@ func verifyAndConvertToAccessEvents(execEvents []*execproto.GetEventsForBlockIDs return nil, errors.New("number of results does not match number of blocks requested") } - reqestedBlockHeaderSet := map[string]*flow.Header{} + requestedBlockHeaderSet := map[string]*flow.Header{} for _, header := range requestedBlockHeaders { - reqestedBlockHeaderSet[header.ID().String()] = header + requestedBlockHeaderSet[header.ID().String()] = header } results := make([]flow.BlockEvents, len(execEvents)) for i, result := range execEvents { - header, expected := reqestedBlockHeaderSet[hex.EncodeToString(result.GetBlockId())] + header, expected := requestedBlockHeaderSet[hex.EncodeToString(result.GetBlockId())] if !expected { return nil, fmt.Errorf("unexpected blockID from exe node %x", result.GetBlockId()) } diff --git a/engine/access/rpc/backend/backend_transactions.go b/engine/access/rpc/backend/backend_transactions.go index 6c7c7ef5e61..c60c7fb12ad 100644 --- a/engine/access/rpc/backend/backend_transactions.go +++ b/engine/access/rpc/backend/backend_transactions.go @@ -66,6 +66,7 @@ func (b *backendTransactions) SendTransaction( // store the transaction locally err = b.transactions.Store(tx) if err != nil { + // TODO: why would this be InvalidArgument? return status.Error(codes.InvalidArgument, fmt.Sprintf("failed to store transaction: %v", err)) }