Merge pull request #1098 from kaleido-io/go-1.19
Add forward compatibility with Go 1.19
peterbroadhurst authored Nov 9, 2022
2 parents d9a2300 + 4e2dd1c commit 0e6ae46
Showing 15 changed files with 44 additions and 51 deletions.
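
Most of the changes below replace the io/ioutil helpers (deprecated since Go 1.16) with their os and io equivalents; the remaining hunks only re-indent doc comments for Go 1.19's gofmt. A minimal, self-contained sketch of the mapping applied throughout this commit (the file name and contents are illustrative only):

package main

import (
    "fmt"
    "io"
    "os"
    "strings"
)

func main() {
    // ioutil.WriteFile -> os.WriteFile (same signature)
    if err := os.WriteFile("example.txt", []byte("hello\n"), 0600); err != nil {
        panic(err)
    }
    // ioutil.ReadFile -> os.ReadFile
    data, err := os.ReadFile("example.txt")
    if err != nil {
        panic(err)
    }
    // ioutil.ReadAll -> io.ReadAll
    all, err := io.ReadAll(strings.NewReader(string(data)))
    if err != nil {
        panic(err)
    }
    fmt.Print(string(all))
}
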
5 changes: 2 additions & 3 deletions ffconfig/main.go
@@ -18,7 +18,6 @@ package main

import (
"fmt"
- "io/ioutil"
"os"

"github.com/hyperledger/firefly/ffconfig/migrate"
@@ -38,7 +37,7 @@ var migrateCommand = &cobra.Command{
Use: "migrate",
Short: "Migrate a config file to the current version",
RunE: func(cmd *cobra.Command, args []string) error {
- cfg, err := ioutil.ReadFile(cfgFile)
+ cfg, err := os.ReadFile(cfgFile)
if err != nil {
return err
}
@@ -50,7 +49,7 @@ var migrateCommand = &cobra.Command{
fmt.Print(string(out))
return nil
}
- return ioutil.WriteFile(outFile, out, 0600)
+ return os.WriteFile(outFile, out, 0600)
},
}

4 changes: 2 additions & 2 deletions internal/data/data_manager.go
@@ -186,8 +186,8 @@ func (dm *dataManager) getValidatorForDatatype(ctx context.Context, validator co
}

// GetMessageWithData performs a cached lookup of a message with all of the associated data.
- // - Use this in performance sensitive code, but note mutable fields like the status of the
- // message CANNOT be relied upon (due to the caching).
+ // - Use this in performance sensitive code, but note mutable fields like the status of the
+ // message CANNOT be relied upon (due to the caching).
func (dm *dataManager) GetMessageWithDataCached(ctx context.Context, msgID *fftypes.UUID, options ...CacheReadOption) (msg *core.Message, data core.DataArray, foundAllData bool, err error) {
if mce := dm.queryMessageCache(ctx, msgID, options...); mce != nil {
return mce.msg, mce.data, true, nil
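
The change above, like several later hunks (aggregator_batch_state.go, tokens_approved.go, tokens_transferred.go, fftokens.go), appears to adjust only leading whitespace: Go 1.19's gofmt reformats doc comments and normalizes the indentation of list items. An illustrative doc comment in the new style, not taken from the FireFly source:

package sketch

// GetCachedRecord returns a possibly stale copy of a record from an in-memory cache.
//   - Use this in performance-sensitive code paths.
//   - Mutable fields may be out of date because of the caching.
func GetCachedRecord(id string) (record string, found bool) {
    return "", false
}
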
22 changes: 11 additions & 11 deletions internal/events/aggregator_batch_state.go
@@ -81,17 +81,17 @@ type dispatchedMessage struct {
// batchState is the object that tracks the in-memory state that builds up while processing a batch of pins,
// that needs to be reconciled at the point the batch closes.
// There are three phases:
- // 1. Dispatch: Determines if messages are blocked, or can be dispatched. Calls the appropriate dispatch
- // actions for that message type. Reads initial `pin` state for contexts from the DB, and then
- // updates this in-memory throughout the batch, ready for flushing in the Finalize phase.
- // Runs in a database operation group/transaction.
- // 2. Pre-finalize: Runs any PreFinalize callbacks registered by the handlers in (1).
- // Intended to be used for cross-microservice REST/GRPC etc. calls that have side-effects.
- // Runs outside any database operation group/transaction.
- // 3. Finalize: Flushes the `pin` state calculated in phase (1), and any Finalize actions registered by handlers
- // during phase (1) or (2).
- // Runs in a database operation group/transaction, which will be the same as phase (1) if there
- // are no pre-finalize handlers registered.
+ // 1. Dispatch: Determines if messages are blocked, or can be dispatched. Calls the appropriate dispatch
+ // actions for that message type. Reads initial `pin` state for contexts from the DB, and then
+ // updates this in-memory throughout the batch, ready for flushing in the Finalize phase.
+ // Runs in a database operation group/transaction.
+ // 2. Pre-finalize: Runs any PreFinalize callbacks registered by the handlers in (1).
+ // Intended to be used for cross-microservice REST/GRPC etc. calls that have side-effects.
+ // Runs outside any database operation group/transaction.
+ // 3. Finalize: Flushes the `pin` state calculated in phase (1), and any Finalize actions registered by handlers
+ // during phase (1) or (2).
+ // Runs in a database operation group/transaction, which will be the same as phase (1) if there
+ // are no pre-finalize handlers registered.
type batchState struct {
core.BatchState

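
The doc comment above is only re-indented by this commit, but the three-phase flow it describes can be sketched in code. This is a hypothetical outline with invented names (txRunner, the callback slices), not the actual aggregator implementation; note the real design reuses the phase 1 transaction for phase 3 when no pre-finalize callbacks are registered:

package sketch

import "context"

// txRunner abstracts "run this function inside one database operation group/transaction"
// (a hypothetical stand-in for the real database plugin API).
type txRunner interface {
    RunAsGroup(ctx context.Context, fn func(ctx context.Context) error) error
}

type batchStateSketch struct {
    preFinalize []func(ctx context.Context) error // registered by dispatch handlers in phase 1
    finalize    []func(ctx context.Context) error // pin-state flush and handler actions for phase 3
}

func (bs *batchStateSketch) process(ctx context.Context, db txRunner, dispatch func(ctx context.Context) error) error {
    // Phase 1: dispatch runs in a database transaction and builds up in-memory pin state
    if err := db.RunAsGroup(ctx, dispatch); err != nil {
        return err
    }
    // Phase 2: pre-finalize callbacks (cross-service calls with side effects) run outside any transaction
    for _, cb := range bs.preFinalize {
        if err := cb(ctx); err != nil {
            return err
        }
    }
    // Phase 3: flush the pin state and run finalize actions in a database transaction
    return db.RunAsGroup(ctx, func(ctx context.Context) error {
        for _, cb := range bs.finalize {
            if err := cb(ctx); err != nil {
                return err
            }
        }
        return nil
    })
}
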
10 changes: 5 additions & 5 deletions internal/events/tokens_approved.go
@@ -31,11 +31,11 @@ import (
// This will ensure that the original LocalID provided to the user can later be used in a lookup, and also causes requests that
// use "confirm=true" to resolve as expected.
// Must follow these rules to reuse the LocalID:
- // - The transaction ID on the approval must match a transaction+operation initiated by this node.
- // - The connector and pool for this event must match the connector and pool targeted by the initial operation. Connectors are
- // allowed to trigger side-effects in other pools, but only the event from the targeted pool should use the original LocalID.
- // - The LocalID must not have been used yet. Connectors are allowed to emit multiple events in response to a single operation,
- // but only the first of them can use the original LocalID.
+ // - The transaction ID on the approval must match a transaction+operation initiated by this node.
+ // - The connector and pool for this event must match the connector and pool targeted by the initial operation. Connectors are
+ // allowed to trigger side-effects in other pools, but only the event from the targeted pool should use the original LocalID.
+ // - The LocalID must not have been used yet. Connectors are allowed to emit multiple events in response to a single operation,
+ // but only the first of them can use the original LocalID.
func (em *eventManager) loadApprovalID(ctx context.Context, tx *fftypes.UUID, approval *core.TokenApproval) (*fftypes.UUID, error) {
// Find a matching operation within the transaction
fb := database.OperationQueryFactory.NewFilter(ctx)
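
The three rules above amount to a simple predicate. A hypothetical sketch with invented types and helpers, not the FireFly implementation:

package sketch

// initiatedOp stands in for the operation record looked up by transaction ID.
type initiatedOp struct {
    Connector string
    Pool      string
    LocalID   string
}

// tokenEvent stands in for the incoming approval or transfer event.
type tokenEvent struct {
    Connector string
    Pool      string
}

// canReuseLocalID applies the rules from the comment above.
func canReuseLocalID(op *initiatedOp, ev tokenEvent, alreadyUsed func(localID string) bool) bool {
    if op == nil {
        return false // rule 1: the transaction must match an operation this node initiated
    }
    if op.Connector != ev.Connector || op.Pool != ev.Pool {
        return false // rule 2: the event must come from the connector+pool the operation targeted
    }
    return !alreadyUsed(op.LocalID) // rule 3: only the first matching event may reuse the LocalID
}
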
10 changes: 5 additions & 5 deletions internal/events/tokens_transferred.go
@@ -31,11 +31,11 @@ import (
// This will ensure that the original LocalID provided to the user can later be used in a lookup, and also causes requests that
// use "confirm=true" to resolve as expected.
// Must follow these rules to reuse the LocalID:
- // - The transaction ID on the transfer must match a transaction+operation initiated by this node.
- // - The connector and pool for this event must match the connector and pool targeted by the initial operation. Connectors are
- // allowed to trigger side-effects in other pools, but only the event from the targeted pool should use the original LocalID.
- // - The LocalID must not have been used yet. Connectors are allowed to emit multiple events in response to a single operation,
- // but only the first of them can use the original LocalID.
+ // - The transaction ID on the transfer must match a transaction+operation initiated by this node.
+ // - The connector and pool for this event must match the connector and pool targeted by the initial operation. Connectors are
+ // allowed to trigger side-effects in other pools, but only the event from the targeted pool should use the original LocalID.
+ // - The LocalID must not have been used yet. Connectors are allowed to emit multiple events in response to a single operation,
+ // but only the first of them can use the original LocalID.
func (em *eventManager) loadTransferID(ctx context.Context, tx *fftypes.UUID, transfer *core.TokenTransfer) (*fftypes.UUID, error) {
// Find a matching operation within the transaction
fb := database.OperationQueryFactory.NewFilter(ctx)
4 changes: 2 additions & 2 deletions internal/events/websockets/websocket_connection.go
@@ -19,7 +19,7 @@ package websockets
import (
"context"
"encoding/json"
- "io/ioutil"
+ "io"
"net/http"
"sync"

@@ -138,7 +138,7 @@ func (wc *websocketConnection) receiveLoop() {
var msgHeader core.WSActionBase
_, reader, err := wc.wsConn.NextReader()
if err == nil {
- msgData, err = ioutil.ReadAll(reader)
+ msgData, err = io.ReadAll(reader)
if err == nil {
err = json.Unmarshal(msgData, &msgHeader)
if err != nil {
3 changes: 1 addition & 2 deletions internal/shareddownload/operations.go
@@ -19,7 +19,6 @@ package shareddownload
import (
"context"
"io"
- "io/ioutil"

"github.com/docker/go-units"
"github.com/hyperledger/firefly-common/pkg/fftypes"
@@ -122,7 +121,7 @@ func (dm *downloadManager) downloadBatch(ctx context.Context, data downloadBatch
// Read from the stream up to the limit
maxReadLimit := dm.broadcastBatchPayloadLimit + 1024
limitedReader := io.LimitReader(reader, maxReadLimit)
- batchBytes, err := ioutil.ReadAll(limitedReader)
+ batchBytes, err := io.ReadAll(limitedReader)
if err != nil {
return nil, false, i18n.WrapError(ctx, err, coremsgs.MsgDownloadSharedFailed, data.PayloadRef)
}
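
The hunk above caps the download at broadcastBatchPayloadLimit plus 1024 bytes of slack before calling io.ReadAll. A generic, self-contained sketch of the same bounded-read pattern, using a one-byte sentinel instead of the 1024-byte slack (names are illustrative, not the FireFly implementation):

package main

import (
    "errors"
    "fmt"
    "io"
    "strings"
)

// readAtMost reads from r but rejects payloads larger than maxBytes.
func readAtMost(r io.Reader, maxBytes int64) ([]byte, error) {
    // Read one extra byte so "exactly maxBytes" and "too large" can be told apart.
    data, err := io.ReadAll(io.LimitReader(r, maxBytes+1)) // io.ReadAll replaces ioutil.ReadAll
    if err != nil {
        return nil, err
    }
    if int64(len(data)) > maxBytes {
        return nil, errors.New("payload exceeds size limit")
    }
    return data, nil
}

func main() {
    data, err := readAtMost(strings.NewReader("a small payload"), 1024)
    fmt.Println(string(data), err)
}
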
4 changes: 2 additions & 2 deletions internal/spievents/websockets.go
@@ -19,7 +19,7 @@ package spievents
import (
"context"
"encoding/json"
- "io/ioutil"
+ "io"
"sync"
"time"

@@ -153,7 +153,7 @@ func (wc *webSocket) receiveLoop() {
var cmd core.WSChangeEventCommand
_, reader, err := wc.wsConn.NextReader()
if err == nil {
- msgData, err = ioutil.ReadAll(reader)
+ msgData, err = io.ReadAll(reader)
if err == nil {
err = json.Unmarshal(msgData, &cmd)
}
5 changes: 3 additions & 2 deletions internal/tokens/fftokens/fftokens.go
@@ -560,9 +560,10 @@ func (ft *FFTokens) eventLoop() {
}

// Parse a JSON error of the form:
- // {"error": "Bad Request", "message": "Field 'x' is required"}
+ // {"error": "Bad Request", "message": "Field 'x' is required"}
// into a message of the form:
- // "Bad Request: Field 'x' is required"
+ //
+ // "Bad Request: Field 'x' is required"
func wrapError(ctx context.Context, errRes *tokenError, res *resty.Response, err error) error {
if errRes != nil && errRes.Message != "" {
if errRes.Error != "" {
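
A standalone sketch of the error flattening described in the comment above; the helper name and the fallback behaviour are illustrative, not the FFTokens implementation:

package main

import (
    "encoding/json"
    "fmt"
)

type tokenError struct {
    Error   string `json:"error"`
    Message string `json:"message"`
}

// flattenTokenError turns {"error": "...", "message": "..."} into "error: message".
func flattenTokenError(body []byte) string {
    var e tokenError
    if err := json.Unmarshal(body, &e); err != nil || e.Message == "" {
        return string(body) // fall back to the raw body if it is not the expected shape
    }
    if e.Error != "" {
        return fmt.Sprintf("%s: %s", e.Error, e.Message)
    }
    return e.Message
}

func main() {
    fmt.Println(flattenTokenError([]byte(`{"error": "Bad Request", "message": "Field 'x' is required"}`)))
    // prints: Bad Request: Field 'x' is required
}
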
1 change: 0 additions & 1 deletion pkg/core/pin.go
@@ -37,7 +37,6 @@ import "github.com/hyperledger/firefly-common/pkg/fftypes"
// moving separately to the batch. If we get the private message, then the batch,
// before receiving the blob data - we have to upgrade a batch-park, to a pin-park.
// This is because the sequence must be in the order the pins arrive.
- //
type Pin struct {
Sequence int64 `ffstruct:"Pin" json:"sequence"`
Namespace string `ffstruct:"Pin" json:"namespace"`
2 changes: 0 additions & 2 deletions pkg/database/plugin.go
@@ -522,7 +522,6 @@ type iChartCollection interface {
// interface.
// For SQL databases the process of adding a new database is simplified via the common SQL layer.
// For NoSQL databases, the code should be straight forward to map the collections, indexes, and operations.
- //
type PersistenceInterface interface {
core.Named

@@ -655,7 +654,6 @@ type PostCompletionHook func()
// Events are emitted locally to the individual FireFly core process. However, a WebSocket interface is
// available for remote listening to these events. That allows the UI to listen to the events, as well as
// providing a building block for a cluster of FireFly servers to directly propagate events to each other.
- //
type Callbacks interface {
// OrderedUUIDCollectionNSEvent emits the sequence on insert, but it will be -1 on update
OrderedUUIDCollectionNSEvent(resType OrderedUUIDCollectionNS, eventType core.ChangeEventType, namespace string, id *fftypes.UUID, sequence int64)
9 changes: 4 additions & 5 deletions pkg/dataexchange/plugin.go
@@ -47,13 +47,12 @@ import (
// - Can be stored and retrieved separately from their transfer
// - Transfers are initiated via reference (not in-line data)
// - Are hashed by the DX plugin using the same hashing algorithm as FireFly (SHA256)
- // - DX plugins can mainain their own internal IDs for Blobs within the following requirements:
+ // - DX plugins can maintain their own internal IDs for Blobs within the following requirements:
// - Given a namespace and ID, map to a "payloadRef" string (<1024chars) that allows that same payload to be retrieved using only that payloadRef
- // - Example would be a logical filesystem path like "local/namespace/ID"
- // - When data is recevied from other members in the network, be able to return the hash when provided with the remote peerID string, namespace and ID
- // - Could be done by having a data store to resolve the transfers, or simply a deterministic path to metadata like "receive/peerID/namespace/ID"
+ // - Example would be a logical filesystem path like "local/namespace/ID"
+ // - When data is received from other members in the network, be able to return the hash when provided with the remote peerID string, namespace and ID
+ // - Could be done by having a data store to resolve the transfers, or simply a deterministic path to metadata like "receive/peerID/namespace/ID"
// - Events triggered for arrival of blobs must contain the payloadRef, and the hash
- //
type Plugin interface {
core.Named

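
The requirements above suggest a deterministic payloadRef scheme such as "local/namespace/ID" for locally stored blobs and "receive/peerID/namespace/ID" for blobs received from other members. A hypothetical sketch of that mapping; a real DX plugin may use any scheme that meets the requirements:

package main

import (
    "fmt"
    "path"
)

// localPayloadRef maps a namespace and ID to a payloadRef for locally stored blobs.
func localPayloadRef(namespace, id string) string {
    return path.Join("local", namespace, id) // e.g. "local/ns1/3fa85f64-..."
}

// receivedPayloadRef maps a remote peer, namespace and ID for blobs received from other members.
func receivedPayloadRef(peerID, namespace, id string) string {
    return path.Join("receive", peerID, namespace, id)
}

func main() {
    fmt.Println(localPayloadRef("ns1", "3fa85f64-5717-4562-b3fc-2c963f66afa6"))
    fmt.Println(receivedPayloadRef("org2", "ns1", "3fa85f64-5717-4562-b3fc-2c963f66afa6"))
}
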
3 changes: 1 addition & 2 deletions test/e2e/client/restclient.go
@@ -23,7 +23,6 @@ import (
"encoding/json"
"fmt"
"io"
- "io/ioutil"
"math/big"
"net/http"
"net/url"
@@ -222,7 +221,7 @@ func (client *FireFlyClient) GetBlob(t *testing.T, data *core.Data, expectedStat
Get(path)
require.NoError(t, err)
require.Equal(t, expectedStatus, resp.StatusCode(), "GET %s [%d]: %s", path, resp.StatusCode(), resp.String())
- blob, err := ioutil.ReadAll(resp.RawBody())
+ blob, err := io.ReadAll(resp.RawBody())
require.NoError(t, err)
return blob
}
3 changes: 1 addition & 2 deletions test/e2e/common.go
@@ -19,7 +19,6 @@ package e2e
import (
"crypto/rand"
"fmt"
- "io/ioutil"
"os"
"testing"

@@ -30,7 +29,7 @@ import (
)

func ReadConfig(t *testing.T, configFile string) map[string]interface{} {
- yfile, err := ioutil.ReadFile(configFile)
+ yfile, err := os.ReadFile(configFile)
assert.NoError(t, err)
data := make(map[string]interface{})
err = yaml.Unmarshal(yfile, &data)
10 changes: 5 additions & 5 deletions test/e2e/stack.go
@@ -18,7 +18,7 @@ package e2e

import (
"encoding/json"
- "io/ioutil"
+ "os"
)

type Stack struct {
@@ -47,7 +47,7 @@ type Member struct {
}

func GetMemberPort(filename string, n int) (int, error) {
- jsonBytes, err := ioutil.ReadFile(filename)
+ jsonBytes, err := os.ReadFile(filename)
if err != nil {
return 0, err
}
@@ -62,7 +62,7 @@ func GetMemberPort(filename string, n int) (int, error) {
}

func GetMemberHostname(filename string, n int) (string, error) {
- jsonBytes, err := ioutil.ReadFile(filename)
+ jsonBytes, err := os.ReadFile(filename)
if err != nil {
return "", err
}
@@ -77,7 +77,7 @@ func GetMemberHostname(filename string, n int) (string, error) {
}

func ReadStackFile(filename string) (*Stack, error) {
- jsonBytes, err := ioutil.ReadFile(filename)
+ jsonBytes, err := os.ReadFile(filename)
if err != nil {
return nil, err
}
@@ -100,7 +100,7 @@ func ReadStackFile(filename string) (*Stack, error) {
}

func ReadStackStateFile(filename string) (*StackState, error) {
- jsonBytes, err := ioutil.ReadFile(filename)
+ jsonBytes, err := os.ReadFile(filename)
if err != nil {
return nil, err
}
