From 4f874d9324e3addb903470db0ea4ea7e7ffde7a7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Colin=20Axn=C3=A9r?= <25233464+colin-axner@users.noreply.github.com> Date: Mon, 31 May 2021 14:03:53 +0200 Subject: [PATCH 01/21] add in-place migrations Prunes solomachines and expired tendermint consensus states via an x/upgrade --- modules/core/02-client/keeper/migrations.go | 25 +++++++ modules/core/02-client/legacy/v100/store.go | 69 +++++++++++++++++++ modules/core/module.go | 4 ++ .../07-tendermint/types/store.go | 29 ++++++++ 4 files changed, 127 insertions(+) create mode 100644 modules/core/02-client/keeper/migrations.go create mode 100644 modules/core/02-client/legacy/v100/store.go diff --git a/modules/core/02-client/keeper/migrations.go b/modules/core/02-client/keeper/migrations.go new file mode 100644 index 00000000000..a3b1b0e894d --- /dev/null +++ b/modules/core/02-client/keeper/migrations.go @@ -0,0 +1,25 @@ +package keeper + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + + v100 "github.com/cosmos/ibc-go/modules/core/02-client/legacy/v100" +) + +// Migrator is a struct for handling in-place store migrations. +type Migrator struct { + keeper Keeper +} + +// NewMigrator returns a new Migrator. +func NewMigrator(keeper Keeper) Migrator { + return Migrator{keeper: keeper} +} + +// Migrate1to2 migrates from version 1 to 2. 
+// This migration prunes: +// - solo machine clients +// - expired tendermint consensus states +func (m Migrator) Migrate1to2(ctx sdk.Context) error { + return v100.MigrateStore(ctx, m.keeper.storeKey, m.keeper.cdc) +} diff --git a/modules/core/02-client/legacy/v100/store.go b/modules/core/02-client/legacy/v100/store.go new file mode 100644 index 00000000000..151f3980b20 --- /dev/null +++ b/modules/core/02-client/legacy/v100/store.go @@ -0,0 +1,69 @@ +package v100 + +import ( + "fmt" + "strings" + + "github.com/cosmos/cosmos-sdk/codec" + "github.com/cosmos/cosmos-sdk/store/prefix" + sdk "github.com/cosmos/cosmos-sdk/types" + + "github.com/cosmos/ibc-go/modules/core/02-client/types" + host "github.com/cosmos/ibc-go/modules/core/24-host" + "github.com/cosmos/ibc-go/modules/core/exported" + ibctmtypes "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types" +) + +// MigrateStore performs in-place store migrations from SDK v0.40 of the IBC module to v1.0.0 of ibc-go. +// The migration includes: +// +// - Pruning solo machine clients +// - Pruning expired tendermint consensus states +func MigrateStore(ctx sdk.Context, storeKey sdk.StoreKey, cdc codec.BinaryCodec) (err error) { + store := ctx.KVStore(storeKey) + + iterator := sdk.KVStorePrefixIterator(store, host.KeyClientStorePrefix) + + defer iterator.Close() + for ; iterator.Valid(); iterator.Next() { + keySplit := strings.Split(string(iterator.Key()), "/") + if keySplit[len(keySplit)-1] != host.KeyClientState { + continue + } + + // key is ibc/{clientid}/clientState + // Thus, keySplit[1] is clientID + clientID := keySplit[1] + clientState := types.MustUnmarshalClientState(cdc, iterator.Value()) + + clientType, _, err := types.ParseClientIdentifier(clientID) + if err != nil { + return err + } + + switch clientType { + case exported.Solomachine: + store.Delete([]byte(fmt.Sprintf("%s/%s", host.KeyClientStorePrefix, clientID))) + + case exported.Tendermint: + clientPrefix := []byte(fmt.Sprintf("%s/%s/", 
host.KeyClientStorePrefix, clientID)) + clientStore := prefix.NewStore(ctx.KVStore(storeKey), clientPrefix) + + // ensure client is tendermint type + tmClientState, ok := clientState.(*ibctmtypes.ClientState) + if !ok { + panic("client with identifier '07-tendermint' is not tendermint type!") + } + if err = ibctmtypes.PruneAllExpiredConsensusStates(ctx, clientStore, cdc, tmClientState); err != nil { + return err + } + + default: + continue + } + + } + + return nil + +} diff --git a/modules/core/module.go b/modules/core/module.go index 1d338dcb69e..dac61e776cf 100644 --- a/modules/core/module.go +++ b/modules/core/module.go @@ -19,6 +19,7 @@ import ( "github.com/cosmos/cosmos-sdk/types/module" simtypes "github.com/cosmos/cosmos-sdk/types/simulation" ibcclient "github.com/cosmos/ibc-go/modules/core/02-client" + clientkeeper "github.com/cosmos/ibc-go/modules/core/02-client/keeper" clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types" connectiontypes "github.com/cosmos/ibc-go/modules/core/03-connection/types" channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types" @@ -136,6 +137,9 @@ func (am AppModule) RegisterServices(cfg module.Configurator) { connectiontypes.RegisterMsgServer(cfg.MsgServer(), am.keeper) channeltypes.RegisterMsgServer(cfg.MsgServer(), am.keeper) types.RegisterQueryService(cfg.QueryServer(), am.keeper) + + m := clientkeeper.NewMigrator(am.keeper.ClientKeeper) + cfg.RegisterMigration(host.ModuleName, 1, m.Migrate1to2) } // InitGenesis performs genesis initialization for the ibc module. 
It returns diff --git a/modules/light-clients/07-tendermint/types/store.go b/modules/light-clients/07-tendermint/types/store.go index e86c8144931..0686457a75f 100644 --- a/modules/light-clients/07-tendermint/types/store.go +++ b/modules/light-clients/07-tendermint/types/store.go @@ -264,6 +264,35 @@ func GetPreviousConsensusState(clientStore sdk.KVStore, cdc codec.BinaryCodec, h return getTmConsensusState(clientStore, cdc, csKey) } +// PruneAllExpiredConsensusStates iterates over all consensus states for a given +// client store. If a consensus state is expired, it is deleted and its metadata +// is deleted. +func PruneAllExpiredConsensusStates( + ctx sdk.Context, clientStore sdk.KVStore, + cdc codec.BinaryCodec, clientState *ClientState, +) (err error) { + pruneCb := func(height exported.Height) bool { + consState, err := GetConsensusState(clientStore, cdc, height) + // this error should never occur + if err != nil { + return true + } + + if clientState.IsExpired(consState.Timestamp, ctx.BlockTime()) { + deleteConsensusState(clientStore, height) + deleteConsensusMetadata(clientStore, height) + } + + return false + } + IterateConsensusStateAscending(clientStore, pruneCb) + if err != nil { + return err + } + + return nil +} + // Helper function for GetNextConsensusState and GetPreviousConsensusState func getTmConsensusState(clientStore sdk.KVStore, cdc codec.BinaryCodec, key []byte) (*ConsensusState, bool) { bz := clientStore.Get(key) From f8ca01300e9821e170748f544ee40fcf188e8bb9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Colin=20Axn=C3=A9r?= <25233464+colin-axner@users.noreply.github.com> Date: Wed, 2 Jun 2021 13:27:54 +0200 Subject: [PATCH 02/21] update migrations fix iteration bug remove solo machine connections remove solo machine channels --- modules/core/02-client/keeper/migrations.go | 5 ++ modules/core/02-client/legacy/v100/store.go | 50 ++++++++++++++++--- .../core/03-connection/keeper/migrations.go | 25 ++++++++++ .../core/03-connection/legacy/v100/store.go 
| 41 +++++++++++++++ modules/core/04-channel/keeper/migrations.go | 25 ++++++++++ modules/core/04-channel/legacy/v100/store.go | 41 +++++++++++++++ modules/core/keeper/migrations.go | 47 +++++++++++++++++ .../07-tendermint/types/store.go | 11 +++- 8 files changed, 237 insertions(+), 8 deletions(-) create mode 100644 modules/core/03-connection/keeper/migrations.go create mode 100644 modules/core/03-connection/legacy/v100/store.go create mode 100644 modules/core/04-channel/keeper/migrations.go create mode 100644 modules/core/04-channel/legacy/v100/store.go create mode 100644 modules/core/keeper/migrations.go diff --git a/modules/core/02-client/keeper/migrations.go b/modules/core/02-client/keeper/migrations.go index a3b1b0e894d..6200195acb6 100644 --- a/modules/core/02-client/keeper/migrations.go +++ b/modules/core/02-client/keeper/migrations.go @@ -19,7 +19,12 @@ func NewMigrator(keeper Keeper) Migrator { // Migrate1to2 migrates from version 1 to 2. // This migration prunes: // - solo machine clients +// - connections using solo machines +// - channels using solo machines // - expired tendermint consensus states +// +// Connections are removed if the associated client does not exist. +// Channels are removed if the associated connection does not exist. 
func (m Migrator) Migrate1to2(ctx sdk.Context) error { return v100.MigrateStore(ctx, m.keeper.storeKey, m.keeper.cdc) } diff --git a/modules/core/02-client/legacy/v100/store.go b/modules/core/02-client/legacy/v100/store.go index 151f3980b20..8318584af16 100644 --- a/modules/core/02-client/legacy/v100/store.go +++ b/modules/core/02-client/legacy/v100/store.go @@ -9,6 +9,7 @@ import ( sdk "github.com/cosmos/cosmos-sdk/types" "github.com/cosmos/ibc-go/modules/core/02-client/types" + clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types" host "github.com/cosmos/ibc-go/modules/core/24-host" "github.com/cosmos/ibc-go/modules/core/exported" ibctmtypes "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types" @@ -21,9 +22,10 @@ import ( // - Pruning expired tendermint consensus states func MigrateStore(ctx sdk.Context, storeKey sdk.StoreKey, cdc codec.BinaryCodec) (err error) { store := ctx.KVStore(storeKey) - iterator := sdk.KVStorePrefixIterator(store, host.KeyClientStorePrefix) + var clients []clienttypes.IdentifiedClientState + defer iterator.Close() for ; iterator.Valid(); iterator.Next() { keySplit := strings.Split(string(iterator.Key()), "/") @@ -31,23 +33,35 @@ func MigrateStore(ctx sdk.Context, storeKey sdk.StoreKey, cdc codec.BinaryCodec) continue } - // key is ibc/{clientid}/clientState + // key is clients/{clientid}/clientState // Thus, keySplit[1] is clientID clientID := keySplit[1] clientState := types.MustUnmarshalClientState(cdc, iterator.Value()) + clients = append(clients, clienttypes.NewIdentifiedClientState(clientID, clientState)) - clientType, _, err := types.ParseClientIdentifier(clientID) + } + + for _, client := range clients { + clientType, _, err := types.ParseClientIdentifier(client.ClientId) if err != nil { return err } + clientPrefix := []byte(fmt.Sprintf("%s/%s/", host.KeyClientStorePrefix, client.ClientId)) + clientStore := prefix.NewStore(ctx.KVStore(storeKey), clientPrefix) + switch clientType { case 
exported.Solomachine: - store.Delete([]byte(fmt.Sprintf("%s/%s", host.KeyClientStorePrefix, clientID))) + pruneSolomachine(clientStore) + store.Delete([]byte(fmt.Sprintf("%s/%s", host.KeyClientStorePrefix, client.ClientId))) case exported.Tendermint: - clientPrefix := []byte(fmt.Sprintf("%s/%s/", host.KeyClientStorePrefix, clientID)) + clientPrefix := []byte(fmt.Sprintf("%s/%s/", host.KeyClientStorePrefix, client.ClientId)) clientStore := prefix.NewStore(ctx.KVStore(storeKey), clientPrefix) + clientState, err := types.UnpackClientState(client.ClientState) + if err != nil { + return err + } // ensure client is tendermint type tmClientState, ok := clientState.(*ibctmtypes.ClientState) @@ -61,9 +75,33 @@ func MigrateStore(ctx sdk.Context, storeKey sdk.StoreKey, cdc codec.BinaryCodec) default: continue } - } return nil +} + +// pruneSolomachine removes the client state and all consensus states +// stored in the provided clientStore +func pruneSolomachine(clientStore sdk.KVStore) { + // delete client state + clientStore.Delete(host.ClientStateKey()) + // collect consensus states to be pruned + iterator := sdk.KVStorePrefixIterator(clientStore, []byte(host.KeyConsensusStatePrefix)) + var heights []exported.Height + + defer iterator.Close() + for ; iterator.Valid(); iterator.Next() { + keySplit := strings.Split(string(iterator.Key()), "/") + // key is in the format "clients//consensusStates/" + if len(keySplit) != 4 || keySplit[2] != string(host.KeyConsensusStatePrefix) { + continue + } + heights = append(heights, types.MustParseHeight(keySplit[3])) + } + + // delete all consensus states + for _, height := range heights { + clientStore.Delete(host.ConsensusStateKey(height)) + } } diff --git a/modules/core/03-connection/keeper/migrations.go b/modules/core/03-connection/keeper/migrations.go new file mode 100644 index 00000000000..69e8d6067a7 --- /dev/null +++ b/modules/core/03-connection/keeper/migrations.go @@ -0,0 +1,25 @@ +package keeper + +import ( + sdk 
"github.com/cosmos/cosmos-sdk/types" + + "github.com/cosmos/ibc-go/modules/core/03-connection/legacy/v100" +) + +// Migrator is a struct for handling in-place store migrations. +type Migrator struct { + keeper Keeper +} + +// NewMigrator returns a new Migrator. +func NewMigrator(keeper Keeper) Migrator { + return Migrator{keeper: keeper} +} + +// Migrate1to2 migrates from version 1 to 2. +// This migration prunes: +// +// - connections whose client has been deleted (solomachines) +func (m Migrator) Migrate1to2(ctx sdk.Context) error { + return v100.MigrateStore(ctx, m.keeper.storeKey, m.keeper.cdc) +} diff --git a/modules/core/03-connection/legacy/v100/store.go b/modules/core/03-connection/legacy/v100/store.go new file mode 100644 index 00000000000..15adf63d0ee --- /dev/null +++ b/modules/core/03-connection/legacy/v100/store.go @@ -0,0 +1,41 @@ +package v100 + +import ( + "github.com/cosmos/cosmos-sdk/codec" + sdk "github.com/cosmos/cosmos-sdk/types" + + "github.com/cosmos/ibc-go/modules/core/03-connection/types" + host "github.com/cosmos/ibc-go/modules/core/24-host" +) + +// MigrateStore performs in-place store migrations from SDK v0.40 of the IBC module to v1.0.0 of ibc-go. 
+// The migration includes: +// +// - Pruning all connections whose client has been removed (solo machines) +func MigrateStore(ctx sdk.Context, storeKey sdk.StoreKey, cdc codec.BinaryCodec) (err error) { + var connections []types.IdentifiedConnection + + // clients and connections use the same store key + store := ctx.KVStore(storeKey) + iterator := sdk.KVStorePrefixIterator(store, []byte(host.KeyConnectionPrefix)) + + defer iterator.Close() + for ; iterator.Valid(); iterator.Next() { + var connection types.ConnectionEnd + cdc.MustUnmarshal(iterator.Value(), &connection) + + bz := store.Get(host.FullClientStateKey(connection.ClientId)) + if bz == nil { + // client has been pruned, remove connection as well + connectionID := host.MustParseConnectionPath(string(iterator.Key())) + connections = append(connections, types.NewIdentifiedConnection(connectionID, connection)) + } + + } + + for _, conn := range connections { + store.Delete(host.ConnectionKey(conn.Id)) + } + + return nil +} diff --git a/modules/core/04-channel/keeper/migrations.go b/modules/core/04-channel/keeper/migrations.go new file mode 100644 index 00000000000..71e861ac091 --- /dev/null +++ b/modules/core/04-channel/keeper/migrations.go @@ -0,0 +1,25 @@ +package keeper + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + + "github.com/cosmos/ibc-go/modules/core/04-channel/legacy/v100" +) + +// Migrator is a struct for handling in-place store migrations. +type Migrator struct { + keeper Keeper +} + +// NewMigrator returns a new Migrator. +func NewMigrator(keeper Keeper) Migrator { + return Migrator{keeper: keeper} +} + +// Migrate1to2 migrates from version 1 to 2. 
+// This migration prunes: +// +// - channels whose connection has been deleted (solomachines) +func (m Migrator) Migrate1to2(ctx sdk.Context) error { + return v100.MigrateStore(ctx, m.keeper.storeKey, m.keeper.cdc) +} diff --git a/modules/core/04-channel/legacy/v100/store.go b/modules/core/04-channel/legacy/v100/store.go new file mode 100644 index 00000000000..4b6188de451 --- /dev/null +++ b/modules/core/04-channel/legacy/v100/store.go @@ -0,0 +1,41 @@ +package v100 + +import ( + "github.com/cosmos/cosmos-sdk/codec" + sdk "github.com/cosmos/cosmos-sdk/types" + + "github.com/cosmos/ibc-go/modules/core/04-channel/types" + host "github.com/cosmos/ibc-go/modules/core/24-host" +) + +// MigrateStore performs in-place store migrations from SDK v0.40 of the IBC module to v1.0.0 of ibc-go. +// The migration includes: +// +// - Pruning all channels whose connection has been removed (solo machines) +func MigrateStore(ctx sdk.Context, storeKey sdk.StoreKey, cdc codec.BinaryCodec) (err error) { + var channels []types.IdentifiedChannel + + // connections and channels use the same store key + store := ctx.KVStore(storeKey) + + iterator := sdk.KVStorePrefixIterator(store, []byte(host.KeyChannelEndPrefix)) + + defer iterator.Close() + for ; iterator.Valid(); iterator.Next() { + var channel types.Channel + cdc.MustUnmarshal(iterator.Value(), &channel) + + bz := store.Get(host.ConnectionKey(channel.ConnectionHops[0])) + if bz == nil { + // connection has been pruned, remove channel as well + portID, channelID := host.MustParseChannelPath(string(iterator.Key())) + channels = append(channels, types.NewIdentifiedChannel(portID, channelID, channel)) + } + } + + for _, channel := range channels { + store.Delete(host.ChannelKey(channel.PortId, channel.ChannelId)) + } + + return nil +} diff --git a/modules/core/keeper/migrations.go b/modules/core/keeper/migrations.go new file mode 100644 index 00000000000..526fd404192 --- /dev/null +++ b/modules/core/keeper/migrations.go @@ -0,0 +1,47 @@ 
+package keeper + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + + clientkeeper "github.com/cosmos/ibc-go/modules/core/02-client/keeper" + connectionkeeper "github.com/cosmos/ibc-go/modules/core/03-connection/keeper" + channelkeeper "github.com/cosmos/ibc-go/modules/core/04-channel/keeper" +) + +// Migrator is a struct for handling in-place store migrations. +type Migrator struct { + keeper Keeper +} + +// NewMigrator returns a new Migrator. +func NewMigrator(keeper Keeper) Migrator { + return Migrator{keeper: keeper} +} + +// Migrate1to2 migrates from version 1 to 2. +// This migration prunes: +// - solo machine clients +// - connections using solo machines +// - channels using solo machines +// - expired tendermint consensus states +// +// Connections are removed if the associated client does not exist. +// Channels are removed if the associated connection does not exist. +func (m Migrator) Migrate1to2(ctx sdk.Context) error { + clientMigrator := clientkeeper.NewMigrator(m.keeper.ClientKeeper) + if err := clientMigrator.Migrate1to2(ctx); err != nil { + return err + } + + connectionMigrator := connectionkeeper.NewMigrator(m.keeper.ConnectionKeeper) + if err := connectionMigrator.Migrate1to2(ctx); err != nil { + return err + } + + channelMigrator := channelkeeper.NewMigrator(m.keeper.ChannelKeeper) + if err := channelMigrator.Migrate1to2(ctx); err != nil { + return err + } + + return nil +} diff --git a/modules/light-clients/07-tendermint/types/store.go b/modules/light-clients/07-tendermint/types/store.go index 0686457a75f..e3cabcebc97 100644 --- a/modules/light-clients/07-tendermint/types/store.go +++ b/modules/light-clients/07-tendermint/types/store.go @@ -271,6 +271,8 @@ func PruneAllExpiredConsensusStates( ctx sdk.Context, clientStore sdk.KVStore, cdc codec.BinaryCodec, clientState *ClientState, ) (err error) { + var heights []exported.Height + pruneCb := func(height exported.Height) bool { consState, err := GetConsensusState(clientStore, cdc, height) 
// this error should never occur @@ -279,17 +281,22 @@ func PruneAllExpiredConsensusStates( } if clientState.IsExpired(consState.Timestamp, ctx.BlockTime()) { - deleteConsensusState(clientStore, height) - deleteConsensusMetadata(clientStore, height) + heights = append(heights, height) } return false } + IterateConsensusStateAscending(clientStore, pruneCb) if err != nil { return err } + for _, height := range heights { + deleteConsensusState(clientStore, height) + deleteConsensusMetadata(clientStore, height) + } + return nil } From 29c1c48b4573ac0e8694a0d53a50482687682c22 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Colin=20Axn=C3=A9r?= <25233464+colin-axner@users.noreply.github.com> Date: Mon, 7 Jun 2021 13:17:52 +0200 Subject: [PATCH 03/21] migrate solomachine from v1 to v2 during in place migration Regenerate v1 solo machine definition in 02-client legacy Migrate from v1 to v2 solo machine client state Prune v1 solo machine consensus states --- docs/ibc/proto-docs.md | 338 ++ go.mod | 1 + .../02-client/legacy/v100/solomachine.pb.go | 4121 +++++++++++++++++ modules/core/02-client/legacy/v100/store.go | 62 +- .../core/03-connection/keeper/migrations.go | 25 - .../core/03-connection/legacy/v100/store.go | 41 - modules/core/04-channel/keeper/migrations.go | 25 - modules/core/04-channel/legacy/v100/store.go | 41 - modules/core/keeper/migrations.go | 18 +- .../07-tendermint/types/tendermint.pb.go | 4 +- .../solomachine/v1/solomachine.proto | 189 + 11 files changed, 4701 insertions(+), 164 deletions(-) create mode 100644 modules/core/02-client/legacy/v100/solomachine.pb.go delete mode 100644 modules/core/03-connection/keeper/migrations.go delete mode 100644 modules/core/03-connection/legacy/v100/store.go delete mode 100644 modules/core/04-channel/keeper/migrations.go delete mode 100644 modules/core/04-channel/legacy/v100/store.go create mode 100644 proto/ibc/lightclients/solomachine/v1/solomachine.proto diff --git a/docs/ibc/proto-docs.md b/docs/ibc/proto-docs.md index 
701d4684940..b3937fc3d0e 100644 --- a/docs/ibc/proto-docs.md +++ b/docs/ibc/proto-docs.md @@ -195,6 +195,26 @@ - [ibc/lightclients/localhost/v1/localhost.proto](#ibc/lightclients/localhost/v1/localhost.proto) - [ClientState](#ibc.lightclients.localhost.v1.ClientState) +- [ibc/lightclients/solomachine/v1/solomachine.proto](#ibc/lightclients/solomachine/v1/solomachine.proto) + - [ChannelStateData](#ibc.lightclients.solomachine.v1.ChannelStateData) + - [ClientState](#ibc.lightclients.solomachine.v1.ClientState) + - [ClientStateData](#ibc.lightclients.solomachine.v1.ClientStateData) + - [ConnectionStateData](#ibc.lightclients.solomachine.v1.ConnectionStateData) + - [ConsensusState](#ibc.lightclients.solomachine.v1.ConsensusState) + - [ConsensusStateData](#ibc.lightclients.solomachine.v1.ConsensusStateData) + - [Header](#ibc.lightclients.solomachine.v1.Header) + - [HeaderData](#ibc.lightclients.solomachine.v1.HeaderData) + - [Misbehaviour](#ibc.lightclients.solomachine.v1.Misbehaviour) + - [NextSequenceRecvData](#ibc.lightclients.solomachine.v1.NextSequenceRecvData) + - [PacketAcknowledgementData](#ibc.lightclients.solomachine.v1.PacketAcknowledgementData) + - [PacketCommitmentData](#ibc.lightclients.solomachine.v1.PacketCommitmentData) + - [PacketReceiptAbsenceData](#ibc.lightclients.solomachine.v1.PacketReceiptAbsenceData) + - [SignBytes](#ibc.lightclients.solomachine.v1.SignBytes) + - [SignatureAndData](#ibc.lightclients.solomachine.v1.SignatureAndData) + - [TimestampedSignatureData](#ibc.lightclients.solomachine.v1.TimestampedSignatureData) + + - [DataType](#ibc.lightclients.solomachine.v1.DataType) + - [ibc/lightclients/solomachine/v2/solomachine.proto](#ibc/lightclients/solomachine/v2/solomachine.proto) - [ChannelStateData](#ibc.lightclients.solomachine.v2.ChannelStateData) - [ClientState](#ibc.lightclients.solomachine.v2.ClientState) @@ -2919,6 +2939,324 @@ access to keys outside the client prefix. + +

Top

+ +## ibc/lightclients/solomachine/v1/solomachine.proto + + + + + +### ChannelStateData +ChannelStateData returns the SignBytes data for channel state +verification. + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| `path` | [bytes](#bytes) | | | +| `channel` | [ibc.core.channel.v1.Channel](#ibc.core.channel.v1.Channel) | | | + + + + + + + + +### ClientState +ClientState defines a solo machine client that tracks the current consensus +state and if the client is frozen. + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| `sequence` | [uint64](#uint64) | | latest sequence of the client state | +| `frozen_sequence` | [uint64](#uint64) | | frozen sequence of the solo machine | +| `consensus_state` | [ConsensusState](#ibc.lightclients.solomachine.v1.ConsensusState) | | | +| `allow_update_after_proposal` | [bool](#bool) | | when set to true, will allow governance to update a solo machine client. The client will be unfrozen if it is frozen. | + + + + + + + + +### ClientStateData +ClientStateData returns the SignBytes data for client state verification. + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| `path` | [bytes](#bytes) | | | +| `client_state` | [google.protobuf.Any](#google.protobuf.Any) | | | + + + + + + + + +### ConnectionStateData +ConnectionStateData returns the SignBytes data for connection state +verification. + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| `path` | [bytes](#bytes) | | | +| `connection` | [ibc.core.connection.v1.ConnectionEnd](#ibc.core.connection.v1.ConnectionEnd) | | | + + + + + + + + +### ConsensusState +ConsensusState defines a solo machine consensus state. The sequence of a +consensus state is contained in the "height" key used in storing the +consensus state. 
+ + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| `public_key` | [google.protobuf.Any](#google.protobuf.Any) | | public key of the solo machine | +| `diversifier` | [string](#string) | | diversifier allows the same public key to be re-used across different solo machine clients (potentially on different chains) without being considered misbehaviour. | +| `timestamp` | [uint64](#uint64) | | | + + + + + + + + +### ConsensusStateData +ConsensusStateData returns the SignBytes data for consensus state +verification. + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| `path` | [bytes](#bytes) | | | +| `consensus_state` | [google.protobuf.Any](#google.protobuf.Any) | | | + + + + + + + + +### Header +Header defines a solo machine consensus header + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| `sequence` | [uint64](#uint64) | | sequence to update solo machine public key at | +| `timestamp` | [uint64](#uint64) | | | +| `signature` | [bytes](#bytes) | | | +| `new_public_key` | [google.protobuf.Any](#google.protobuf.Any) | | | +| `new_diversifier` | [string](#string) | | | + + + + + + + + +### HeaderData +HeaderData returns the SignBytes data for update verification. + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| `new_pub_key` | [google.protobuf.Any](#google.protobuf.Any) | | header public key | +| `new_diversifier` | [string](#string) | | header diversifier | + + + + + + + + +### Misbehaviour +Misbehaviour defines misbehaviour for a solo machine which consists +of a sequence and two signatures over different messages at that sequence. 
+ + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| `client_id` | [string](#string) | | | +| `sequence` | [uint64](#uint64) | | | +| `signature_one` | [SignatureAndData](#ibc.lightclients.solomachine.v1.SignatureAndData) | | | +| `signature_two` | [SignatureAndData](#ibc.lightclients.solomachine.v1.SignatureAndData) | | | + + + + + + + + +### NextSequenceRecvData +NextSequenceRecvData returns the SignBytes data for verification of the next +sequence to be received. + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| `path` | [bytes](#bytes) | | | +| `next_seq_recv` | [uint64](#uint64) | | | + + + + + + + + +### PacketAcknowledgementData +PacketAcknowledgementData returns the SignBytes data for acknowledgement +verification. + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| `path` | [bytes](#bytes) | | | +| `acknowledgement` | [bytes](#bytes) | | | + + + + + + + + +### PacketCommitmentData +PacketCommitmentData returns the SignBytes data for packet commitment +verification. + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| `path` | [bytes](#bytes) | | | +| `commitment` | [bytes](#bytes) | | | + + + + + + + + +### PacketReceiptAbsenceData +PacketReceiptAbsenceData returns the SignBytes data for +packet receipt absence verification. + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| `path` | [bytes](#bytes) | | | + + + + + + + + +### SignBytes +SignBytes defines the signed bytes used for signature verification. 
+ + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| `sequence` | [uint64](#uint64) | | | +| `timestamp` | [uint64](#uint64) | | | +| `diversifier` | [string](#string) | | | +| `data_type` | [DataType](#ibc.lightclients.solomachine.v1.DataType) | | type of the data used | +| `data` | [bytes](#bytes) | | marshaled data | + + + + + + + + +### SignatureAndData +SignatureAndData contains a signature and the data signed over to create that +signature. + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| `signature` | [bytes](#bytes) | | | +| `data_type` | [DataType](#ibc.lightclients.solomachine.v1.DataType) | | | +| `data` | [bytes](#bytes) | | | +| `timestamp` | [uint64](#uint64) | | | + + + + + + + + +### TimestampedSignatureData +TimestampedSignatureData contains the signature data and the timestamp of the +signature. + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| `signature_data` | [bytes](#bytes) | | | +| `timestamp` | [uint64](#uint64) | | | + + + + + + + + + + +### DataType +DataType defines the type of solo machine proof being created. This is done +to preserve uniqueness of different data sign byte encodings. 
+ +| Name | Number | Description | +| ---- | ------ | ----------- | +| DATA_TYPE_UNINITIALIZED_UNSPECIFIED | 0 | Default State | +| DATA_TYPE_CLIENT_STATE | 1 | Data type for client state verification | +| DATA_TYPE_CONSENSUS_STATE | 2 | Data type for consensus state verification | +| DATA_TYPE_CONNECTION_STATE | 3 | Data type for connection state verification | +| DATA_TYPE_CHANNEL_STATE | 4 | Data type for channel state verification | +| DATA_TYPE_PACKET_COMMITMENT | 5 | Data type for packet commitment verification | +| DATA_TYPE_PACKET_ACKNOWLEDGEMENT | 6 | Data type for packet acknowledgement verification | +| DATA_TYPE_PACKET_RECEIPT_ABSENCE | 7 | Data type for packet receipt absence verification | +| DATA_TYPE_NEXT_SEQUENCE_RECV | 8 | Data type for next sequence recv verification | +| DATA_TYPE_HEADER | 9 | Data type for header verification | + + + + + + + + + +

Top

diff --git a/go.mod b/go.mod index 233f26b718e..f4100fcb9c5 100644 --- a/go.mod +++ b/go.mod @@ -22,4 +22,5 @@ require ( github.com/tendermint/tm-db v0.6.4 google.golang.org/genproto v0.0.0-20210114201628-6edceaf6022f google.golang.org/grpc v1.37.0 + google.golang.org/protobuf v1.26.0 ) diff --git a/modules/core/02-client/legacy/v100/solomachine.pb.go b/modules/core/02-client/legacy/v100/solomachine.pb.go new file mode 100644 index 00000000000..c35edaf8b39 --- /dev/null +++ b/modules/core/02-client/legacy/v100/solomachine.pb.go @@ -0,0 +1,4121 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: ibc/lightclients/solomachine/v1/solomachine.proto + +package v100 + +import ( + fmt "fmt" + types "github.com/cosmos/cosmos-sdk/codec/types" + types1 "github.com/cosmos/ibc-go/modules/core/03-connection/types" + types2 "github.com/cosmos/ibc-go/modules/core/04-channel/types" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// DataType defines the type of solo machine proof being created. This is done +// to preserve uniqueness of different data sign byte encodings. 
+type DataType int32 + +const ( + // Default State + UNSPECIFIED DataType = 0 + // Data type for client state verification + CLIENT DataType = 1 + // Data type for consensus state verification + CONSENSUS DataType = 2 + // Data type for connection state verification + CONNECTION DataType = 3 + // Data type for channel state verification + CHANNEL DataType = 4 + // Data type for packet commitment verification + PACKETCOMMITMENT DataType = 5 + // Data type for packet acknowledgement verification + PACKETACKNOWLEDGEMENT DataType = 6 + // Data type for packet receipt absence verification + PACKETRECEIPTABSENCE DataType = 7 + // Data type for next sequence recv verification + NEXTSEQUENCERECV DataType = 8 + // Data type for header verification + HEADER DataType = 9 +) + +var DataType_name = map[int32]string{ + 0: "DATA_TYPE_UNINITIALIZED_UNSPECIFIED", + 1: "DATA_TYPE_CLIENT_STATE", + 2: "DATA_TYPE_CONSENSUS_STATE", + 3: "DATA_TYPE_CONNECTION_STATE", + 4: "DATA_TYPE_CHANNEL_STATE", + 5: "DATA_TYPE_PACKET_COMMITMENT", + 6: "DATA_TYPE_PACKET_ACKNOWLEDGEMENT", + 7: "DATA_TYPE_PACKET_RECEIPT_ABSENCE", + 8: "DATA_TYPE_NEXT_SEQUENCE_RECV", + 9: "DATA_TYPE_HEADER", +} + +var DataType_value = map[string]int32{ + "DATA_TYPE_UNINITIALIZED_UNSPECIFIED": 0, + "DATA_TYPE_CLIENT_STATE": 1, + "DATA_TYPE_CONSENSUS_STATE": 2, + "DATA_TYPE_CONNECTION_STATE": 3, + "DATA_TYPE_CHANNEL_STATE": 4, + "DATA_TYPE_PACKET_COMMITMENT": 5, + "DATA_TYPE_PACKET_ACKNOWLEDGEMENT": 6, + "DATA_TYPE_PACKET_RECEIPT_ABSENCE": 7, + "DATA_TYPE_NEXT_SEQUENCE_RECV": 8, + "DATA_TYPE_HEADER": 9, +} + +func (x DataType) String() string { + return proto.EnumName(DataType_name, int32(x)) +} + +func (DataType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_6cc2ee18f7f86d4e, []int{0} +} + +// ClientState defines a solo machine client that tracks the current consensus +// state and if the client is frozen. 
+type ClientState struct { + // latest sequence of the client state + Sequence uint64 `protobuf:"varint,1,opt,name=sequence,proto3" json:"sequence,omitempty"` + // frozen sequence of the solo machine + FrozenSequence uint64 `protobuf:"varint,2,opt,name=frozen_sequence,json=frozenSequence,proto3" json:"frozen_sequence,omitempty" yaml:"frozen_sequence"` + ConsensusState *ConsensusState `protobuf:"bytes,3,opt,name=consensus_state,json=consensusState,proto3" json:"consensus_state,omitempty" yaml:"consensus_state"` + // when set to true, will allow governance to update a solo machine client. + // The client will be unfrozen if it is frozen. + AllowUpdateAfterProposal bool `protobuf:"varint,4,opt,name=allow_update_after_proposal,json=allowUpdateAfterProposal,proto3" json:"allow_update_after_proposal,omitempty" yaml:"allow_update_after_proposal"` +} + +func (m *ClientState) Reset() { *m = ClientState{} } +func (m *ClientState) String() string { return proto.CompactTextString(m) } +func (*ClientState) ProtoMessage() {} +func (*ClientState) Descriptor() ([]byte, []int) { + return fileDescriptor_6cc2ee18f7f86d4e, []int{0} +} +func (m *ClientState) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClientState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ClientState.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ClientState) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClientState.Merge(m, src) +} +func (m *ClientState) XXX_Size() int { + return m.Size() +} +func (m *ClientState) XXX_DiscardUnknown() { + xxx_messageInfo_ClientState.DiscardUnknown(m) +} + +var xxx_messageInfo_ClientState proto.InternalMessageInfo + +// ConsensusState defines a solo machine consensus state. 
The sequence of a +// consensus state is contained in the "height" key used in storing the +// consensus state. +type ConsensusState struct { + // public key of the solo machine + PublicKey *types.Any `protobuf:"bytes,1,opt,name=public_key,json=publicKey,proto3" json:"public_key,omitempty" yaml:"public_key"` + // diversifier allows the same public key to be re-used across different solo + // machine clients (potentially on different chains) without being considered + // misbehaviour. + Diversifier string `protobuf:"bytes,2,opt,name=diversifier,proto3" json:"diversifier,omitempty"` + Timestamp uint64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"` +} + +func (m *ConsensusState) Reset() { *m = ConsensusState{} } +func (m *ConsensusState) String() string { return proto.CompactTextString(m) } +func (*ConsensusState) ProtoMessage() {} +func (*ConsensusState) Descriptor() ([]byte, []int) { + return fileDescriptor_6cc2ee18f7f86d4e, []int{1} +} +func (m *ConsensusState) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConsensusState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ConsensusState.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ConsensusState) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConsensusState.Merge(m, src) +} +func (m *ConsensusState) XXX_Size() int { + return m.Size() +} +func (m *ConsensusState) XXX_DiscardUnknown() { + xxx_messageInfo_ConsensusState.DiscardUnknown(m) +} + +var xxx_messageInfo_ConsensusState proto.InternalMessageInfo + +// Header defines a solo machine consensus header +type Header struct { + // sequence to update solo machine public key at + Sequence uint64 `protobuf:"varint,1,opt,name=sequence,proto3" json:"sequence,omitempty"` + Timestamp uint64 `protobuf:"varint,2,opt,name=timestamp,proto3" 
json:"timestamp,omitempty"` + Signature []byte `protobuf:"bytes,3,opt,name=signature,proto3" json:"signature,omitempty"` + NewPublicKey *types.Any `protobuf:"bytes,4,opt,name=new_public_key,json=newPublicKey,proto3" json:"new_public_key,omitempty" yaml:"new_public_key"` + NewDiversifier string `protobuf:"bytes,5,opt,name=new_diversifier,json=newDiversifier,proto3" json:"new_diversifier,omitempty" yaml:"new_diversifier"` +} + +func (m *Header) Reset() { *m = Header{} } +func (m *Header) String() string { return proto.CompactTextString(m) } +func (*Header) ProtoMessage() {} +func (*Header) Descriptor() ([]byte, []int) { + return fileDescriptor_6cc2ee18f7f86d4e, []int{2} +} +func (m *Header) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Header) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Header.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Header) XXX_Merge(src proto.Message) { + xxx_messageInfo_Header.Merge(m, src) +} +func (m *Header) XXX_Size() int { + return m.Size() +} +func (m *Header) XXX_DiscardUnknown() { + xxx_messageInfo_Header.DiscardUnknown(m) +} + +var xxx_messageInfo_Header proto.InternalMessageInfo + +// Misbehaviour defines misbehaviour for a solo machine which consists +// of a sequence and two signatures over different messages at that sequence. 
+type Misbehaviour struct { + ClientId string `protobuf:"bytes,1,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty" yaml:"client_id"` + Sequence uint64 `protobuf:"varint,2,opt,name=sequence,proto3" json:"sequence,omitempty"` + SignatureOne *SignatureAndData `protobuf:"bytes,3,opt,name=signature_one,json=signatureOne,proto3" json:"signature_one,omitempty" yaml:"signature_one"` + SignatureTwo *SignatureAndData `protobuf:"bytes,4,opt,name=signature_two,json=signatureTwo,proto3" json:"signature_two,omitempty" yaml:"signature_two"` +} + +func (m *Misbehaviour) Reset() { *m = Misbehaviour{} } +func (m *Misbehaviour) String() string { return proto.CompactTextString(m) } +func (*Misbehaviour) ProtoMessage() {} +func (*Misbehaviour) Descriptor() ([]byte, []int) { + return fileDescriptor_6cc2ee18f7f86d4e, []int{3} +} +func (m *Misbehaviour) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Misbehaviour) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Misbehaviour.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Misbehaviour) XXX_Merge(src proto.Message) { + xxx_messageInfo_Misbehaviour.Merge(m, src) +} +func (m *Misbehaviour) XXX_Size() int { + return m.Size() +} +func (m *Misbehaviour) XXX_DiscardUnknown() { + xxx_messageInfo_Misbehaviour.DiscardUnknown(m) +} + +var xxx_messageInfo_Misbehaviour proto.InternalMessageInfo + +// SignatureAndData contains a signature and the data signed over to create that +// signature. 
+type SignatureAndData struct { + Signature []byte `protobuf:"bytes,1,opt,name=signature,proto3" json:"signature,omitempty"` + DataType DataType `protobuf:"varint,2,opt,name=data_type,json=dataType,proto3,enum=ibc.lightclients.solomachine.v1.DataType" json:"data_type,omitempty" yaml:"data_type"` + Data []byte `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"` + Timestamp uint64 `protobuf:"varint,4,opt,name=timestamp,proto3" json:"timestamp,omitempty"` +} + +func (m *SignatureAndData) Reset() { *m = SignatureAndData{} } +func (m *SignatureAndData) String() string { return proto.CompactTextString(m) } +func (*SignatureAndData) ProtoMessage() {} +func (*SignatureAndData) Descriptor() ([]byte, []int) { + return fileDescriptor_6cc2ee18f7f86d4e, []int{4} +} +func (m *SignatureAndData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SignatureAndData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SignatureAndData.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SignatureAndData) XXX_Merge(src proto.Message) { + xxx_messageInfo_SignatureAndData.Merge(m, src) +} +func (m *SignatureAndData) XXX_Size() int { + return m.Size() +} +func (m *SignatureAndData) XXX_DiscardUnknown() { + xxx_messageInfo_SignatureAndData.DiscardUnknown(m) +} + +var xxx_messageInfo_SignatureAndData proto.InternalMessageInfo + +// TimestampedSignatureData contains the signature data and the timestamp of the +// signature. 
+type TimestampedSignatureData struct { + SignatureData []byte `protobuf:"bytes,1,opt,name=signature_data,json=signatureData,proto3" json:"signature_data,omitempty" yaml:"signature_data"` + Timestamp uint64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` +} + +func (m *TimestampedSignatureData) Reset() { *m = TimestampedSignatureData{} } +func (m *TimestampedSignatureData) String() string { return proto.CompactTextString(m) } +func (*TimestampedSignatureData) ProtoMessage() {} +func (*TimestampedSignatureData) Descriptor() ([]byte, []int) { + return fileDescriptor_6cc2ee18f7f86d4e, []int{5} +} +func (m *TimestampedSignatureData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TimestampedSignatureData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TimestampedSignatureData.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TimestampedSignatureData) XXX_Merge(src proto.Message) { + xxx_messageInfo_TimestampedSignatureData.Merge(m, src) +} +func (m *TimestampedSignatureData) XXX_Size() int { + return m.Size() +} +func (m *TimestampedSignatureData) XXX_DiscardUnknown() { + xxx_messageInfo_TimestampedSignatureData.DiscardUnknown(m) +} + +var xxx_messageInfo_TimestampedSignatureData proto.InternalMessageInfo + +// SignBytes defines the signed bytes used for signature verification. 
+type SignBytes struct { + Sequence uint64 `protobuf:"varint,1,opt,name=sequence,proto3" json:"sequence,omitempty"` + Timestamp uint64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + Diversifier string `protobuf:"bytes,3,opt,name=diversifier,proto3" json:"diversifier,omitempty"` + // type of the data used + DataType DataType `protobuf:"varint,4,opt,name=data_type,json=dataType,proto3,enum=ibc.lightclients.solomachine.v1.DataType" json:"data_type,omitempty" yaml:"data_type"` + // marshaled data + Data []byte `protobuf:"bytes,5,opt,name=data,proto3" json:"data,omitempty"` +} + +func (m *SignBytes) Reset() { *m = SignBytes{} } +func (m *SignBytes) String() string { return proto.CompactTextString(m) } +func (*SignBytes) ProtoMessage() {} +func (*SignBytes) Descriptor() ([]byte, []int) { + return fileDescriptor_6cc2ee18f7f86d4e, []int{6} +} +func (m *SignBytes) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SignBytes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SignBytes.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SignBytes) XXX_Merge(src proto.Message) { + xxx_messageInfo_SignBytes.Merge(m, src) +} +func (m *SignBytes) XXX_Size() int { + return m.Size() +} +func (m *SignBytes) XXX_DiscardUnknown() { + xxx_messageInfo_SignBytes.DiscardUnknown(m) +} + +var xxx_messageInfo_SignBytes proto.InternalMessageInfo + +// HeaderData returns the SignBytes data for update verification. 
+type HeaderData struct { + // header public key + NewPubKey *types.Any `protobuf:"bytes,1,opt,name=new_pub_key,json=newPubKey,proto3" json:"new_pub_key,omitempty" yaml:"new_pub_key"` + // header diversifier + NewDiversifier string `protobuf:"bytes,2,opt,name=new_diversifier,json=newDiversifier,proto3" json:"new_diversifier,omitempty" yaml:"new_diversifier"` +} + +func (m *HeaderData) Reset() { *m = HeaderData{} } +func (m *HeaderData) String() string { return proto.CompactTextString(m) } +func (*HeaderData) ProtoMessage() {} +func (*HeaderData) Descriptor() ([]byte, []int) { + return fileDescriptor_6cc2ee18f7f86d4e, []int{7} +} +func (m *HeaderData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HeaderData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_HeaderData.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *HeaderData) XXX_Merge(src proto.Message) { + xxx_messageInfo_HeaderData.Merge(m, src) +} +func (m *HeaderData) XXX_Size() int { + return m.Size() +} +func (m *HeaderData) XXX_DiscardUnknown() { + xxx_messageInfo_HeaderData.DiscardUnknown(m) +} + +var xxx_messageInfo_HeaderData proto.InternalMessageInfo + +// ClientStateData returns the SignBytes data for client state verification. 
+type ClientStateData struct { + Path []byte `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + ClientState *types.Any `protobuf:"bytes,2,opt,name=client_state,json=clientState,proto3" json:"client_state,omitempty" yaml:"client_state"` +} + +func (m *ClientStateData) Reset() { *m = ClientStateData{} } +func (m *ClientStateData) String() string { return proto.CompactTextString(m) } +func (*ClientStateData) ProtoMessage() {} +func (*ClientStateData) Descriptor() ([]byte, []int) { + return fileDescriptor_6cc2ee18f7f86d4e, []int{8} +} +func (m *ClientStateData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClientStateData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ClientStateData.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ClientStateData) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClientStateData.Merge(m, src) +} +func (m *ClientStateData) XXX_Size() int { + return m.Size() +} +func (m *ClientStateData) XXX_DiscardUnknown() { + xxx_messageInfo_ClientStateData.DiscardUnknown(m) +} + +var xxx_messageInfo_ClientStateData proto.InternalMessageInfo + +// ConsensusStateData returns the SignBytes data for consensus state +// verification. 
+type ConsensusStateData struct { + Path []byte `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + ConsensusState *types.Any `protobuf:"bytes,2,opt,name=consensus_state,json=consensusState,proto3" json:"consensus_state,omitempty" yaml:"consensus_state"` +} + +func (m *ConsensusStateData) Reset() { *m = ConsensusStateData{} } +func (m *ConsensusStateData) String() string { return proto.CompactTextString(m) } +func (*ConsensusStateData) ProtoMessage() {} +func (*ConsensusStateData) Descriptor() ([]byte, []int) { + return fileDescriptor_6cc2ee18f7f86d4e, []int{9} +} +func (m *ConsensusStateData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConsensusStateData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ConsensusStateData.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ConsensusStateData) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConsensusStateData.Merge(m, src) +} +func (m *ConsensusStateData) XXX_Size() int { + return m.Size() +} +func (m *ConsensusStateData) XXX_DiscardUnknown() { + xxx_messageInfo_ConsensusStateData.DiscardUnknown(m) +} + +var xxx_messageInfo_ConsensusStateData proto.InternalMessageInfo + +// ConnectionStateData returns the SignBytes data for connection state +// verification. 
+type ConnectionStateData struct { + Path []byte `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + Connection *types1.ConnectionEnd `protobuf:"bytes,2,opt,name=connection,proto3" json:"connection,omitempty"` +} + +func (m *ConnectionStateData) Reset() { *m = ConnectionStateData{} } +func (m *ConnectionStateData) String() string { return proto.CompactTextString(m) } +func (*ConnectionStateData) ProtoMessage() {} +func (*ConnectionStateData) Descriptor() ([]byte, []int) { + return fileDescriptor_6cc2ee18f7f86d4e, []int{10} +} +func (m *ConnectionStateData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConnectionStateData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ConnectionStateData.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ConnectionStateData) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConnectionStateData.Merge(m, src) +} +func (m *ConnectionStateData) XXX_Size() int { + return m.Size() +} +func (m *ConnectionStateData) XXX_DiscardUnknown() { + xxx_messageInfo_ConnectionStateData.DiscardUnknown(m) +} + +var xxx_messageInfo_ConnectionStateData proto.InternalMessageInfo + +// ChannelStateData returns the SignBytes data for channel state +// verification. 
+type ChannelStateData struct { + Path []byte `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + Channel *types2.Channel `protobuf:"bytes,2,opt,name=channel,proto3" json:"channel,omitempty"` +} + +func (m *ChannelStateData) Reset() { *m = ChannelStateData{} } +func (m *ChannelStateData) String() string { return proto.CompactTextString(m) } +func (*ChannelStateData) ProtoMessage() {} +func (*ChannelStateData) Descriptor() ([]byte, []int) { + return fileDescriptor_6cc2ee18f7f86d4e, []int{11} +} +func (m *ChannelStateData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ChannelStateData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ChannelStateData.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ChannelStateData) XXX_Merge(src proto.Message) { + xxx_messageInfo_ChannelStateData.Merge(m, src) +} +func (m *ChannelStateData) XXX_Size() int { + return m.Size() +} +func (m *ChannelStateData) XXX_DiscardUnknown() { + xxx_messageInfo_ChannelStateData.DiscardUnknown(m) +} + +var xxx_messageInfo_ChannelStateData proto.InternalMessageInfo + +// PacketCommitmentData returns the SignBytes data for packet commitment +// verification. 
+type PacketCommitmentData struct { + Path []byte `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + Commitment []byte `protobuf:"bytes,2,opt,name=commitment,proto3" json:"commitment,omitempty"` +} + +func (m *PacketCommitmentData) Reset() { *m = PacketCommitmentData{} } +func (m *PacketCommitmentData) String() string { return proto.CompactTextString(m) } +func (*PacketCommitmentData) ProtoMessage() {} +func (*PacketCommitmentData) Descriptor() ([]byte, []int) { + return fileDescriptor_6cc2ee18f7f86d4e, []int{12} +} +func (m *PacketCommitmentData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PacketCommitmentData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PacketCommitmentData.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PacketCommitmentData) XXX_Merge(src proto.Message) { + xxx_messageInfo_PacketCommitmentData.Merge(m, src) +} +func (m *PacketCommitmentData) XXX_Size() int { + return m.Size() +} +func (m *PacketCommitmentData) XXX_DiscardUnknown() { + xxx_messageInfo_PacketCommitmentData.DiscardUnknown(m) +} + +var xxx_messageInfo_PacketCommitmentData proto.InternalMessageInfo + +func (m *PacketCommitmentData) GetPath() []byte { + if m != nil { + return m.Path + } + return nil +} + +func (m *PacketCommitmentData) GetCommitment() []byte { + if m != nil { + return m.Commitment + } + return nil +} + +// PacketAcknowledgementData returns the SignBytes data for acknowledgement +// verification. 
+type PacketAcknowledgementData struct { + Path []byte `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + Acknowledgement []byte `protobuf:"bytes,2,opt,name=acknowledgement,proto3" json:"acknowledgement,omitempty"` +} + +func (m *PacketAcknowledgementData) Reset() { *m = PacketAcknowledgementData{} } +func (m *PacketAcknowledgementData) String() string { return proto.CompactTextString(m) } +func (*PacketAcknowledgementData) ProtoMessage() {} +func (*PacketAcknowledgementData) Descriptor() ([]byte, []int) { + return fileDescriptor_6cc2ee18f7f86d4e, []int{13} +} +func (m *PacketAcknowledgementData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PacketAcknowledgementData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PacketAcknowledgementData.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PacketAcknowledgementData) XXX_Merge(src proto.Message) { + xxx_messageInfo_PacketAcknowledgementData.Merge(m, src) +} +func (m *PacketAcknowledgementData) XXX_Size() int { + return m.Size() +} +func (m *PacketAcknowledgementData) XXX_DiscardUnknown() { + xxx_messageInfo_PacketAcknowledgementData.DiscardUnknown(m) +} + +var xxx_messageInfo_PacketAcknowledgementData proto.InternalMessageInfo + +func (m *PacketAcknowledgementData) GetPath() []byte { + if m != nil { + return m.Path + } + return nil +} + +func (m *PacketAcknowledgementData) GetAcknowledgement() []byte { + if m != nil { + return m.Acknowledgement + } + return nil +} + +// PacketReceiptAbsenceData returns the SignBytes data for +// packet receipt absence verification. 
+type PacketReceiptAbsenceData struct { + Path []byte `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` +} + +func (m *PacketReceiptAbsenceData) Reset() { *m = PacketReceiptAbsenceData{} } +func (m *PacketReceiptAbsenceData) String() string { return proto.CompactTextString(m) } +func (*PacketReceiptAbsenceData) ProtoMessage() {} +func (*PacketReceiptAbsenceData) Descriptor() ([]byte, []int) { + return fileDescriptor_6cc2ee18f7f86d4e, []int{14} +} +func (m *PacketReceiptAbsenceData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PacketReceiptAbsenceData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PacketReceiptAbsenceData.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PacketReceiptAbsenceData) XXX_Merge(src proto.Message) { + xxx_messageInfo_PacketReceiptAbsenceData.Merge(m, src) +} +func (m *PacketReceiptAbsenceData) XXX_Size() int { + return m.Size() +} +func (m *PacketReceiptAbsenceData) XXX_DiscardUnknown() { + xxx_messageInfo_PacketReceiptAbsenceData.DiscardUnknown(m) +} + +var xxx_messageInfo_PacketReceiptAbsenceData proto.InternalMessageInfo + +func (m *PacketReceiptAbsenceData) GetPath() []byte { + if m != nil { + return m.Path + } + return nil +} + +// NextSequenceRecvData returns the SignBytes data for verification of the next +// sequence to be received. 
+type NextSequenceRecvData struct { + Path []byte `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + NextSeqRecv uint64 `protobuf:"varint,2,opt,name=next_seq_recv,json=nextSeqRecv,proto3" json:"next_seq_recv,omitempty" yaml:"next_seq_recv"` +} + +func (m *NextSequenceRecvData) Reset() { *m = NextSequenceRecvData{} } +func (m *NextSequenceRecvData) String() string { return proto.CompactTextString(m) } +func (*NextSequenceRecvData) ProtoMessage() {} +func (*NextSequenceRecvData) Descriptor() ([]byte, []int) { + return fileDescriptor_6cc2ee18f7f86d4e, []int{15} +} +func (m *NextSequenceRecvData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NextSequenceRecvData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_NextSequenceRecvData.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *NextSequenceRecvData) XXX_Merge(src proto.Message) { + xxx_messageInfo_NextSequenceRecvData.Merge(m, src) +} +func (m *NextSequenceRecvData) XXX_Size() int { + return m.Size() +} +func (m *NextSequenceRecvData) XXX_DiscardUnknown() { + xxx_messageInfo_NextSequenceRecvData.DiscardUnknown(m) +} + +var xxx_messageInfo_NextSequenceRecvData proto.InternalMessageInfo + +func (m *NextSequenceRecvData) GetPath() []byte { + if m != nil { + return m.Path + } + return nil +} + +func (m *NextSequenceRecvData) GetNextSeqRecv() uint64 { + if m != nil { + return m.NextSeqRecv + } + return 0 +} + +func init() { + proto.RegisterEnum("ibc.lightclients.solomachine.v1.DataType", DataType_name, DataType_value) + proto.RegisterType((*ClientState)(nil), "ibc.lightclients.solomachine.v1.ClientState") + proto.RegisterType((*ConsensusState)(nil), "ibc.lightclients.solomachine.v1.ConsensusState") + proto.RegisterType((*Header)(nil), "ibc.lightclients.solomachine.v1.Header") + 
proto.RegisterType((*Misbehaviour)(nil), "ibc.lightclients.solomachine.v1.Misbehaviour") + proto.RegisterType((*SignatureAndData)(nil), "ibc.lightclients.solomachine.v1.SignatureAndData") + proto.RegisterType((*TimestampedSignatureData)(nil), "ibc.lightclients.solomachine.v1.TimestampedSignatureData") + proto.RegisterType((*SignBytes)(nil), "ibc.lightclients.solomachine.v1.SignBytes") + proto.RegisterType((*HeaderData)(nil), "ibc.lightclients.solomachine.v1.HeaderData") + proto.RegisterType((*ClientStateData)(nil), "ibc.lightclients.solomachine.v1.ClientStateData") + proto.RegisterType((*ConsensusStateData)(nil), "ibc.lightclients.solomachine.v1.ConsensusStateData") + proto.RegisterType((*ConnectionStateData)(nil), "ibc.lightclients.solomachine.v1.ConnectionStateData") + proto.RegisterType((*ChannelStateData)(nil), "ibc.lightclients.solomachine.v1.ChannelStateData") + proto.RegisterType((*PacketCommitmentData)(nil), "ibc.lightclients.solomachine.v1.PacketCommitmentData") + proto.RegisterType((*PacketAcknowledgementData)(nil), "ibc.lightclients.solomachine.v1.PacketAcknowledgementData") + proto.RegisterType((*PacketReceiptAbsenceData)(nil), "ibc.lightclients.solomachine.v1.PacketReceiptAbsenceData") + proto.RegisterType((*NextSequenceRecvData)(nil), "ibc.lightclients.solomachine.v1.NextSequenceRecvData") +} + +func init() { + proto.RegisterFile("ibc/lightclients/solomachine/v1/solomachine.proto", fileDescriptor_6cc2ee18f7f86d4e) +} + +var fileDescriptor_6cc2ee18f7f86d4e = []byte{ + // 1368 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x57, 0xdf, 0x8e, 0xdb, 0x54, + 0x13, 0x5f, 0xa7, 0xe9, 0x76, 0x33, 0xd9, 0xee, 0xe6, 0x73, 0xd3, 0x36, 0xeb, 0x56, 0x89, 0x3f, + 0x7f, 0xfa, 0xca, 0x82, 0x68, 0xd2, 0x5d, 0x44, 0x85, 0x0a, 0x02, 0x1c, 0xc7, 0xd0, 0xb4, 0xbb, + 0xde, 0xe0, 0x38, 0x40, 0x2b, 0x24, 0xcb, 0x71, 0xce, 0x26, 0x56, 0x13, 0x9f, 0x10, 0x3b, 0x49, + 0x83, 0x84, 0x84, 0xb8, 0x2a, 0x11, 0x17, 0xbc, 
0x40, 0x24, 0x04, 0xe2, 0x55, 0x80, 0xcb, 0x72, + 0xc7, 0x55, 0x40, 0xed, 0x1b, 0xe4, 0x09, 0x90, 0x7d, 0x4e, 0x62, 0x3b, 0xdb, 0xcd, 0x8a, 0x7f, + 0x77, 0xe7, 0xcc, 0xfc, 0xe6, 0x37, 0x73, 0x66, 0xc6, 0x73, 0x8e, 0x61, 0xcf, 0xaa, 0x9b, 0x85, + 0xb6, 0xd5, 0x6c, 0xb9, 0x66, 0xdb, 0x42, 0xb6, 0xeb, 0x14, 0x1c, 0xdc, 0xc6, 0x1d, 0xc3, 0x6c, + 0x59, 0x36, 0x2a, 0x0c, 0xf6, 0xc2, 0xdb, 0x7c, 0xb7, 0x87, 0x5d, 0xcc, 0xe6, 0xac, 0xba, 0x99, + 0x0f, 0x9b, 0xe4, 0xc3, 0x98, 0xc1, 0x1e, 0xf7, 0x92, 0xc7, 0x69, 0xe2, 0x1e, 0x2a, 0x98, 0xd8, + 0xb6, 0x91, 0xe9, 0x5a, 0xd8, 0xf6, 0xa8, 0x82, 0x1d, 0x61, 0xe2, 0xfe, 0x1b, 0x00, 0x5b, 0x86, + 0x6d, 0xa3, 0xb6, 0x8f, 0x22, 0x4b, 0x0a, 0x49, 0x37, 0x71, 0x13, 0xfb, 0xcb, 0x82, 0xb7, 0xa2, + 0xd2, 0x9d, 0x26, 0xc6, 0xcd, 0x36, 0x2a, 0xf8, 0xbb, 0x7a, 0xff, 0xb8, 0x60, 0xd8, 0x23, 0xa2, + 0x12, 0x7e, 0x89, 0x41, 0x52, 0xf2, 0xe3, 0xaa, 0xba, 0x86, 0x8b, 0x58, 0x0e, 0x36, 0x1c, 0xf4, + 0x69, 0x1f, 0xd9, 0x26, 0xca, 0x30, 0x3c, 0xb3, 0x1b, 0x57, 0x17, 0x7b, 0x56, 0x82, 0xed, 0xe3, + 0x1e, 0xfe, 0x0c, 0xd9, 0xfa, 0x02, 0x12, 0xf3, 0x20, 0x45, 0x6e, 0x36, 0xcd, 0x5d, 0x19, 0x19, + 0x9d, 0xf6, 0x1d, 0x61, 0x09, 0x20, 0xa8, 0x5b, 0x44, 0x52, 0x9d, 0x93, 0xb8, 0xb0, 0x6d, 0x62, + 0xdb, 0x41, 0xb6, 0xd3, 0x77, 0x74, 0xc7, 0xf3, 0x99, 0x39, 0xc7, 0x33, 0xbb, 0xc9, 0xfd, 0x42, + 0xfe, 0x8c, 0x44, 0xe5, 0xa5, 0xb9, 0x9d, 0x1f, 0x6a, 0xd8, 0xeb, 0x12, 0xa3, 0xa0, 0x6e, 0x99, + 0x11, 0x2c, 0x8b, 0xe0, 0x9a, 0xd1, 0x6e, 0xe3, 0xa1, 0xde, 0xef, 0x36, 0x0c, 0x17, 0xe9, 0xc6, + 0xb1, 0x8b, 0x7a, 0x7a, 0xb7, 0x87, 0xbb, 0xd8, 0x31, 0xda, 0x99, 0x38, 0xcf, 0xec, 0x6e, 0x14, + 0x6f, 0xcc, 0xa6, 0x39, 0x81, 0x10, 0xae, 0x00, 0x0b, 0x6a, 0xc6, 0xd7, 0xd6, 0x7c, 0xa5, 0xe8, + 0xe9, 0x2a, 0x54, 0x75, 0x27, 0xfe, 0xe4, 0xdb, 0xdc, 0x9a, 0xf0, 0x1d, 0x03, 0x5b, 0xd1, 0x58, + 0xd9, 0x7b, 0x00, 0xdd, 0x7e, 0xbd, 0x6d, 0x99, 0xfa, 0x23, 0x34, 0xf2, 0x13, 0x9b, 0xdc, 0x4f, + 0xe7, 0x49, 0x59, 0xf2, 0xf3, 0xb2, 0xe4, 0x45, 0x7b, 0x54, 0xbc, 0x3c, 0x9b, 0xe6, 
0xfe, 0x43, + 0x82, 0x08, 0x2c, 0x04, 0x35, 0x41, 0x36, 0xf7, 0xd1, 0x88, 0xe5, 0x21, 0xd9, 0xb0, 0x06, 0xa8, + 0xe7, 0x58, 0xc7, 0x16, 0xea, 0xf9, 0x25, 0x48, 0xa8, 0x61, 0x11, 0x7b, 0x1d, 0x12, 0xae, 0xd5, + 0x41, 0x8e, 0x6b, 0x74, 0xba, 0x7e, 0x76, 0xe3, 0x6a, 0x20, 0xa0, 0x41, 0x7e, 0x19, 0x83, 0xf5, + 0xbb, 0xc8, 0x68, 0xa0, 0xde, 0xca, 0x9a, 0x47, 0xa8, 0x62, 0x4b, 0x54, 0x9e, 0xd6, 0xb1, 0x9a, + 0xb6, 0xe1, 0xf6, 0x7b, 0xa4, 0x8c, 0x9b, 0x6a, 0x20, 0x60, 0x6b, 0xb0, 0x65, 0xa3, 0xa1, 0x1e, + 0x3a, 0x78, 0x7c, 0xc5, 0xc1, 0x77, 0x66, 0xd3, 0xdc, 0x65, 0x72, 0xf0, 0xa8, 0x95, 0xa0, 0x6e, + 0xda, 0x68, 0x58, 0x59, 0x9c, 0x5f, 0x82, 0x6d, 0x0f, 0x10, 0xce, 0xc1, 0x79, 0x2f, 0x07, 0xe1, + 0x86, 0x58, 0x02, 0x08, 0xaa, 0x17, 0x49, 0x29, 0x10, 0xd0, 0x24, 0xfc, 0x14, 0x83, 0xcd, 0x43, + 0xcb, 0xa9, 0xa3, 0x96, 0x31, 0xb0, 0x70, 0xbf, 0xc7, 0xee, 0x41, 0x82, 0x34, 0x9f, 0x6e, 0x35, + 0xfc, 0x5c, 0x24, 0x8a, 0xe9, 0xd9, 0x34, 0x97, 0xa2, 0x6d, 0x36, 0x57, 0x09, 0xea, 0x06, 0x59, + 0x97, 0x1b, 0x91, 0xec, 0xc5, 0x96, 0xb2, 0xd7, 0x85, 0x8b, 0x8b, 0x74, 0xe8, 0xd8, 0x9e, 0xb7, + 0xfa, 0xde, 0x99, 0xad, 0x5e, 0x9d, 0x5b, 0x89, 0x76, 0xa3, 0x64, 0xb8, 0x46, 0x31, 0x33, 0x9b, + 0xe6, 0xd2, 0x24, 0x8a, 0x08, 0xa3, 0xa0, 0x6e, 0x2e, 0xf6, 0x47, 0xf6, 0x92, 0x47, 0x77, 0x88, + 0x69, 0xca, 0xff, 0x29, 0x8f, 0xee, 0x10, 0x87, 0x3d, 0x6a, 0x43, 0x4c, 0x33, 0xf9, 0x23, 0x03, + 0xa9, 0x65, 0x8a, 0x68, 0x7b, 0x30, 0xcb, 0xed, 0xf1, 0x09, 0x24, 0x1a, 0x86, 0x6b, 0xe8, 0xee, + 0xa8, 0x4b, 0x32, 0xb7, 0xb5, 0xff, 0xf2, 0x99, 0x61, 0x7a, 0xbc, 0xda, 0xa8, 0x8b, 0xc2, 0x65, + 0x59, 0xb0, 0x08, 0xea, 0x46, 0x83, 0xea, 0x59, 0x16, 0xe2, 0xde, 0x9a, 0x76, 0xa5, 0xbf, 0x8e, + 0x36, 0x73, 0xfc, 0xc5, 0xdf, 0xc5, 0x17, 0x0c, 0x64, 0xb4, 0xb9, 0x0c, 0x35, 0x16, 0x67, 0xf2, + 0x0f, 0xf4, 0x2e, 0x6c, 0x05, 0xb9, 0xf0, 0xe9, 0xfd, 0x53, 0x85, 0x7b, 0x37, 0xaa, 0x17, 0xd4, + 0xa0, 0x1c, 0xa5, 0x13, 0x21, 0xc4, 0x5e, 0x1c, 0xc2, 0x6f, 0x0c, 0x24, 0x3c, 0xbf, 0xc5, 0x91, + 0x8b, 0x9c, 0xbf, 0xf1, 
0x75, 0x2e, 0x0d, 0x8a, 0x73, 0x27, 0x07, 0x45, 0xa4, 0x04, 0xf1, 0x7f, + 0xab, 0x04, 0xe7, 0x83, 0x12, 0xd0, 0x13, 0xfe, 0xc0, 0x00, 0x90, 0xe1, 0xe3, 0x27, 0xe5, 0x00, + 0x92, 0xf4, 0x93, 0x3f, 0x73, 0x3c, 0x5e, 0x99, 0x4d, 0x73, 0x6c, 0x64, 0x4a, 0xd0, 0xf9, 0x48, + 0x46, 0xc4, 0x29, 0xf3, 0x21, 0xf6, 0x17, 0xe7, 0xc3, 0xe7, 0xb0, 0x1d, 0xba, 0x1c, 0xfd, 0x58, + 0x59, 0x88, 0x77, 0x0d, 0xb7, 0x45, 0xdb, 0xd9, 0x5f, 0xb3, 0x15, 0xd8, 0xa4, 0xa3, 0x81, 0x5c, + 0x68, 0xb1, 0x15, 0x07, 0xb8, 0x3a, 0x9b, 0xe6, 0x2e, 0x45, 0xc6, 0x09, 0xbd, 0xb2, 0x92, 0x66, + 0xe0, 0x89, 0xba, 0xff, 0x8a, 0x01, 0x36, 0x7a, 0x91, 0x9c, 0x1a, 0xc2, 0x83, 0x93, 0xd7, 0xea, + 0xaa, 0x28, 0xfe, 0xc4, 0xdd, 0x49, 0x63, 0x19, 0xc0, 0x25, 0x69, 0xf1, 0x20, 0x59, 0x1d, 0x8b, + 0x0c, 0x10, 0xbc, 0x5d, 0x68, 0x18, 0xff, 0xf7, 0xdb, 0xca, 0x7b, 0xbc, 0xe4, 0x43, 0xef, 0x1a, + 0x72, 0xa9, 0xd3, 0x9d, 0x6c, 0x37, 0xd4, 0x90, 0x21, 0xf5, 0xdb, 0x80, 0x94, 0x44, 0x9e, 0x38, + 0xab, 0x9d, 0xde, 0x86, 0x0b, 0xf4, 0x29, 0x44, 0x3d, 0x5e, 0x0f, 0x79, 0xa4, 0x6f, 0x24, 0xcf, + 0x1d, 0x59, 0xaa, 0x73, 0x30, 0xf5, 0x72, 0x0f, 0xd2, 0x15, 0xc3, 0x7c, 0x84, 0x5c, 0x09, 0x77, + 0x3a, 0x96, 0xdb, 0x41, 0xb6, 0x7b, 0xaa, 0xa7, 0xac, 0x77, 0xbc, 0x39, 0xca, 0x77, 0xb6, 0xa9, + 0x86, 0x24, 0xc2, 0x03, 0xd8, 0x21, 0x5c, 0xa2, 0xf9, 0xc8, 0xc6, 0xc3, 0x36, 0x6a, 0x34, 0xd1, + 0x4a, 0xc2, 0x5d, 0xd8, 0x36, 0xa2, 0x50, 0xca, 0xba, 0x2c, 0x16, 0xf2, 0x90, 0x21, 0xd4, 0x2a, + 0x32, 0x91, 0xd5, 0x75, 0xc5, 0xba, 0xe3, 0xcd, 0x81, 0xd3, 0x98, 0x85, 0x16, 0xa4, 0x15, 0xf4, + 0xd8, 0x9d, 0x3f, 0xbe, 0x54, 0x64, 0x0e, 0x4e, 0x8d, 0xe2, 0x2d, 0xb8, 0x68, 0xa3, 0xc7, 0xae, + 0xf7, 0x74, 0xd3, 0x7b, 0xc8, 0x1c, 0xd0, 0xb7, 0x5d, 0xe8, 0x1a, 0x88, 0xa8, 0x05, 0x35, 0x69, + 0x13, 0x6a, 0x8f, 0xf5, 0x95, 0xaf, 0xe3, 0xb0, 0x31, 0x1f, 0x0c, 0xec, 0x1b, 0xf0, 0xbf, 0x92, + 0xa8, 0x89, 0xba, 0xf6, 0xa0, 0x22, 0xeb, 0x35, 0xa5, 0xac, 0x94, 0xb5, 0xb2, 0x78, 0x50, 0x7e, + 0x28, 0x97, 0xf4, 0x9a, 0x52, 0xad, 0xc8, 0x52, 0xf9, 0xbd, 
0xb2, 0x5c, 0x4a, 0xad, 0x71, 0xdb, + 0xe3, 0x09, 0x9f, 0x0c, 0x89, 0xd8, 0x1b, 0x70, 0x25, 0xb0, 0x94, 0x0e, 0xca, 0xb2, 0xa2, 0xe9, + 0x55, 0x4d, 0xd4, 0xe4, 0x14, 0xc3, 0xc1, 0x78, 0xc2, 0xaf, 0x13, 0x19, 0xfb, 0x2a, 0xec, 0x84, + 0x70, 0x47, 0x4a, 0x55, 0x56, 0xaa, 0xb5, 0x2a, 0x85, 0xc6, 0xb8, 0x8b, 0xe3, 0x09, 0x9f, 0x58, + 0x88, 0xd9, 0x3c, 0x70, 0x11, 0xb4, 0x22, 0x4b, 0x5a, 0xf9, 0x48, 0xa1, 0xf0, 0x73, 0xdc, 0xd6, + 0x78, 0xc2, 0x43, 0x20, 0x67, 0x77, 0xe1, 0x6a, 0x08, 0x7f, 0x57, 0x54, 0x14, 0xf9, 0x80, 0x82, + 0xe3, 0x5c, 0x72, 0x3c, 0xe1, 0x2f, 0x50, 0x21, 0xfb, 0x3a, 0x5c, 0x0b, 0x90, 0x15, 0x51, 0xba, + 0x2f, 0x6b, 0xba, 0x74, 0x74, 0x78, 0x58, 0xd6, 0x0e, 0x65, 0x45, 0x4b, 0x9d, 0xe7, 0xd2, 0xe3, + 0x09, 0x9f, 0x22, 0x8a, 0x40, 0xce, 0xbe, 0x03, 0xfc, 0x09, 0x33, 0x51, 0xba, 0xaf, 0x1c, 0x7d, + 0x74, 0x20, 0x97, 0xde, 0x97, 0x7d, 0xdb, 0x75, 0x6e, 0x67, 0x3c, 0xe1, 0x2f, 0x13, 0xed, 0x92, + 0x92, 0x7d, 0xfb, 0x05, 0x04, 0xaa, 0x2c, 0xc9, 0xe5, 0x8a, 0xa6, 0x8b, 0xc5, 0xaa, 0xac, 0x48, + 0x72, 0xea, 0x02, 0x97, 0x19, 0x4f, 0xf8, 0x34, 0xd1, 0x52, 0x25, 0xd5, 0xb1, 0xb7, 0xe1, 0x7a, + 0x60, 0xaf, 0xc8, 0x1f, 0x6b, 0x7a, 0x55, 0xfe, 0xa0, 0xe6, 0xa9, 0x3c, 0x9a, 0x0f, 0x53, 0x1b, + 0x24, 0x70, 0x4f, 0x33, 0x57, 0x78, 0x72, 0x96, 0x87, 0x54, 0x60, 0x77, 0x57, 0x16, 0x4b, 0xb2, + 0x9a, 0x4a, 0x90, 0xca, 0x90, 0x1d, 0x17, 0x7f, 0xf2, 0x7d, 0x76, 0xad, 0x58, 0xfb, 0xf9, 0x59, + 0x96, 0x79, 0xfa, 0x2c, 0xcb, 0xfc, 0xfe, 0x2c, 0xcb, 0x7c, 0xf3, 0x3c, 0xbb, 0xf6, 0xf4, 0x79, + 0x76, 0xed, 0xd7, 0xe7, 0xd9, 0xb5, 0x87, 0x6f, 0x36, 0x2d, 0xb7, 0xd5, 0xaf, 0xe7, 0x4d, 0xdc, + 0x29, 0x98, 0xd8, 0xe9, 0x60, 0xa7, 0x60, 0xd5, 0xcd, 0x9b, 0x4d, 0x5c, 0xe8, 0xe0, 0x46, 0xbf, + 0x8d, 0x1c, 0xf2, 0x87, 0x73, 0x6b, 0xff, 0x26, 0x99, 0x87, 0x85, 0x36, 0x6a, 0x1a, 0xe6, 0xa8, + 0x30, 0xd8, 0xbb, 0x75, 0xab, 0xbe, 0xee, 0x0f, 0xb1, 0xd7, 0xfe, 0x08, 0x00, 0x00, 0xff, 0xff, + 0xef, 0x59, 0x70, 0x2d, 0x87, 0x0d, 0x00, 0x00, +} + +func (m *ClientState) Marshal() (dAtA []byte, 
err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClientState) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClientState) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.AllowUpdateAfterProposal { + i-- + if m.AllowUpdateAfterProposal { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.ConsensusState != nil { + { + size, err := m.ConsensusState.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintSolomachine(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.FrozenSequence != 0 { + i = encodeVarintSolomachine(dAtA, i, uint64(m.FrozenSequence)) + i-- + dAtA[i] = 0x10 + } + if m.Sequence != 0 { + i = encodeVarintSolomachine(dAtA, i, uint64(m.Sequence)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ConsensusState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConsensusState) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConsensusState) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Timestamp != 0 { + i = encodeVarintSolomachine(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x18 + } + if len(m.Diversifier) > 0 { + i -= len(m.Diversifier) + copy(dAtA[i:], m.Diversifier) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Diversifier))) + i-- + dAtA[i] = 0x12 + } + if m.PublicKey != nil { + { + size, err := m.PublicKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintSolomachine(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Header) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Header) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Header) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.NewDiversifier) > 0 { + i -= len(m.NewDiversifier) + copy(dAtA[i:], m.NewDiversifier) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.NewDiversifier))) + i-- + dAtA[i] = 0x2a + } + if m.NewPublicKey != nil { + { + size, err := m.NewPublicKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintSolomachine(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if len(m.Signature) > 0 { + i -= len(m.Signature) + copy(dAtA[i:], m.Signature) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Signature))) + i-- + dAtA[i] = 0x1a + } + if m.Timestamp != 0 { + i = encodeVarintSolomachine(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x10 + } + if m.Sequence != 0 { + i = encodeVarintSolomachine(dAtA, i, uint64(m.Sequence)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Misbehaviour) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Misbehaviour) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Misbehaviour) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.SignatureTwo != nil { + { + size, err := m.SignatureTwo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + 
return 0, err + } + i -= size + i = encodeVarintSolomachine(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.SignatureOne != nil { + { + size, err := m.SignatureOne.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintSolomachine(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Sequence != 0 { + i = encodeVarintSolomachine(dAtA, i, uint64(m.Sequence)) + i-- + dAtA[i] = 0x10 + } + if len(m.ClientId) > 0 { + i -= len(m.ClientId) + copy(dAtA[i:], m.ClientId) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.ClientId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SignatureAndData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SignatureAndData) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SignatureAndData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Timestamp != 0 { + i = encodeVarintSolomachine(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x20 + } + if len(m.Data) > 0 { + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0x1a + } + if m.DataType != 0 { + i = encodeVarintSolomachine(dAtA, i, uint64(m.DataType)) + i-- + dAtA[i] = 0x10 + } + if len(m.Signature) > 0 { + i -= len(m.Signature) + copy(dAtA[i:], m.Signature) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Signature))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TimestampedSignatureData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TimestampedSignatureData) 
MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TimestampedSignatureData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Timestamp != 0 { + i = encodeVarintSolomachine(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x10 + } + if len(m.SignatureData) > 0 { + i -= len(m.SignatureData) + copy(dAtA[i:], m.SignatureData) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.SignatureData))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SignBytes) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SignBytes) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SignBytes) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Data) > 0 { + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0x2a + } + if m.DataType != 0 { + i = encodeVarintSolomachine(dAtA, i, uint64(m.DataType)) + i-- + dAtA[i] = 0x20 + } + if len(m.Diversifier) > 0 { + i -= len(m.Diversifier) + copy(dAtA[i:], m.Diversifier) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Diversifier))) + i-- + dAtA[i] = 0x1a + } + if m.Timestamp != 0 { + i = encodeVarintSolomachine(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x10 + } + if m.Sequence != 0 { + i = encodeVarintSolomachine(dAtA, i, uint64(m.Sequence)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *HeaderData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HeaderData) MarshalTo(dAtA []byte) 
(int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HeaderData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.NewDiversifier) > 0 { + i -= len(m.NewDiversifier) + copy(dAtA[i:], m.NewDiversifier) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.NewDiversifier))) + i-- + dAtA[i] = 0x12 + } + if m.NewPubKey != nil { + { + size, err := m.NewPubKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintSolomachine(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ClientStateData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClientStateData) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClientStateData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ClientState != nil { + { + size, err := m.ClientState.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintSolomachine(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ConsensusStateData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConsensusStateData) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConsensusStateData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := 
len(dAtA) + _ = i + var l int + _ = l + if m.ConsensusState != nil { + { + size, err := m.ConsensusState.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintSolomachine(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ConnectionStateData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConnectionStateData) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConnectionStateData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Connection != nil { + { + size, err := m.Connection.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintSolomachine(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ChannelStateData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ChannelStateData) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ChannelStateData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Channel != nil { + { + size, err := m.Channel.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintSolomachine(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *PacketCommitmentData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PacketCommitmentData) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PacketCommitmentData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Commitment) > 0 { + i -= len(m.Commitment) + copy(dAtA[i:], m.Commitment) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Commitment))) + i-- + dAtA[i] = 0x12 + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *PacketAcknowledgementData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PacketAcknowledgementData) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PacketAcknowledgementData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Acknowledgement) > 0 { + i -= len(m.Acknowledgement) + copy(dAtA[i:], m.Acknowledgement) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Acknowledgement))) + i-- + dAtA[i] = 0x12 + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + } + return 
len(dAtA) - i, nil +} + +func (m *PacketReceiptAbsenceData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PacketReceiptAbsenceData) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PacketReceiptAbsenceData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *NextSequenceRecvData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NextSequenceRecvData) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NextSequenceRecvData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.NextSeqRecv != 0 { + i = encodeVarintSolomachine(dAtA, i, uint64(m.NextSeqRecv)) + i-- + dAtA[i] = 0x10 + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintSolomachine(dAtA []byte, offset int, v uint64) int { + offset -= sovSolomachine(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ClientState) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Sequence != 0 { + n += 1 + sovSolomachine(uint64(m.Sequence)) + } + if m.FrozenSequence != 0 { + n += 1 + 
sovSolomachine(uint64(m.FrozenSequence)) + } + if m.ConsensusState != nil { + l = m.ConsensusState.Size() + n += 1 + l + sovSolomachine(uint64(l)) + } + if m.AllowUpdateAfterProposal { + n += 2 + } + return n +} + +func (m *ConsensusState) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PublicKey != nil { + l = m.PublicKey.Size() + n += 1 + l + sovSolomachine(uint64(l)) + } + l = len(m.Diversifier) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + } + if m.Timestamp != 0 { + n += 1 + sovSolomachine(uint64(m.Timestamp)) + } + return n +} + +func (m *Header) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Sequence != 0 { + n += 1 + sovSolomachine(uint64(m.Sequence)) + } + if m.Timestamp != 0 { + n += 1 + sovSolomachine(uint64(m.Timestamp)) + } + l = len(m.Signature) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + } + if m.NewPublicKey != nil { + l = m.NewPublicKey.Size() + n += 1 + l + sovSolomachine(uint64(l)) + } + l = len(m.NewDiversifier) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + } + return n +} + +func (m *Misbehaviour) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ClientId) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + } + if m.Sequence != 0 { + n += 1 + sovSolomachine(uint64(m.Sequence)) + } + if m.SignatureOne != nil { + l = m.SignatureOne.Size() + n += 1 + l + sovSolomachine(uint64(l)) + } + if m.SignatureTwo != nil { + l = m.SignatureTwo.Size() + n += 1 + l + sovSolomachine(uint64(l)) + } + return n +} + +func (m *SignatureAndData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Signature) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + } + if m.DataType != 0 { + n += 1 + sovSolomachine(uint64(m.DataType)) + } + l = len(m.Data) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + } + if m.Timestamp != 0 { + n += 1 + sovSolomachine(uint64(m.Timestamp)) + } + return n +} + +func (m 
*TimestampedSignatureData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.SignatureData) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + } + if m.Timestamp != 0 { + n += 1 + sovSolomachine(uint64(m.Timestamp)) + } + return n +} + +func (m *SignBytes) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Sequence != 0 { + n += 1 + sovSolomachine(uint64(m.Sequence)) + } + if m.Timestamp != 0 { + n += 1 + sovSolomachine(uint64(m.Timestamp)) + } + l = len(m.Diversifier) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + } + if m.DataType != 0 { + n += 1 + sovSolomachine(uint64(m.DataType)) + } + l = len(m.Data) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + } + return n +} + +func (m *HeaderData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NewPubKey != nil { + l = m.NewPubKey.Size() + n += 1 + l + sovSolomachine(uint64(l)) + } + l = len(m.NewDiversifier) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + } + return n +} + +func (m *ClientStateData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + } + if m.ClientState != nil { + l = m.ClientState.Size() + n += 1 + l + sovSolomachine(uint64(l)) + } + return n +} + +func (m *ConsensusStateData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + } + if m.ConsensusState != nil { + l = m.ConsensusState.Size() + n += 1 + l + sovSolomachine(uint64(l)) + } + return n +} + +func (m *ConnectionStateData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + } + if m.Connection != nil { + l = m.Connection.Size() + n += 1 + l + sovSolomachine(uint64(l)) + } + return n +} + +func (m *ChannelStateData) Size() (n int) { + if m == nil { + return 0 + } + var l 
int + _ = l + l = len(m.Path) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + } + if m.Channel != nil { + l = m.Channel.Size() + n += 1 + l + sovSolomachine(uint64(l)) + } + return n +} + +func (m *PacketCommitmentData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + } + l = len(m.Commitment) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + } + return n +} + +func (m *PacketAcknowledgementData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + } + l = len(m.Acknowledgement) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + } + return n +} + +func (m *PacketReceiptAbsenceData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + } + return n +} + +func (m *NextSequenceRecvData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + } + if m.NextSeqRecv != 0 { + n += 1 + sovSolomachine(uint64(m.NextSeqRecv)) + } + return n +} + +func sovSolomachine(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozSolomachine(x uint64) (n int) { + return sovSolomachine(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *ClientState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClientState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return 
fmt.Errorf("proto: ClientState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Sequence", wireType) + } + m.Sequence = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Sequence |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FrozenSequence", wireType) + } + m.FrozenSequence = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.FrozenSequence |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConsensusState", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConsensusState == nil { + m.ConsensusState = &ConsensusState{} + } + if err := m.ConsensusState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowUpdateAfterProposal", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift 
+ if b < 0x80 { + break + } + } + m.AllowUpdateAfterProposal = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipSolomachine(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSolomachine + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConsensusState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConsensusState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConsensusState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PublicKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PublicKey == nil { + m.PublicKey = &types.Any{} + } + if err := m.PublicKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Diversifier", wireType) + } + var stringLen uint64 
+ for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Diversifier = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipSolomachine(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSolomachine + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Header) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Header: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Header: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + 
return fmt.Errorf("proto: wrong wireType = %d for field Sequence", wireType) + } + m.Sequence = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Sequence |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Signature", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Signature = append(m.Signature[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Signature == nil { + m.Signature = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NewPublicKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NewPublicKey == nil { + m.NewPublicKey = &types.Any{} + } + if err := m.NewPublicKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NewDiversifier", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NewDiversifier = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSolomachine(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSolomachine + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Misbehaviour) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + 
preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Misbehaviour: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Misbehaviour: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClientId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Sequence", wireType) + } + m.Sequence = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Sequence |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SignatureOne", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SignatureOne == nil { + m.SignatureOne = &SignatureAndData{} + } + if err := m.SignatureOne.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SignatureTwo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SignatureTwo == nil { + m.SignatureTwo = &SignatureAndData{} + } + if err := m.SignatureTwo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSolomachine(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSolomachine + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SignatureAndData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if 
wireType == 4 { + return fmt.Errorf("proto: SignatureAndData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SignatureAndData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Signature", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Signature = append(m.Signature[:0], dAtA[iNdEx:postIndex]...) + if m.Signature == nil { + m.Signature = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DataType", wireType) + } + m.DataType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DataType |= DataType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipSolomachine(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSolomachine + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TimestampedSignatureData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TimestampedSignatureData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TimestampedSignatureData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SignatureData", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + byteLen + if 
postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SignatureData = append(m.SignatureData[:0], dAtA[iNdEx:postIndex]...) + if m.SignatureData == nil { + m.SignatureData = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipSolomachine(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSolomachine + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SignBytes) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SignBytes: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SignBytes: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Sequence", wireType) + } + m.Sequence = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Sequence |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Diversifier", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Diversifier = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DataType", wireType) + } + m.DataType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DataType |= DataType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + byteLen + if 
postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSolomachine(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSolomachine + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HeaderData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HeaderData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HeaderData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NewPubKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NewPubKey == nil { + m.NewPubKey = &types.Any{} + } + if err := m.NewPubKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = 
postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NewDiversifier", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NewDiversifier = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSolomachine(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSolomachine + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClientStateData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClientStateData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClientStateData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx 
>= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = append(m.Path[:0], dAtA[iNdEx:postIndex]...) + if m.Path == nil { + m.Path = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientState", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ClientState == nil { + m.ClientState = &types.Any{} + } + if err := m.ClientState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSolomachine(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSolomachine + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConsensusStateData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType 
:= int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConsensusStateData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConsensusStateData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = append(m.Path[:0], dAtA[iNdEx:postIndex]...) + if m.Path == nil { + m.Path = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConsensusState", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConsensusState == nil { + m.ConsensusState = &types.Any{} + } + if err := m.ConsensusState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSolomachine(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSolomachine + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 
skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConnectionStateData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConnectionStateData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConnectionStateData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = append(m.Path[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Path == nil { + m.Path = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Connection", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Connection == nil { + m.Connection = &types1.ConnectionEnd{} + } + if err := m.Connection.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSolomachine(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSolomachine + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ChannelStateData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ChannelStateData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ChannelStateData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var byteLen int + 
for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = append(m.Path[:0], dAtA[iNdEx:postIndex]...) + if m.Path == nil { + m.Path = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Channel", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Channel == nil { + m.Channel = &types2.Channel{} + } + if err := m.Channel.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSolomachine(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSolomachine + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PacketCommitmentData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PacketCommitmentData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PacketCommitmentData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = append(m.Path[:0], dAtA[iNdEx:postIndex]...) + if m.Path == nil { + m.Path = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Commitment", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Commitment = append(m.Commitment[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Commitment == nil { + m.Commitment = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSolomachine(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSolomachine + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PacketAcknowledgementData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PacketAcknowledgementData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PacketAcknowledgementData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = append(m.Path[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Path == nil { + m.Path = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Acknowledgement", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Acknowledgement = append(m.Acknowledgement[:0], dAtA[iNdEx:postIndex]...) + if m.Acknowledgement == nil { + m.Acknowledgement = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSolomachine(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSolomachine + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PacketReceiptAbsenceData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PacketReceiptAbsenceData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PacketReceiptAbsenceData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + 
var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = append(m.Path[:0], dAtA[iNdEx:postIndex]...) + if m.Path == nil { + m.Path = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSolomachine(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSolomachine + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NextSequenceRecvData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NextSequenceRecvData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NextSequenceRecvData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + 
break + } + } + if byteLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = append(m.Path[:0], dAtA[iNdEx:postIndex]...) + if m.Path == nil { + m.Path = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NextSeqRecv", wireType) + } + m.NextSeqRecv = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NextSeqRecv |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipSolomachine(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSolomachine + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipSolomachine(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSolomachine + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSolomachine + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSolomachine + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) 
<< shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthSolomachine + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupSolomachine + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthSolomachine + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthSolomachine = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowSolomachine = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupSolomachine = fmt.Errorf("proto: unexpected end of group") +) diff --git a/modules/core/02-client/legacy/v100/store.go b/modules/core/02-client/legacy/v100/store.go index 8318584af16..7c558e1395e 100644 --- a/modules/core/02-client/legacy/v100/store.go +++ b/modules/core/02-client/legacy/v100/store.go @@ -12,19 +12,21 @@ import ( clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types" host "github.com/cosmos/ibc-go/modules/core/24-host" "github.com/cosmos/ibc-go/modules/core/exported" + smtypes "github.com/cosmos/ibc-go/modules/light-clients/06-solomachine/types" ibctmtypes "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types" ) // MigrateStore performs in-place store migrations from SDK v0.40 of the IBC module to v1.0.0 of ibc-go. 
// The migration includes: // -// - Pruning solo machine clients +// - Migrating solo machine client states from v1 to v2 protobuf definition +// - Pruning all solo machine consensus states from the client stores // - Pruning expired tendermint consensus states func MigrateStore(ctx sdk.Context, storeKey sdk.StoreKey, cdc codec.BinaryCodec) (err error) { store := ctx.KVStore(storeKey) iterator := sdk.KVStorePrefixIterator(store, host.KeyClientStorePrefix) - var clients []clienttypes.IdentifiedClientState + var clients []string defer iterator.Close() for ; iterator.Valid(); iterator.Next() { @@ -35,25 +37,22 @@ func MigrateStore(ctx sdk.Context, storeKey sdk.StoreKey, cdc codec.BinaryCodec) // key is clients/{clientid}/clientState // Thus, keySplit[1] is clientID - clientID := keySplit[1] - clientState := types.MustUnmarshalClientState(cdc, iterator.Value()) - clients = append(clients, clienttypes.NewIdentifiedClientState(clientID, clientState)) + clients = append(clients, keySplit[1]) } - for _, client := range clients { - clientType, _, err := types.ParseClientIdentifier(client.ClientId) + for _, clientID := range clients { + clientType, _, err := types.ParseClientIdentifier(clientID) if err != nil { return err } - clientPrefix := []byte(fmt.Sprintf("%s/%s/", host.KeyClientStorePrefix, client.ClientId)) + clientPrefix := []byte(fmt.Sprintf("%s/%s/", host.KeyClientStorePrefix, clientID)) clientStore := prefix.NewStore(ctx.KVStore(storeKey), clientPrefix) switch clientType { case exported.Solomachine: - pruneSolomachine(clientStore) - store.Delete([]byte(fmt.Sprintf("%s/%s", host.KeyClientStorePrefix, client.ClientId))) + migrateSolomachine(clientStore, clientID) case exported.Tendermint: clientPrefix := []byte(fmt.Sprintf("%s/%s/", host.KeyClientStorePrefix, client.ClientId)) @@ -80,11 +79,42 @@ func MigrateStore(ctx sdk.Context, storeKey sdk.StoreKey, cdc codec.BinaryCodec) return nil } -// pruneSolomachine removes the client state and all consensus states -// 
stored in the provided clientStore -func pruneSolomachine(clientStore sdk.KVStore) { - // delete client state - clientStore.Delete(host.ClientStateKey()) +// migrateSolomachine migrates the solomachine from v1 to v2 solo machine protobuf definition. +// It also deletes all consensus states stored in the client store as they are not necessary +// and reference the v1 consensus state type. +func migrateSolomachine(clientStore sdk.KVStore, cdc codec.BinaryCodec, clientID string) error { + // get legacy solo machine from client store + bz := clientStore.Get(host.ClientStateKey()) + if bz == nil { + return clienttypes.ErrClientNotFound + } + + var clientState *ClientState + if err := cdc.UnmarshalInterface(bz, &clientState); err != nil { + return err + } + + isFrozen := clientState.FrozenSequence != 0 + consensusState := &smtypes.ConsensusState{ + PublicKey: clientState.ConsensusState.PublicKey, + Diversifier: clientState.ConsensusState.Diversifier, + Timestamp: clientState.ConsensusState.Timestamp, + } + + newSolomachine := &smtypes.ClientState{ + Sequence: clientState.Sequence, + IsFrozen: isFrozen, + ConsensusState: consensusState, + AllowUpdateAfterProposal: clientState.AllowUpdateAfterProposal, + } + + bz, err := clienttypes.MarshalClientState(cdc, newSolomachine) + if err != nil { + return err + } + + // update solomachine in store + clientStore.Set(host.ClientStateKey(), bz) // collect consensus states to be pruned iterator := sdk.KVStorePrefixIterator(clientStore, []byte(host.KeyConsensusStatePrefix)) @@ -104,4 +134,6 @@ func pruneSolomachine(clientStore sdk.KVStore) { for _, height := range heights { clientStore.Delete(host.ConsensusStateKey(height)) } + + return nil } diff --git a/modules/core/03-connection/keeper/migrations.go b/modules/core/03-connection/keeper/migrations.go deleted file mode 100644 index 69e8d6067a7..00000000000 --- a/modules/core/03-connection/keeper/migrations.go +++ /dev/null @@ -1,25 +0,0 @@ -package keeper - -import ( - sdk 
"github.com/cosmos/cosmos-sdk/types" - - "github.com/cosmos/ibc-go/modules/core/03-connection/legacy/v100" -) - -// Migrator is a struct for handling in-place store migrations. -type Migrator struct { - keeper Keeper -} - -// NewMigrator returns a new Migrator. -func NewMigrator(keeper Keeper) Migrator { - return Migrator{keeper: keeper} -} - -// Migrate1to2 migrates from version 1 to 2. -// This migration prunes: -// -// - connections whose client has been deleted (solomachines) -func (m Migrator) Migrate1to2(ctx sdk.Context) error { - return v100.MigrateStore(ctx, m.keeper.storeKey, m.keeper.cdc) -} diff --git a/modules/core/03-connection/legacy/v100/store.go b/modules/core/03-connection/legacy/v100/store.go deleted file mode 100644 index 15adf63d0ee..00000000000 --- a/modules/core/03-connection/legacy/v100/store.go +++ /dev/null @@ -1,41 +0,0 @@ -package v100 - -import ( - "github.com/cosmos/cosmos-sdk/codec" - sdk "github.com/cosmos/cosmos-sdk/types" - - "github.com/cosmos/ibc-go/modules/core/03-connection/types" - host "github.com/cosmos/ibc-go/modules/core/24-host" -) - -// MigrateStore performs in-place store migrations from SDK v0.40 of the IBC module to v1.0.0 of ibc-go. 
-// The migration includes: -// -// - Pruning all connections whose client has been removed (solo machines) -func MigrateStore(ctx sdk.Context, storeKey sdk.StoreKey, cdc codec.BinaryCodec) (err error) { - var connections []types.IdentifiedConnection - - // clients and connections use the same store key - store := ctx.KVStore(storeKey) - iterator := sdk.KVStorePrefixIterator(store, []byte(host.KeyConnectionPrefix)) - - defer iterator.Close() - for ; iterator.Valid(); iterator.Next() { - var connection types.ConnectionEnd - cdc.MustUnmarshal(iterator.Value(), &connection) - - bz := store.Get(host.FullClientStateKey(connection.ClientId)) - if bz == nil { - // client has been pruned, remove connection as well - connectionID := host.MustParseConnectionPath(string(iterator.Key())) - connections = append(connections, types.NewIdentifiedConnection(connectionID, connection)) - } - - } - - for _, conn := range connections { - store.Delete(host.ConnectionKey(conn.Id)) - } - - return nil -} diff --git a/modules/core/04-channel/keeper/migrations.go b/modules/core/04-channel/keeper/migrations.go deleted file mode 100644 index 71e861ac091..00000000000 --- a/modules/core/04-channel/keeper/migrations.go +++ /dev/null @@ -1,25 +0,0 @@ -package keeper - -import ( - sdk "github.com/cosmos/cosmos-sdk/types" - - "github.com/cosmos/ibc-go/modules/core/04-channel/legacy/v100" -) - -// Migrator is a struct for handling in-place store migrations. -type Migrator struct { - keeper Keeper -} - -// NewMigrator returns a new Migrator. -func NewMigrator(keeper Keeper) Migrator { - return Migrator{keeper: keeper} -} - -// Migrate1to2 migrates from version 1 to 2. 
-// This migration prunes: -// -// - channels whose connection has been deleted (solomachines) -func (m Migrator) Migrate1to2(ctx sdk.Context) error { - return v100.MigrateStore(ctx, m.keeper.storeKey, m.keeper.cdc) -} diff --git a/modules/core/04-channel/legacy/v100/store.go b/modules/core/04-channel/legacy/v100/store.go deleted file mode 100644 index 4b6188de451..00000000000 --- a/modules/core/04-channel/legacy/v100/store.go +++ /dev/null @@ -1,41 +0,0 @@ -package v100 - -import ( - "github.com/cosmos/cosmos-sdk/codec" - sdk "github.com/cosmos/cosmos-sdk/types" - - "github.com/cosmos/ibc-go/modules/core/04-channel/types" - host "github.com/cosmos/ibc-go/modules/core/24-host" -) - -// MigrateStore performs in-place store migrations from SDK v0.40 of the IBC module to v1.0.0 of ibc-go. -// The migration includes: -// -// - Pruning all channels whose connection has been removed (solo machines) -func MigrateStore(ctx sdk.Context, storeKey sdk.StoreKey, cdc codec.BinaryCodec) (err error) { - var channels []types.IdentifiedChannel - - // connections and channels use the same store key - store := ctx.KVStore(storeKey) - - iterator := sdk.KVStorePrefixIterator(store, []byte(host.KeyChannelEndPrefix)) - - defer iterator.Close() - for ; iterator.Valid(); iterator.Next() { - var channel types.Channel - cdc.MustUnmarshal(iterator.Value(), &channel) - - bz := store.Get(host.ConnectionKey(channel.ConnectionHops[0])) - if bz == nil { - // connection has been pruned, remove channel as well - portID, channelID := host.MustParseChannelPath(string(iterator.Key())) - channels = append(channels, types.NewIdentifiedChannel(portID, channelID, channel)) - } - } - - for _, channel := range channels { - store.Delete(host.ChannelKey(channel.PortId, channel.ChannelId)) - } - - return nil -} diff --git a/modules/core/keeper/migrations.go b/modules/core/keeper/migrations.go index 526fd404192..7e789ba7161 100644 --- a/modules/core/keeper/migrations.go +++ b/modules/core/keeper/migrations.go @@ 
-20,28 +20,16 @@ func NewMigrator(keeper Keeper) Migrator { // Migrate1to2 migrates from version 1 to 2. // This migration prunes: -// - solo machine clients -// - connections using solo machines -// - channels using solo machines +// - solo machine consensus states // - expired tendermint consensus states // -// Connections are removed if the associated client does not exist. -// Channels are removed if the associated connection does not exist. +// This migration migrates: +// - solo machine client state from protobuf definition v1 to v2 func (m Migrator) Migrate1to2(ctx sdk.Context) error { clientMigrator := clientkeeper.NewMigrator(m.keeper.ClientKeeper) if err := clientMigrator.Migrate1to2(ctx); err != nil { return err } - connectionMigrator := connectionkeeper.NewMigrator(m.keeper.ConnectionKeeper) - if err := connectionMigrator.Migrate1to2(ctx); err != nil { - return err - } - - channelMigrator := channelkeeper.NewMigrator(m.keeper.ChannelKeeper) - if err := channelMigrator.Migrate1to2(ctx); err != nil { - return err - } - return nil } diff --git a/modules/light-clients/07-tendermint/types/tendermint.pb.go b/modules/light-clients/07-tendermint/types/tendermint.pb.go index 84a79b66668..9a0645a4462 100644 --- a/modules/light-clients/07-tendermint/types/tendermint.pb.go +++ b/modules/light-clients/07-tendermint/types/tendermint.pb.go @@ -11,10 +11,10 @@ import ( _ "github.com/gogo/protobuf/gogoproto" proto "github.com/gogo/protobuf/proto" github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" - _ "github.com/golang/protobuf/ptypes/duration" - _ "github.com/golang/protobuf/ptypes/timestamp" github_com_tendermint_tendermint_libs_bytes "github.com/tendermint/tendermint/libs/bytes" types2 "github.com/tendermint/tendermint/proto/tendermint/types" + _ "google.golang.org/protobuf/types/known/durationpb" + _ "google.golang.org/protobuf/types/known/timestamppb" io "io" math "math" math_bits "math/bits" diff --git 
a/proto/ibc/lightclients/solomachine/v1/solomachine.proto b/proto/ibc/lightclients/solomachine/v1/solomachine.proto new file mode 100644 index 00000000000..4ba0da259a7 --- /dev/null +++ b/proto/ibc/lightclients/solomachine/v1/solomachine.proto @@ -0,0 +1,189 @@ +syntax = "proto3"; + +package ibc.lightclients.solomachine.v1; + +option go_package = "github.com/cosmos/ibc-go/modules/core/02-client/legacy/v100"; + +import "ibc/core/connection/v1/connection.proto"; +import "ibc/core/channel/v1/channel.proto"; +import "gogoproto/gogo.proto"; +import "google/protobuf/any.proto"; + +// ClientState defines a solo machine client that tracks the current consensus +// state and if the client is frozen. +message ClientState { + option (gogoproto.goproto_getters) = false; + // latest sequence of the client state + uint64 sequence = 1; + // frozen sequence of the solo machine + uint64 frozen_sequence = 2 [(gogoproto.moretags) = "yaml:\"frozen_sequence\""]; + ConsensusState consensus_state = 3 [(gogoproto.moretags) = "yaml:\"consensus_state\""]; + // when set to true, will allow governance to update a solo machine client. + // The client will be unfrozen if it is frozen. + bool allow_update_after_proposal = 4 [(gogoproto.moretags) = "yaml:\"allow_update_after_proposal\""]; +} + +// ConsensusState defines a solo machine consensus state. The sequence of a +// consensus state is contained in the "height" key used in storing the +// consensus state. +message ConsensusState { + option (gogoproto.goproto_getters) = false; + // public key of the solo machine + google.protobuf.Any public_key = 1 [(gogoproto.moretags) = "yaml:\"public_key\""]; + // diversifier allows the same public key to be re-used across different solo + // machine clients (potentially on different chains) without being considered + // misbehaviour. 
+ string diversifier = 2; + uint64 timestamp = 3; +} + +// Header defines a solo machine consensus header +message Header { + option (gogoproto.goproto_getters) = false; + // sequence to update solo machine public key at + uint64 sequence = 1; + uint64 timestamp = 2; + bytes signature = 3; + google.protobuf.Any new_public_key = 4 [(gogoproto.moretags) = "yaml:\"new_public_key\""]; + string new_diversifier = 5 [(gogoproto.moretags) = "yaml:\"new_diversifier\""]; +} + +// Misbehaviour defines misbehaviour for a solo machine which consists +// of a sequence and two signatures over different messages at that sequence. +message Misbehaviour { + option (gogoproto.goproto_getters) = false; + string client_id = 1 [(gogoproto.moretags) = "yaml:\"client_id\""]; + uint64 sequence = 2; + SignatureAndData signature_one = 3 [(gogoproto.moretags) = "yaml:\"signature_one\""]; + SignatureAndData signature_two = 4 [(gogoproto.moretags) = "yaml:\"signature_two\""]; +} + +// SignatureAndData contains a signature and the data signed over to create that +// signature. +message SignatureAndData { + option (gogoproto.goproto_getters) = false; + bytes signature = 1; + DataType data_type = 2 [(gogoproto.moretags) = "yaml:\"data_type\""]; + bytes data = 3; + uint64 timestamp = 4; +} + +// TimestampedSignatureData contains the signature data and the timestamp of the +// signature. +message TimestampedSignatureData { + option (gogoproto.goproto_getters) = false; + bytes signature_data = 1 [(gogoproto.moretags) = "yaml:\"signature_data\""]; + uint64 timestamp = 2; +} + +// SignBytes defines the signed bytes used for signature verification. +message SignBytes { + option (gogoproto.goproto_getters) = false; + + uint64 sequence = 1; + uint64 timestamp = 2; + string diversifier = 3; + // type of the data used + DataType data_type = 4 [(gogoproto.moretags) = "yaml:\"data_type\""]; + // marshaled data + bytes data = 5; +} + +// DataType defines the type of solo machine proof being created. 
This is done +// to preserve uniqueness of different data sign byte encodings. +enum DataType { + option (gogoproto.goproto_enum_prefix) = false; + + // Default State + DATA_TYPE_UNINITIALIZED_UNSPECIFIED = 0 [(gogoproto.enumvalue_customname) = "UNSPECIFIED"]; + // Data type for client state verification + DATA_TYPE_CLIENT_STATE = 1 [(gogoproto.enumvalue_customname) = "CLIENT"]; + // Data type for consensus state verification + DATA_TYPE_CONSENSUS_STATE = 2 [(gogoproto.enumvalue_customname) = "CONSENSUS"]; + // Data type for connection state verification + DATA_TYPE_CONNECTION_STATE = 3 [(gogoproto.enumvalue_customname) = "CONNECTION"]; + // Data type for channel state verification + DATA_TYPE_CHANNEL_STATE = 4 [(gogoproto.enumvalue_customname) = "CHANNEL"]; + // Data type for packet commitment verification + DATA_TYPE_PACKET_COMMITMENT = 5 [(gogoproto.enumvalue_customname) = "PACKETCOMMITMENT"]; + // Data type for packet acknowledgement verification + DATA_TYPE_PACKET_ACKNOWLEDGEMENT = 6 [(gogoproto.enumvalue_customname) = "PACKETACKNOWLEDGEMENT"]; + // Data type for packet receipt absence verification + DATA_TYPE_PACKET_RECEIPT_ABSENCE = 7 [(gogoproto.enumvalue_customname) = "PACKETRECEIPTABSENCE"]; + // Data type for next sequence recv verification + DATA_TYPE_NEXT_SEQUENCE_RECV = 8 [(gogoproto.enumvalue_customname) = "NEXTSEQUENCERECV"]; + // Data type for header verification + DATA_TYPE_HEADER = 9 [(gogoproto.enumvalue_customname) = "HEADER"]; +} + +// HeaderData returns the SignBytes data for update verification. +message HeaderData { + option (gogoproto.goproto_getters) = false; + + // header public key + google.protobuf.Any new_pub_key = 1 [(gogoproto.moretags) = "yaml:\"new_pub_key\""]; + // header diversifier + string new_diversifier = 2 [(gogoproto.moretags) = "yaml:\"new_diversifier\""]; +} + +// ClientStateData returns the SignBytes data for client state verification. 
+message ClientStateData { + option (gogoproto.goproto_getters) = false; + + bytes path = 1; + google.protobuf.Any client_state = 2 [(gogoproto.moretags) = "yaml:\"client_state\""]; +} + +// ConsensusStateData returns the SignBytes data for consensus state +// verification. +message ConsensusStateData { + option (gogoproto.goproto_getters) = false; + + bytes path = 1; + google.protobuf.Any consensus_state = 2 [(gogoproto.moretags) = "yaml:\"consensus_state\""]; +} + +// ConnectionStateData returns the SignBytes data for connection state +// verification. +message ConnectionStateData { + option (gogoproto.goproto_getters) = false; + + bytes path = 1; + ibc.core.connection.v1.ConnectionEnd connection = 2; +} + +// ChannelStateData returns the SignBytes data for channel state +// verification. +message ChannelStateData { + option (gogoproto.goproto_getters) = false; + + bytes path = 1; + ibc.core.channel.v1.Channel channel = 2; +} + +// PacketCommitmentData returns the SignBytes data for packet commitment +// verification. +message PacketCommitmentData { + bytes path = 1; + bytes commitment = 2; +} + +// PacketAcknowledgementData returns the SignBytes data for acknowledgement +// verification. +message PacketAcknowledgementData { + bytes path = 1; + bytes acknowledgement = 2; +} + +// PacketReceiptAbsenceData returns the SignBytes data for +// packet receipt absence verification. +message PacketReceiptAbsenceData { + bytes path = 1; +} + +// NextSequenceRecvData returns the SignBytes data for verification of the next +// sequence to be received. 
+message NextSequenceRecvData { + bytes path = 1; + uint64 next_seq_recv = 2 [(gogoproto.moretags) = "yaml:\"next_seq_recv\""]; +} From db0db211f3bff70a430de9b9f85b66a3b4b1eb18 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Colin=20Axn=C3=A9r?= <25233464+colin-axner@users.noreply.github.com> Date: Mon, 7 Jun 2021 13:24:42 +0200 Subject: [PATCH 04/21] fix build --- modules/core/02-client/keeper/migrations.go | 8 +++----- modules/core/02-client/legacy/v100/store.go | 21 +++++++++++---------- modules/core/keeper/migrations.go | 2 -- 3 files changed, 14 insertions(+), 17 deletions(-) diff --git a/modules/core/02-client/keeper/migrations.go b/modules/core/02-client/keeper/migrations.go index 6200195acb6..3076b628129 100644 --- a/modules/core/02-client/keeper/migrations.go +++ b/modules/core/02-client/keeper/migrations.go @@ -18,13 +18,11 @@ func NewMigrator(keeper Keeper) Migrator { // Migrate1to2 migrates from version 1 to 2. // This migration prunes: -// - solo machine clients -// - connections using solo machines -// - channels using solo machines +// - solo machine consensus states // - expired tendermint consensus states // -// Connections are removed if the associated client does not exist. -// Channels are removed if the associated connection does not exist. 
+// This migration migrates: +// - solo machine client states from v1 to v2 protobuf definition func (m Migrator) Migrate1to2(ctx sdk.Context) error { return v100.MigrateStore(ctx, m.keeper.storeKey, m.keeper.cdc) } diff --git a/modules/core/02-client/legacy/v100/store.go b/modules/core/02-client/legacy/v100/store.go index 7c558e1395e..df0ae13e73b 100644 --- a/modules/core/02-client/legacy/v100/store.go +++ b/modules/core/02-client/legacy/v100/store.go @@ -52,22 +52,23 @@ func MigrateStore(ctx sdk.Context, storeKey sdk.StoreKey, cdc codec.BinaryCodec) switch clientType { case exported.Solomachine: - migrateSolomachine(clientStore, clientID) + migrateSolomachine(clientStore, cdc, clientID) case exported.Tendermint: - clientPrefix := []byte(fmt.Sprintf("%s/%s/", host.KeyClientStorePrefix, client.ClientId)) + clientPrefix := []byte(fmt.Sprintf("%s/%s/", host.KeyClientStorePrefix, clientID)) clientStore := prefix.NewStore(ctx.KVStore(storeKey), clientPrefix) - clientState, err := types.UnpackClientState(client.ClientState) - if err != nil { - return err + // get tendermint client state from client store + bz := clientStore.Get(host.ClientStateKey()) + if bz == nil { + return clienttypes.ErrClientNotFound } - // ensure client is tendermint type - tmClientState, ok := clientState.(*ibctmtypes.ClientState) - if !ok { - panic("client with identifier '07-tendermint' is not tendermint type!") + var clientState *ibctmtypes.ClientState + if err := cdc.UnmarshalInterface(bz, &clientState); err != nil { + return err } - if err = ibctmtypes.PruneAllExpiredConsensusStates(ctx, clientStore, cdc, tmClientState); err != nil { + + if err = ibctmtypes.PruneAllExpiredConsensusStates(ctx, clientStore, cdc, clientState); err != nil { return err } diff --git a/modules/core/keeper/migrations.go b/modules/core/keeper/migrations.go index 7e789ba7161..801a91bc42f 100644 --- a/modules/core/keeper/migrations.go +++ b/modules/core/keeper/migrations.go @@ -4,8 +4,6 @@ import ( sdk 
"github.com/cosmos/cosmos-sdk/types" clientkeeper "github.com/cosmos/ibc-go/modules/core/02-client/keeper" - connectionkeeper "github.com/cosmos/ibc-go/modules/core/03-connection/keeper" - channelkeeper "github.com/cosmos/ibc-go/modules/core/04-channel/keeper" ) // Migrator is a struct for handling in-place store migrations. From 972ca3c4c40211e386240b6647c683799b7671b9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Colin=20Axn=C3=A9r?= <25233464+colin-axner@users.noreply.github.com> Date: Mon, 7 Jun 2021 18:11:18 +0200 Subject: [PATCH 05/21] add genesis migration --- modules/core/02-client/legacy/v100/genesis.go | 102 ++++++++++++++++++ 1 file changed, 102 insertions(+) create mode 100644 modules/core/02-client/legacy/v100/genesis.go diff --git a/modules/core/02-client/legacy/v100/genesis.go b/modules/core/02-client/legacy/v100/genesis.go new file mode 100644 index 00000000000..b6dead93c92 --- /dev/null +++ b/modules/core/02-client/legacy/v100/genesis.go @@ -0,0 +1,102 @@ +package v100 + +import ( + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + + "github.com/cosmos/ibc-go/modules/core/02-client/types" + "github.com/cosmos/ibc-go/modules/core/exported" + smtypes "github.com/cosmos/ibc-go/modules/light-clients/06-solomachine/types" + ibctmtypes "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types" +) + +// Migrate accepts exported v0.39 x/auth and v0.38 x/bank genesis state and +// migrates it to v0.40 x/bank genesis state. The migration includes: +// +// - Moving balances from x/auth to x/bank genesis state. +// - Moving supply from x/supply to x/bank genesis state. +// - Re-encode in v0.40 GenesisState. 
+func Migrate( + clientGenState *types.GenesisState, +) (*types.GenesisState, error) { + for i, client := range clientGenState.Clients { + clientType, _, err := types.ParseClientIdentifier(client.ClientId) + if err != nil { + return nil, err + } + + if clientType == exported.Solomachine { + // unpack any + clientState, ok := client.ClientState.GetCachedValue().(*ClientState) + if !ok { + return nil, sdkerrors.Wrapf(sdkerrors.ErrUnpackAny, "cannot unpack Any into ClientState %T", client.ClientState) + } + + isFrozen := clientState.FrozenSequence != 0 + consensusState := &smtypes.ConsensusState{ + PublicKey: clientState.ConsensusState.PublicKey, + Diversifier: clientState.ConsensusState.Diversifier, + Timestamp: clientState.ConsensusState.Timestamp, + } + + newSolomachine := &smtypes.ClientState{ + Sequence: clientState.Sequence, + IsFrozen: isFrozen, + ConsensusState: consensusState, + AllowUpdateAfterProposal: clientState.AllowUpdateAfterProposal, + } + + any, err := types.PackClientState(newSolomachine) + if err != nil { + return nil, err + } + + clientGenState.Clients[i] = types.IdentifiedClientState{ + ClientId: client.ClientId, + ClientState: any, + } + } + + var smIndiciesToRemove []int + for i, clientConsensusState := range clientGenState.ClientsConsensus { + // found consensus state, prune as necessary + if clientConsensusState.ClientId == client.ClientId { + switch clientType { + case exported.Solomachine: + // remove all consensus states for the solo machine + smIndiciesToRemove = append(smIndiciesToRemove, i) + case exported.Tendermint: + // prune expired consensus state + tmClientState, ok := client.ClientState.GetCachedValue().(*ibctmtypes.ClientState) + if !ok { + return nil, clienttypes.Err + } + + var consStateIndiciesToRemove []int + for i, consState := range clientConsensusState.ConsensusStates { + tmConsState := consState.ConsensusState.GetCachedValue().(*ibctmtypes.ConsensusState) + if tmClientState.IsExpired(tmConsState.Timestamp, blockTime) { + 
consStateIndiciesToRemove = append(consStateIndiciesToRemove, i) + } + } + + for _, index := range consStateIndiciesToRemove { + clientGenState.ClientsConsensus[i] = types.ClientConsensusStates{ + ClientId: clientConsensusState.ClientId, + ConsensusStates: append(clientConsensusState.ConsensusStates[:index], clientConsensusState.ConsensusStates[index+1:]...), + } + } + + default: + break + } + } + } + + for _, index := range smIndiciesToRemove { + clientGenState.ClientsConsensus = append(clientGenState.ClientsConsensus[:index], clientGenState.ClientsConsensus[index+1:]...) + } + + } + + return clientGenState, nil +} From 314aaba10b4a00bb2a391817f880683fcb55600b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Colin=20Axn=C3=A9r?= <25233464+colin-axner@users.noreply.github.com> Date: Tue, 8 Jun 2021 13:24:00 +0200 Subject: [PATCH 06/21] code cleanup --- modules/core/02-client/legacy/v100/genesis.go | 88 ++++++++++++------- modules/core/02-client/legacy/v100/store.go | 64 ++++++-------- .../core/02-client/legacy/v100/store_test.go | 1 + 3 files changed, 84 insertions(+), 69 deletions(-) create mode 100644 modules/core/02-client/legacy/v100/store_test.go diff --git a/modules/core/02-client/legacy/v100/genesis.go b/modules/core/02-client/legacy/v100/genesis.go index b6dead93c92..9fc9c09acdd 100644 --- a/modules/core/02-client/legacy/v100/genesis.go +++ b/modules/core/02-client/legacy/v100/genesis.go @@ -1,51 +1,39 @@ package v100 import ( + "bytes" + "time" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" "github.com/cosmos/ibc-go/modules/core/02-client/types" "github.com/cosmos/ibc-go/modules/core/exported" - smtypes "github.com/cosmos/ibc-go/modules/light-clients/06-solomachine/types" ibctmtypes "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types" ) -// Migrate accepts exported v0.39 x/auth and v0.38 x/bank genesis state and -// migrates it to v0.40 x/bank genesis state. 
The migration includes: +// Migrate accepts exported v1.0.0 IBC client genesis file and migrates it to: // -// - Moving balances from x/auth to x/bank genesis state. -// - Moving supply from x/supply to x/bank genesis state. -// - Re-encode in v0.40 GenesisState. -func Migrate( - clientGenState *types.GenesisState, -) (*types.GenesisState, error) { +// - Update solo machine client state protobuf definition (v1 to v2) +// - Remove all solo machine consensus states +// - Remove all expired tendermint consensus states +func Migrate(clientGenState *types.GenesisState, genesisBlockTime time.Time) (*types.GenesisState, error) { + for i, client := range clientGenState.Clients { clientType, _, err := types.ParseClientIdentifier(client.ClientId) if err != nil { return nil, err } + // update solo machine client state defintions if clientType == exported.Solomachine { - // unpack any clientState, ok := client.ClientState.GetCachedValue().(*ClientState) if !ok { return nil, sdkerrors.Wrapf(sdkerrors.ErrUnpackAny, "cannot unpack Any into ClientState %T", client.ClientState) } - isFrozen := clientState.FrozenSequence != 0 - consensusState := &smtypes.ConsensusState{ - PublicKey: clientState.ConsensusState.PublicKey, - Diversifier: clientState.ConsensusState.Diversifier, - Timestamp: clientState.ConsensusState.Timestamp, - } + updatedClientState := migrateSolomachine(clientState) - newSolomachine := &smtypes.ClientState{ - Sequence: clientState.Sequence, - IsFrozen: isFrozen, - ConsensusState: consensusState, - AllowUpdateAfterProposal: clientState.AllowUpdateAfterProposal, - } - - any, err := types.PackClientState(newSolomachine) + any, err := types.PackClientState(updatedClientState) if err != nil { return nil, err } @@ -56,30 +44,62 @@ func Migrate( } } - var smIndiciesToRemove []int + // collect the client consensus state index for solo machine clients + var smConsStateByIndex []int + + // iterate consensus states for i, clientConsensusState := range 
clientGenState.ClientsConsensus { - // found consensus state, prune as necessary + // look for consensus states for the current client if clientConsensusState.ClientId == client.ClientId { switch clientType { case exported.Solomachine: // remove all consensus states for the solo machine - smIndiciesToRemove = append(smIndiciesToRemove, i) + smConsStateByIndex = append(smConsStateByIndex, i) case exported.Tendermint: // prune expired consensus state tmClientState, ok := client.ClientState.GetCachedValue().(*ibctmtypes.ClientState) if !ok { - return nil, clienttypes.Err + return nil, types.ErrInvalidClient } - var consStateIndiciesToRemove []int + // collect the consensus state index for expired tendermint consensus states + var tmConsStateByIndex []int + for i, consState := range clientConsensusState.ConsensusStates { tmConsState := consState.ConsensusState.GetCachedValue().(*ibctmtypes.ConsensusState) - if tmClientState.IsExpired(tmConsState.Timestamp, blockTime) { - consStateIndiciesToRemove = append(consStateIndiciesToRemove, i) + if tmClientState.IsExpired(tmConsState.Timestamp, genesisBlockTime) { + tmConsStateByIndex = append(tmConsStateByIndex, i) } } - for _, index := range consStateIndiciesToRemove { + // remove all expired tendermint consensus states + for _, index := range tmConsStateByIndex { + for i, identifiedGenMetadata := range clientGenState.ClientsMetadata { + // look for metadata for current client + if identifiedGenMetadata.ClientId == client.ClientId { + + // collect the metadata indicies to be removed + var tmConsMetadataByIndex []int + + // obtain height for consensus state being pruned + height := clientConsensusState.ConsensusStates[index].Height + + // iterate throught metadata and find metadata which should be pruned + for j, metadata := range identifiedGenMetadata.ClientMetadata { + if bytes.Equal(metadata.Key, ibctmtypes.IterationKey(height)) || + bytes.Equal(metadata.Key, ibctmtypes.ProcessedTimeKey(height)) || + 
bytes.Equal(metadata.Key, ibctmtypes.ProcessedHeightKey(height)) { + tmConsMetadataByIndex = append(tmConsMetadataByIndex, j) + } + } + + for _, metadataIndex := range tmConsMetadataByIndex { + clientGenState.ClientsMetadata[i].ClientMetadata = append(clientGenState.ClientsMetadata[i].ClientMetadata[:metadataIndex], clientGenState.ClientsMetadata[i].ClientMetadata[metadataIndex+1:]...) + } + } + } + + // remove client state clientGenState.ClientsConsensus[i] = types.ClientConsensusStates{ ClientId: clientConsensusState.ClientId, ConsensusStates: append(clientConsensusState.ConsensusStates[:index], clientConsensusState.ConsensusStates[index+1:]...), @@ -92,10 +112,10 @@ func Migrate( } } - for _, index := range smIndiciesToRemove { + // remove all solo machine consensus states + for _, index := range smConsStateByIndex { clientGenState.ClientsConsensus = append(clientGenState.ClientsConsensus[:index], clientGenState.ClientsConsensus[index+1:]...) } - } return clientGenState, nil diff --git a/modules/core/02-client/legacy/v100/store.go b/modules/core/02-client/legacy/v100/store.go index df0ae13e73b..70beceeb085 100644 --- a/modules/core/02-client/legacy/v100/store.go +++ b/modules/core/02-client/legacy/v100/store.go @@ -20,7 +20,7 @@ import ( // The migration includes: // // - Migrating solo machine client states from v1 to v2 protobuf definition -// - Pruning all solo machine consensus states from the client stores +// - Pruning all solo machine consensus states // - Pruning expired tendermint consensus states func MigrateStore(ctx sdk.Context, storeKey sdk.StoreKey, cdc codec.BinaryCodec) (err error) { store := ctx.KVStore(storeKey) @@ -50,19 +50,31 @@ func MigrateStore(ctx sdk.Context, storeKey sdk.StoreKey, cdc codec.BinaryCodec) clientPrefix := []byte(fmt.Sprintf("%s/%s/", host.KeyClientStorePrefix, clientID)) clientStore := prefix.NewStore(ctx.KVStore(storeKey), clientPrefix) + bz := clientStore.Get(host.ClientStateKey()) + if bz == nil { + return 
clienttypes.ErrClientNotFound + } + switch clientType { case exported.Solomachine: - migrateSolomachine(clientStore, cdc, clientID) + var clientState *ClientState + if err := cdc.UnmarshalInterface(bz, &clientState); err != nil { + return err + } - case exported.Tendermint: - clientPrefix := []byte(fmt.Sprintf("%s/%s/", host.KeyClientStorePrefix, clientID)) - clientStore := prefix.NewStore(ctx.KVStore(storeKey), clientPrefix) - // get tendermint client state from client store - bz := clientStore.Get(host.ClientStateKey()) - if bz == nil { - return clienttypes.ErrClientNotFound + updatedClientState := migrateSolomachine(clientState) + + bz, err := clienttypes.MarshalClientState(cdc, updatedClientState) + if err != nil { + return err } + // update solomachine in store + clientStore.Set(host.ClientStateKey(), bz) + + pruneSolomachineConsensusStates(clientStore) + + case exported.Tendermint: var clientState *ibctmtypes.ClientState if err := cdc.UnmarshalInterface(bz, &clientState); err != nil { return err @@ -81,20 +93,7 @@ func MigrateStore(ctx sdk.Context, storeKey sdk.StoreKey, cdc codec.BinaryCodec) } // migrateSolomachine migrates the solomachine from v1 to v2 solo machine protobuf defintion. -// It also deletes all consensus states stored in the client store as they are not necessary -// and reference the v1 consensus state type. 
-func migrateSolomachine(clientStore sdk.KVStore, cdc codec.BinaryCodec, clientID string) error { - // get legacy solo machine from client store - bz := clientStore.Get(host.ClientStateKey()) - if bz == nil { - return clienttypes.ErrClientNotFound - } - - var clientState *ClientState - if err := cdc.UnmarshalInterface(bz, &clientState); err != nil { - return err - } - +func migrateSolomachine(clientState *ClientState) *smtypes.ClientState { isFrozen := clientState.FrozenSequence != 0 consensusState := &smtypes.ConsensusState{ PublicKey: clientState.ConsensusState.PublicKey, @@ -102,22 +101,17 @@ func migrateSolomachine(clientStore sdk.KVStore, cdc codec.BinaryCodec, clientID Timestamp: clientState.ConsensusState.Timestamp, } - newSolomachine := &smtypes.ClientState{ + return &smtypes.ClientState{ Sequence: clientState.Sequence, IsFrozen: isFrozen, ConsensusState: consensusState, AllowUpdateAfterProposal: clientState.AllowUpdateAfterProposal, } +} - bz, err := clienttypes.MarshalClientState(cdc, newSolomachine) - if err != nil { - return err - } - - // update solomachine in store - clientStore.Set(host.ClientStateKey(), bz) - - // collect consensus states to be pruned +// pruneSolomachineConsensusStates removes all solomachine consensus states from the +// client store. 
+func pruneSolomachineConsensusStates(clientStore sdk.KVStore) { iterator := sdk.KVStorePrefixIterator(clientStore, []byte(host.KeyConsensusStatePrefix)) var heights []exported.Height @@ -128,6 +122,8 @@ func migrateSolomachine(clientStore sdk.KVStore, cdc codec.BinaryCodec, clientID if len(keySplit) != 4 || keySplit[2] != string(host.KeyConsensusStatePrefix) { continue } + + // collect consensus states to be pruned heights = append(heights, types.MustParseHeight(keySplit[3])) } @@ -135,6 +131,4 @@ func migrateSolomachine(clientStore sdk.KVStore, cdc codec.BinaryCodec, clientID for _, height := range heights { clientStore.Delete(host.ConsensusStateKey(height)) } - - return nil } diff --git a/modules/core/02-client/legacy/v100/store_test.go b/modules/core/02-client/legacy/v100/store_test.go new file mode 100644 index 00000000000..282a0a372db --- /dev/null +++ b/modules/core/02-client/legacy/v100/store_test.go @@ -0,0 +1 @@ +package v100 From 863fdbd456cc9ad6ddaaab51c315f240000972ca Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Colin=20Axn=C3=A9r?= <25233464+colin-axner@users.noreply.github.com> Date: Tue, 8 Jun 2021 16:34:04 +0200 Subject: [PATCH 07/21] add store migration test for expired tendermint consensus states --- modules/core/02-client/legacy/v100/store.go | 9 +- .../core/02-client/legacy/v100/store_test.go | 120 +++++++++++++++++- .../07-tendermint/types/update_test.go | 12 ++ 3 files changed, 136 insertions(+), 5 deletions(-) diff --git a/modules/core/02-client/legacy/v100/store.go b/modules/core/02-client/legacy/v100/store.go index 70beceeb085..52af3de74ae 100644 --- a/modules/core/02-client/legacy/v100/store.go +++ b/modules/core/02-client/legacy/v100/store.go @@ -7,6 +7,7 @@ import ( "github.com/cosmos/cosmos-sdk/codec" "github.com/cosmos/cosmos-sdk/store/prefix" sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" "github.com/cosmos/ibc-go/modules/core/02-client/types" clienttypes 
"github.com/cosmos/ibc-go/modules/core/02-client/types" @@ -58,7 +59,7 @@ func MigrateStore(ctx sdk.Context, storeKey sdk.StoreKey, cdc codec.BinaryCodec) switch clientType { case exported.Solomachine: var clientState *ClientState - if err := cdc.UnmarshalInterface(bz, &clientState); err != nil { + if err := cdc.Unmarshal(bz, clientState); err != nil { return err } @@ -75,12 +76,12 @@ func MigrateStore(ctx sdk.Context, storeKey sdk.StoreKey, cdc codec.BinaryCodec) pruneSolomachineConsensusStates(clientStore) case exported.Tendermint: - var clientState *ibctmtypes.ClientState + var clientState exported.ClientState if err := cdc.UnmarshalInterface(bz, &clientState); err != nil { - return err + return sdkerrors.Wrap(err, "failed to unmarshal client state bytes into tendermint client state") } - if err = ibctmtypes.PruneAllExpiredConsensusStates(ctx, clientStore, cdc, clientState); err != nil { + if err = ibctmtypes.PruneAllExpiredConsensusStates(ctx, clientStore, cdc, clientState.(*ibctmtypes.ClientState)); err != nil { return err } diff --git a/modules/core/02-client/legacy/v100/store_test.go b/modules/core/02-client/legacy/v100/store_test.go index 282a0a372db..b5134fff691 100644 --- a/modules/core/02-client/legacy/v100/store_test.go +++ b/modules/core/02-client/legacy/v100/store_test.go @@ -1 +1,119 @@ -package v100 +package v100_test + +import ( + "testing" + "time" + + "github.com/stretchr/testify/suite" + + "github.com/cosmos/ibc-go/modules/core/02-client/legacy/v100" + host "github.com/cosmos/ibc-go/modules/core/24-host" + "github.com/cosmos/ibc-go/modules/core/exported" + ibctmtypes "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types" + ibctesting "github.com/cosmos/ibc-go/testing" +) + +type LegacyTestSuite struct { + suite.Suite + + coordinator *ibctesting.Coordinator + + // testing chains used for convenience and readability + chainA *ibctesting.TestChain + chainB *ibctesting.TestChain +} + +// TestLegacyTestSuite runs all the tests within 
this package. +func TestLegacyTestSuite(t *testing.T) { + suite.Run(t, new(LegacyTestSuite)) +} + +// SetupTest creates a coordinator with 2 test chains. +func (suite *LegacyTestSuite) SetupTest() { + suite.coordinator = ibctesting.NewCoordinator(suite.T(), 2) + suite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(0)) + suite.chainB = suite.coordinator.GetChain(ibctesting.GetChainID(1)) + // commit some blocks so that QueryProof returns valid proof (cannot return valid query if height <= 1) + suite.coordinator.CommitNBlocks(suite.chainA, 2) + suite.coordinator.CommitNBlocks(suite.chainB, 2) +} + +// only test migration for solo machines +// ensure all client states are migrated and all consensus states +// are removed +func (suite *LegacyTestSuite) TestMigrateStoreSolomachine() { + +} + +// only test migration for tendermint clients +// ensure all expired consensus states are removed from tendermint client stores +func (suite *LegacyTestSuite) TestMigrateStoreTendermint() { + // create path and setup clients + path := ibctesting.NewPath(suite.chainA, suite.chainB) + suite.coordinator.SetupClients(path) + + // collect all heights expected to be pruned + var pruneHeights []exported.Height + pruneHeights = append(pruneHeights, path.EndpointA.GetClientState().GetLatestHeight()) + + // these heights will be expired and also pruned + for i := 0; i < 3; i++ { + path.EndpointA.UpdateClient() + pruneHeights = append(pruneHeights, path.EndpointA.GetClientState().GetLatestHeight()) + } + + // double chedck all information is currently stored + for _, pruneHeight := range pruneHeights { + consState, ok := path.EndpointA.Chain.GetConsensusState(path.EndpointA.ClientID, pruneHeight) + suite.Require().True(ok) + suite.Require().NotNil(consState) + + ctx := path.EndpointA.Chain.GetContext() + clientStore := path.EndpointA.Chain.App.GetIBCKeeper().ClientKeeper.ClientStore(ctx, path.EndpointA.ClientID) + + processedTime, ok := ibctmtypes.GetProcessedTime(clientStore, 
pruneHeight) + suite.Require().True(ok) + suite.Require().NotNil(processedTime) + + processedHeight, ok := ibctmtypes.GetProcessedHeight(clientStore, pruneHeight) + suite.Require().True(ok) + suite.Require().NotNil(processedHeight) + + expectedConsKey := ibctmtypes.GetIterationKey(clientStore, pruneHeight) + suite.Require().NotNil(expectedConsKey) + } + + // Increment the time by a week + suite.coordinator.IncrementTimeBy(7 * 24 * time.Hour) + + // create the consensus state that can be used as trusted height for next update + path.EndpointA.UpdateClient() + + // Increment the time by another week, then update the client. + // This will cause the consensus states created before the first time increment + // to be expired + suite.coordinator.IncrementTimeBy(7 * 24 * time.Hour) + err := v100.MigrateStore(path.EndpointA.Chain.GetContext(), path.EndpointA.Chain.GetSimApp().GetKey(host.StoreKey), path.EndpointA.Chain.App.AppCodec()) + suite.Require().NoError(err) + + ctx := path.EndpointA.Chain.GetContext() + clientStore := path.EndpointA.Chain.App.GetIBCKeeper().ClientKeeper.ClientStore(ctx, path.EndpointA.ClientID) + + // ensure everything has been pruned + for i, pruneHeight := range pruneHeights { + consState, ok := path.EndpointA.Chain.GetConsensusState(path.EndpointA.ClientID, pruneHeight) + suite.Require().False(ok, i) + suite.Require().Nil(consState, i) + + processedTime, ok := ibctmtypes.GetProcessedTime(clientStore, pruneHeight) + suite.Require().False(ok, i) + suite.Require().Equal(uint64(0), processedTime, i) + + processedHeight, ok := ibctmtypes.GetProcessedHeight(clientStore, pruneHeight) + suite.Require().False(ok, i) + suite.Require().Nil(processedHeight, i) + + expectedConsKey := ibctmtypes.GetIterationKey(clientStore, pruneHeight) + suite.Require().Nil(expectedConsKey, i) + } +} diff --git a/modules/light-clients/07-tendermint/types/update_test.go b/modules/light-clients/07-tendermint/types/update_test.go index b93168b5166..6626e73b5da 100644 --- 
a/modules/light-clients/07-tendermint/types/update_test.go +++ b/modules/light-clients/07-tendermint/types/update_test.go @@ -400,6 +400,8 @@ func (suite *TendermintTestSuite) TestPruneConsensusState() { clientStore = path.EndpointA.Chain.App.GetIBCKeeper().ClientKeeper.ClientStore(ctx, path.EndpointA.ClientID) expectedProcessTime, ok := types.GetProcessedTime(clientStore, expiredHeight) suite.Require().True(ok) + expectedProcessHeight, ok := types.GetProcessedHeight(clientStore, expiredHeight) + suite.Require().True(ok) expectedConsKey := types.GetIterationKey(clientStore, expiredHeight) suite.Require().NotNil(expectedConsKey) @@ -425,6 +427,10 @@ func (suite *TendermintTestSuite) TestPruneConsensusState() { processTime, ok := types.GetProcessedTime(clientStore, pruneHeight) suite.Require().Equal(uint64(0), processTime, "processed time metadata not pruned") suite.Require().False(ok) + processHeight, ok := types.GetProcessedHeight(clientStore, pruneHeight) + suite.Require().Equal(uint64(0), processHeight, "processed height metadata not pruned") + suite.Require().False(ok) + // check iteration key metadata is pruned consKey := types.GetIterationKey(clientStore, pruneHeight) suite.Require().Nil(consKey, "iteration key not pruned") @@ -438,6 +444,12 @@ func (suite *TendermintTestSuite) TestPruneConsensusState() { processTime, ok = types.GetProcessedTime(clientStore, expiredHeight) suite.Require().Equal(expectedProcessTime, processTime, "processed time metadata incorrectly pruned") suite.Require().True(ok) + + // check processed height metadata is not pruned + processHeight, ok = types.GetProcessedHeight(clientStore, expiredHeight) + suite.Require().Equal(expectedProcessHeight, processHeight, "processed height metadata incorrectly pruned") + suite.Require().True(ok) + // check iteration key metadata is not pruned consKey = types.GetIterationKey(clientStore, expiredHeight) suite.Require().Equal(expectedConsKey, consKey, "iteration key incorrectly pruned") From 
863b50a234f696efb29c56e84aa11b080747c6ea Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Colin=20Axn=C3=A9r?= <25233464+colin-axner@users.noreply.github.com> Date: Wed, 9 Jun 2021 13:03:10 +0200 Subject: [PATCH 08/21] finish adding in place migration store tests --- .../core/02-client/legacy/v100/solomachine.go | 19 +++++ modules/core/02-client/legacy/v100/store.go | 22 +++--- .../core/02-client/legacy/v100/store_test.go | 70 +++++++++++++++++++ 3 files changed, 103 insertions(+), 8 deletions(-) create mode 100644 modules/core/02-client/legacy/v100/solomachine.go diff --git a/modules/core/02-client/legacy/v100/solomachine.go b/modules/core/02-client/legacy/v100/solomachine.go new file mode 100644 index 00000000000..57939cd32c4 --- /dev/null +++ b/modules/core/02-client/legacy/v100/solomachine.go @@ -0,0 +1,19 @@ +package v100 + +import ( + codectypes "github.com/cosmos/cosmos-sdk/codec/types" + cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types" +) + +// Interface implementation checks. +var _, _ codectypes.UnpackInterfacesMessage = &ClientState{}, &ConsensusState{} + +// UnpackInterfaces implements the UnpackInterfaceMessages.UnpackInterfaces method +func (cs ClientState) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error { + return cs.ConsensusState.UnpackInterfaces(unpacker) +} + +// UnpackInterfaces implements the UnpackInterfaceMessages.UnpackInterfaces method +func (cs ConsensusState) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error { + return unpacker.UnpackAny(cs.PublicKey, new(cryptotypes.PubKey)) +} diff --git a/modules/core/02-client/legacy/v100/store.go b/modules/core/02-client/legacy/v100/store.go index 52af3de74ae..1ca2e2fa856 100644 --- a/modules/core/02-client/legacy/v100/store.go +++ b/modules/core/02-client/legacy/v100/store.go @@ -5,6 +5,7 @@ import ( "strings" "github.com/cosmos/cosmos-sdk/codec" + codectypes "github.com/cosmos/cosmos-sdk/codec/types" "github.com/cosmos/cosmos-sdk/store/prefix" sdk "github.com/cosmos/cosmos-sdk/types" 
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" @@ -29,6 +30,7 @@ func MigrateStore(ctx sdk.Context, storeKey sdk.StoreKey, cdc codec.BinaryCodec) var clients []string + // collect all clients defer iterator.Close() for ; iterator.Valid(); iterator.Next() { keySplit := strings.Split(string(iterator.Key()), "/") @@ -39,7 +41,6 @@ func MigrateStore(ctx sdk.Context, storeKey sdk.StoreKey, cdc codec.BinaryCodec) // key is clients/{clientid}/clientState // Thus, keySplit[1] is clientID clients = append(clients, keySplit[1]) - } for _, clientID := range clients { @@ -58,16 +59,21 @@ func MigrateStore(ctx sdk.Context, storeKey sdk.StoreKey, cdc codec.BinaryCodec) switch clientType { case exported.Solomachine: - var clientState *ClientState - if err := cdc.Unmarshal(bz, clientState); err != nil { - return err + any := &codectypes.Any{} + if err := cdc.Unmarshal(bz, any); err != nil { + return sdkerrors.Wrap(err, "failed to unmarshal client state bytes into solo machine client state") + } + + clientState := &ClientState{} + if err := cdc.Unmarshal(any.Value, clientState); err != nil { + return sdkerrors.Wrap(err, "failed to unmarshal client state bytes into solo machine client state") } updatedClientState := migrateSolomachine(clientState) bz, err := clienttypes.MarshalClientState(cdc, updatedClientState) if err != nil { - return err + return sdkerrors.Wrap(err, "failed to unmarshal client state bytes into solo machine client state") } // update solomachine in store @@ -119,13 +125,13 @@ func pruneSolomachineConsensusStates(clientStore sdk.KVStore) { defer iterator.Close() for ; iterator.Valid(); iterator.Next() { keySplit := strings.Split(string(iterator.Key()), "/") - // key is in the format "clients//consensusStates/" - if len(keySplit) != 4 || keySplit[2] != string(host.KeyConsensusStatePrefix) { + // key is in the format "consensusStates/" + if len(keySplit) != 2 || keySplit[0] != string(host.KeyConsensusStatePrefix) { continue } // collect consensus states to be 
pruned - heights = append(heights, types.MustParseHeight(keySplit[3])) + heights = append(heights, types.MustParseHeight(keySplit[1])) } // delete all consensus states diff --git a/modules/core/02-client/legacy/v100/store_test.go b/modules/core/02-client/legacy/v100/store_test.go index b5134fff691..31410bb59e0 100644 --- a/modules/core/02-client/legacy/v100/store_test.go +++ b/modules/core/02-client/legacy/v100/store_test.go @@ -7,6 +7,7 @@ import ( "github.com/stretchr/testify/suite" "github.com/cosmos/ibc-go/modules/core/02-client/legacy/v100" + "github.com/cosmos/ibc-go/modules/core/02-client/types" host "github.com/cosmos/ibc-go/modules/core/24-host" "github.com/cosmos/ibc-go/modules/core/exported" ibctmtypes "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types" @@ -42,7 +43,76 @@ func (suite *LegacyTestSuite) SetupTest() { // ensure all client states are migrated and all consensus states // are removed func (suite *LegacyTestSuite) TestMigrateStoreSolomachine() { + path := ibctesting.NewPath(suite.chainA, suite.chainB) + + // create multiple legacy solo machine clients + solomachine := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "06-solomachine-0", "testing", 1) + solomachineMulti := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "06-solomachine-1", "testing", 4) + + // manually generate old proto buf definitions and set in store + // NOTE: we cannot use 'CreateClient' and 'UpdateClient' functions since we are + // using client states and consensus states which do not implement the exported.ClientState + // and exported.ConsensusState interface + for _, sm := range []*ibctesting.Solomachine{solomachine, solomachineMulti} { + clientStore := path.EndpointA.Chain.App.GetIBCKeeper().ClientKeeper.ClientStore(path.EndpointA.Chain.GetContext(), sm.ClientID) + clientState := sm.ClientState() + + var seq uint64 + if clientState.IsFrozen { + seq = 1 + } + + // generate old client state proto defintion + legacyClientState := 
&v100.ClientState{ + Sequence: clientState.Sequence, + FrozenSequence: seq, + ConsensusState: &v100.ConsensusState{ + PublicKey: clientState.ConsensusState.PublicKey, + Diversifier: clientState.ConsensusState.Diversifier, + Timestamp: clientState.ConsensusState.Timestamp, + }, + AllowUpdateAfterProposal: clientState.AllowUpdateAfterProposal, + } + + // set client state + bz, err := path.EndpointA.Chain.App.AppCodec().MarshalInterface(legacyClientState) + suite.Require().NoError(err) + clientStore.Set(host.ClientStateKey(), bz) + + // set some consensus states + height1 := types.NewHeight(0, 1) + height2 := types.NewHeight(1, 2) + height3 := types.NewHeight(0, 123) + + bz, err = path.EndpointA.Chain.App.AppCodec().MarshalInterface(legacyClientState.ConsensusState) + suite.Require().NoError(err) + clientStore.Set(host.ConsensusStateKey(height1), bz) + clientStore.Set(host.ConsensusStateKey(height2), bz) + clientStore.Set(host.ConsensusStateKey(height3), bz) + + } + // create tendermint clients + suite.coordinator.SetupClients(path) + + err := v100.MigrateStore(path.EndpointA.Chain.GetContext(), path.EndpointA.Chain.GetSimApp().GetKey(host.StoreKey), path.EndpointA.Chain.App.AppCodec()) + suite.Require().NoError(err) + + // verify client state has been migrated + for _, sm := range []*ibctesting.Solomachine{solomachine, solomachineMulti} { + clientState, ok := path.EndpointA.Chain.App.GetIBCKeeper().ClientKeeper.GetClientState(path.EndpointA.Chain.GetContext(), sm.ClientID) + suite.Require().True(ok) + suite.Require().Equal(sm.ClientState(), clientState) + } + + // verify consensus states have been removed + for _, sm := range []*ibctesting.Solomachine{solomachine, solomachineMulti} { + clientConsensusStates := path.EndpointA.Chain.App.GetIBCKeeper().ClientKeeper.GetAllConsensusStates(path.EndpointA.Chain.GetContext()) + for _, client := range clientConsensusStates { + // GetAllConsensusStates should not return consensus states for our solo machine clients + 
suite.Require().NotEqual(sm.ClientID, client.ClientId) + } + } } // only test migration for tendermint clients From 545056e963b5ea3481ed5409caf98b76738ead2c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Colin=20Axn=C3=A9r?= <25233464+colin-axner@users.noreply.github.com> Date: Wed, 9 Jun 2021 14:05:58 +0200 Subject: [PATCH 09/21] add genesis test for solo machines --- modules/core/02-client/legacy/v100/genesis.go | 10 +- .../02-client/legacy/v100/genesis_test.go | 150 ++++++++++++++++++ .../core/02-client/legacy/v100/solomachine.go | 19 --- .../core/02-client/legacy/v100/store_test.go | 1 - 4 files changed, 158 insertions(+), 22 deletions(-) create mode 100644 modules/core/02-client/legacy/v100/genesis_test.go delete mode 100644 modules/core/02-client/legacy/v100/solomachine.go diff --git a/modules/core/02-client/legacy/v100/genesis.go b/modules/core/02-client/legacy/v100/genesis.go index 9fc9c09acdd..a00f697e4ea 100644 --- a/modules/core/02-client/legacy/v100/genesis.go +++ b/modules/core/02-client/legacy/v100/genesis.go @@ -4,6 +4,7 @@ import ( "bytes" "time" + "github.com/cosmos/cosmos-sdk/codec" sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" "github.com/cosmos/ibc-go/modules/core/02-client/types" @@ -11,12 +12,12 @@ import ( ibctmtypes "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types" ) -// Migrate accepts exported v1.0.0 IBC client genesis file and migrates it to: +// MigrateGenesis accepts exported v1.0.0 IBC client genesis file and migrates it to: // // - Update solo machine client state protobuf definition (v1 to v2) // - Remove all solo machine consensus states // - Remove all expired tendermint consensus states -func Migrate(clientGenState *types.GenesisState, genesisBlockTime time.Time) (*types.GenesisState, error) { +func MigrateGenesis(cdc codec.BinaryCodec, clientGenState *types.GenesisState, genesisBlockTime time.Time) (*types.GenesisState, error) { for i, client := range clientGenState.Clients { clientType, _, err := 
types.ParseClientIdentifier(client.ClientId) @@ -26,6 +27,11 @@ func Migrate(clientGenState *types.GenesisState, genesisBlockTime time.Time) (*t // update solo machine client state defintions if clientType == exported.Solomachine { + clientState := &ClientState{} + if err := cdc.Unmarshal(client.ClientState.Value, clientState); err != nil { + return nil, sdkerrors.Wrap(err, "failed to unmarshal client state bytes into solo machine client state") + } + clientState, ok := client.ClientState.GetCachedValue().(*ClientState) if !ok { return nil, sdkerrors.Wrapf(sdkerrors.ErrUnpackAny, "cannot unpack Any into ClientState %T", client.ClientState) diff --git a/modules/core/02-client/legacy/v100/genesis_test.go b/modules/core/02-client/legacy/v100/genesis_test.go new file mode 100644 index 00000000000..a853148f1f4 --- /dev/null +++ b/modules/core/02-client/legacy/v100/genesis_test.go @@ -0,0 +1,150 @@ +package v100_test + +import ( + "encoding/json" + "fmt" + "testing" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/codec" + codectypes "github.com/cosmos/cosmos-sdk/codec/types" + + ibcclient "github.com/cosmos/ibc-go/modules/core/02-client" + v100 "github.com/cosmos/ibc-go/modules/core/02-client/legacy/v100" + "github.com/cosmos/ibc-go/modules/core/02-client/types" + host "github.com/cosmos/ibc-go/modules/core/24-host" + ibctesting "github.com/cosmos/ibc-go/testing" + "github.com/cosmos/ibc-go/testing/simapp" +) + +func (suite *LegacyTestSuite) TestMigrateGenesisSolomachine() { + path := ibctesting.NewPath(suite.chainA, suite.chainB) + encodingConfig := simapp.MakeTestEncodingConfig() + clientCtx := client.Context{}. + WithInterfaceRegistry(encodingConfig.InterfaceRegistry). + WithTxConfig(encodingConfig.TxConfig). 
+ WithJSONCodec(encodingConfig.Marshaler) + + // create multiple legacy solo machine clients + solomachine := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "06-solomachine-0", "testing", 1) + solomachineMulti := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "06-solomachine-1", "testing", 4) + + // create tendermint clients + suite.coordinator.SetupClients(path) + clientGenState := ibcclient.ExportGenesis(path.EndpointA.Chain.GetContext(), path.EndpointA.Chain.App.GetIBCKeeper().ClientKeeper) + + // manually generate old proto buf definitions and set in genesis + // NOTE: we cannot use 'ExportGenesis' for the solo machines since we are + // using client states and consensus states which do not implement the exported.ClientState + // and exported.ConsensusState interface + var clients []types.IdentifiedClientState + for _, sm := range []*ibctesting.Solomachine{solomachine, solomachineMulti} { + clientState := sm.ClientState() + + var seq uint64 + if clientState.IsFrozen { + seq = 1 + } + + // generate old client state proto defintion + legacyClientState := &v100.ClientState{ + Sequence: clientState.Sequence, + FrozenSequence: seq, + ConsensusState: &v100.ConsensusState{ + PublicKey: clientState.ConsensusState.PublicKey, + Diversifier: clientState.ConsensusState.Diversifier, + Timestamp: clientState.ConsensusState.Timestamp, + }, + AllowUpdateAfterProposal: clientState.AllowUpdateAfterProposal, + } + + // set client state + any, err := codectypes.NewAnyWithValue(legacyClientState) + suite.Require().NoError(err) + suite.Require().NotNil(any) + client := types.IdentifiedClientState{ + ClientId: sm.ClientID, + ClientState: any, + } + clients = append(clients, client) + + // set in store for ease of determining expected genesis + clientStore := path.EndpointA.Chain.App.GetIBCKeeper().ClientKeeper.ClientStore(path.EndpointA.Chain.GetContext(), sm.ClientID) + bz, err := path.EndpointA.Chain.App.AppCodec().MarshalInterface(legacyClientState) + 
suite.Require().NoError(err) + clientStore.Set(host.ClientStateKey(), bz) + + // set some consensus states + height1 := types.NewHeight(0, 1) + height2 := types.NewHeight(1, 2) + height3 := types.NewHeight(0, 123) + + any, err = codectypes.NewAnyWithValue(legacyClientState.ConsensusState) + suite.Require().NoError(err) + suite.Require().NotNil(any) + consensusState1 := types.ConsensusStateWithHeight{ + Height: height1, + ConsensusState: any, + } + consensusState2 := types.ConsensusStateWithHeight{ + Height: height2, + ConsensusState: any, + } + consensusState3 := types.ConsensusStateWithHeight{ + Height: height3, + ConsensusState: any, + } + + clientConsensusState := types.ClientConsensusStates{ + ClientId: sm.ClientID, + ConsensusStates: []types.ConsensusStateWithHeight{consensusState1, consensusState2, consensusState3}, + } + + clientGenState.ClientsConsensus = append(clientGenState.ClientsConsensus, clientConsensusState) + + // set in store for ease of determining expected genesis + bz, err = path.EndpointA.Chain.App.AppCodec().MarshalInterface(legacyClientState.ConsensusState) + suite.Require().NoError(err) + clientStore.Set(host.ConsensusStateKey(height1), bz) + clientStore.Set(host.ConsensusStateKey(height2), bz) + clientStore.Set(host.ConsensusStateKey(height3), bz) + } + // solo machine clients must come before tendermint in expected + clientGenState.Clients = append(clients, clientGenState.Clients...) 
+ + // migrate store get expected genesis + // store migration and genesis migration should produce identical results + err := v100.MigrateStore(path.EndpointA.Chain.GetContext(), path.EndpointA.Chain.GetSimApp().GetKey(host.StoreKey), path.EndpointA.Chain.App.AppCodec()) + suite.Require().NoError(err) + expectedClientGenState := ibcclient.ExportGenesis(path.EndpointA.Chain.GetContext(), path.EndpointA.Chain.App.GetIBCKeeper().ClientKeeper) + + // NOTE: genesis time isn't updated since we aren't testing for tendermint consensus state pruning + migrated, err := v100.MigrateGenesis(codec.NewProtoCodec(clientCtx.InterfaceRegistry), &clientGenState, suite.coordinator.CurrentTime) + suite.Require().NoError(err) + + bz, err := clientCtx.JSONCodec.MarshalJSON(&expectedClientGenState) + suite.Require().NoError(err) + + // Indent the JSON bz correctly. + var jsonObj map[string]interface{} + err = json.Unmarshal(bz, &jsonObj) + suite.Require().NoError(err) + expectedIndentedBz, err := json.MarshalIndent(jsonObj, "", "\t") + suite.Require().NoError(err) + + bz, err = clientCtx.JSONCodec.MarshalJSON(migrated) + suite.Require().NoError(err) + + // Indent the JSON bz correctly. + err = json.Unmarshal(bz, &jsonObj) + suite.Require().NoError(err) + indentedBz, err := json.MarshalIndent(jsonObj, "", "\t") + suite.Require().NoError(err) + + fmt.Println(string(indentedBz)) + + suite.Require().Equal(string(expectedIndentedBz), string(indentedBz)) +} + +func TestMigrateGenesisTendermint(t *testing.T) { +} diff --git a/modules/core/02-client/legacy/v100/solomachine.go b/modules/core/02-client/legacy/v100/solomachine.go deleted file mode 100644 index 57939cd32c4..00000000000 --- a/modules/core/02-client/legacy/v100/solomachine.go +++ /dev/null @@ -1,19 +0,0 @@ -package v100 - -import ( - codectypes "github.com/cosmos/cosmos-sdk/codec/types" - cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types" -) - -// Interface implementation checks. 
-var _, _ codectypes.UnpackInterfacesMessage = &ClientState{}, &ConsensusState{} - -// UnpackInterfaces implements the UnpackInterfaceMessages.UnpackInterfaces method -func (cs ClientState) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error { - return cs.ConsensusState.UnpackInterfaces(unpacker) -} - -// UnpackInterfaces implements the UnpackInterfaceMessages.UnpackInterfaces method -func (cs ConsensusState) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error { - return unpacker.UnpackAny(cs.PublicKey, new(cryptotypes.PubKey)) -} diff --git a/modules/core/02-client/legacy/v100/store_test.go b/modules/core/02-client/legacy/v100/store_test.go index 31410bb59e0..ee9934d812d 100644 --- a/modules/core/02-client/legacy/v100/store_test.go +++ b/modules/core/02-client/legacy/v100/store_test.go @@ -89,7 +89,6 @@ func (suite *LegacyTestSuite) TestMigrateStoreSolomachine() { clientStore.Set(host.ConsensusStateKey(height1), bz) clientStore.Set(host.ConsensusStateKey(height2), bz) clientStore.Set(host.ConsensusStateKey(height3), bz) - } // create tendermint clients From 40484bbdab0eed15c7265dc484fea34eaaf5792b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Colin=20Axn=C3=A9r?= <25233464+colin-axner@users.noreply.github.com> Date: Wed, 9 Jun 2021 17:18:48 +0200 Subject: [PATCH 10/21] fix genesis migration bug, add tendermint tests --- modules/core/02-client/legacy/v100/genesis.go | 89 +++++++------ .../02-client/legacy/v100/genesis_test.go | 126 +++++++++++++++++- 2 files changed, 171 insertions(+), 44 deletions(-) diff --git a/modules/core/02-client/legacy/v100/genesis.go b/modules/core/02-client/legacy/v100/genesis.go index a00f697e4ea..7e94dc3c9cf 100644 --- a/modules/core/02-client/legacy/v100/genesis.go +++ b/modules/core/02-client/legacy/v100/genesis.go @@ -18,6 +18,15 @@ import ( // - Remove all solo machine consensus states // - Remove all expired tendermint consensus states func MigrateGenesis(cdc codec.BinaryCodec, clientGenState *types.GenesisState, 
genesisBlockTime time.Time) (*types.GenesisState, error) { + // To prune the consensus states, we will create new clientsConsensus + // and clientsMetadata. These slices will be filled up with consensus states + // which should not be pruned. No solo machine consensus states should be added + // and only unexpired consensus states for tendermint clients will be added. + // The metadata keys for unexpired consensus states will be added to clientsMetadata + var ( + clientsConsensus []types.ClientConsensusStates + clientsMetadata []types.IdentifiedGenesisMetadata + ) for i, client := range clientGenState.Clients { clientType, _, err := types.ParseClientIdentifier(client.ClientId) @@ -32,11 +41,6 @@ func MigrateGenesis(cdc codec.BinaryCodec, clientGenState *types.GenesisState, g return nil, sdkerrors.Wrap(err, "failed to unmarshal client state bytes into solo machine client state") } - clientState, ok := client.ClientState.GetCachedValue().(*ClientState) - if !ok { - return nil, sdkerrors.Wrapf(sdkerrors.ErrUnpackAny, "cannot unpack Any into ClientState %T", client.ClientState) - } - updatedClientState := migrateSolomachine(clientState) any, err := types.PackClientState(updatedClientState) @@ -50,66 +54,70 @@ func MigrateGenesis(cdc codec.BinaryCodec, clientGenState *types.GenesisState, g } } - // collect the client consensus state index for solo machine clients - var smConsStateByIndex []int - - // iterate consensus states - for i, clientConsensusState := range clientGenState.ClientsConsensus { + // iterate consensus states by client + for _, clientConsensusStates := range clientGenState.ClientsConsensus { // look for consensus states for the current client - if clientConsensusState.ClientId == client.ClientId { + if clientConsensusStates.ClientId == client.ClientId { switch clientType { case exported.Solomachine: // remove all consensus states for the solo machine - smConsStateByIndex = append(smConsStateByIndex, i) + // do not add to new clientsConsensus + case 
exported.Tendermint: - // prune expired consensus state + // only add non expired consensus states to new clientsConsensus tmClientState, ok := client.ClientState.GetCachedValue().(*ibctmtypes.ClientState) if !ok { return nil, types.ErrInvalidClient } - // collect the consensus state index for expired tendermint consensus states - var tmConsStateByIndex []int - - for i, consState := range clientConsensusState.ConsensusStates { + // collect unexpired consensus states + var unexpiredConsensusStates []types.ConsensusStateWithHeight + for _, consState := range clientConsensusStates.ConsensusStates { tmConsState := consState.ConsensusState.GetCachedValue().(*ibctmtypes.ConsensusState) - if tmClientState.IsExpired(tmConsState.Timestamp, genesisBlockTime) { - tmConsStateByIndex = append(tmConsStateByIndex, i) + if !tmClientState.IsExpired(tmConsState.Timestamp, genesisBlockTime) { + unexpiredConsensusStates = append(unexpiredConsensusStates, consState) } } - // remove all expired tendermint consensus states - for _, index := range tmConsStateByIndex { - for i, identifiedGenMetadata := range clientGenState.ClientsMetadata { + // if we found at least one unexpired consensus state, create a clientConsensusState + // and add it to clientsConsensus + if len(unexpiredConsensusStates) != 0 { + clientsConsensus = append(clientsConsensus, types.ClientConsensusStates{ + ClientId: client.ClientId, + ConsensusStates: unexpiredConsensusStates, + }) + } + + // remove all expired tendermint consensus state metadata by adding only + // unexpired consensus state metadata + for _, consState := range unexpiredConsensusStates { + for _, identifiedGenMetadata := range clientGenState.ClientsMetadata { // look for metadata for current client if identifiedGenMetadata.ClientId == client.ClientId { - // collect the metadata indicies to be removed - var tmConsMetadataByIndex []int - // obtain height for consensus state being pruned - height := clientConsensusState.ConsensusStates[index].Height + 
height := consState.Height - // iterate throught metadata and find metadata which should be pruned - for j, metadata := range identifiedGenMetadata.ClientMetadata { + // iterate through metadata and find metadata which should be pruned + // only unexpired consensus state heights should be added + var clientMetadata []types.GenesisMetadata + for _, metadata := range identifiedGenMetadata.ClientMetadata { if bytes.Equal(metadata.Key, ibctmtypes.IterationKey(height)) || bytes.Equal(metadata.Key, ibctmtypes.ProcessedTimeKey(height)) || bytes.Equal(metadata.Key, ibctmtypes.ProcessedHeightKey(height)) { - tmConsMetadataByIndex = append(tmConsMetadataByIndex, j) + clientMetadata = append(clientMetadata, metadata) } } - for _, metadataIndex := range tmConsMetadataByIndex { - clientGenState.ClientsMetadata[i].ClientMetadata = append(clientGenState.ClientsMetadata[i].ClientMetadata[:metadataIndex], clientGenState.ClientsMetadata[i].ClientMetadata[metadataIndex+1:]...) + // if we have metadata for unexipred consensus states, add it to consensusMetadata + if len(clientMetadata) != 0 { + clientsMetadata = append(clientsMetadata, types.IdentifiedGenesisMetadata{ + ClientId: client.ClientId, + ClientMetadata: clientMetadata, + }) } } } - - // remove client state - clientGenState.ClientsConsensus[i] = types.ClientConsensusStates{ - ClientId: clientConsensusState.ClientId, - ConsensusStates: append(clientConsensusState.ConsensusStates[:index], clientConsensusState.ConsensusStates[index+1:]...), - } } default: @@ -117,12 +125,9 @@ func MigrateGenesis(cdc codec.BinaryCodec, clientGenState *types.GenesisState, g } } } - - // remove all solo machine consensus states - for _, index := range smConsStateByIndex { - clientGenState.ClientsConsensus = append(clientGenState.ClientsConsensus[:index], clientGenState.ClientsConsensus[index+1:]...) 
- } } + clientGenState.ClientsConsensus = clientsConsensus + clientGenState.ClientsMetadata = clientsMetadata return clientGenState, nil } diff --git a/modules/core/02-client/legacy/v100/genesis_test.go b/modules/core/02-client/legacy/v100/genesis_test.go index a853148f1f4..e600f58aa1f 100644 --- a/modules/core/02-client/legacy/v100/genesis_test.go +++ b/modules/core/02-client/legacy/v100/genesis_test.go @@ -3,7 +3,7 @@ package v100_test import ( "encoding/json" "fmt" - "testing" + "time" "github.com/cosmos/cosmos-sdk/client" "github.com/cosmos/cosmos-sdk/codec" @@ -13,6 +13,8 @@ import ( v100 "github.com/cosmos/ibc-go/modules/core/02-client/legacy/v100" "github.com/cosmos/ibc-go/modules/core/02-client/types" host "github.com/cosmos/ibc-go/modules/core/24-host" + "github.com/cosmos/ibc-go/modules/core/exported" + ibctmtypes "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types" ibctesting "github.com/cosmos/ibc-go/testing" "github.com/cosmos/ibc-go/testing/simapp" ) @@ -146,5 +148,125 @@ func (suite *LegacyTestSuite) TestMigrateGenesisSolomachine() { suite.Require().Equal(string(expectedIndentedBz), string(indentedBz)) } -func TestMigrateGenesisTendermint(t *testing.T) { +func (suite *LegacyTestSuite) TestMigrateGenesisTendermint() { + // create two paths and setup clients + path1 := ibctesting.NewPath(suite.chainA, suite.chainB) + path2 := ibctesting.NewPath(suite.chainA, suite.chainB) + encodingConfig := simapp.MakeTestEncodingConfig() + clientCtx := client.Context{}. + WithInterfaceRegistry(encodingConfig.InterfaceRegistry). + WithTxConfig(encodingConfig.TxConfig). 
+ WithJSONCodec(encodingConfig.Marshaler) + + suite.coordinator.SetupClients(path1) + suite.coordinator.SetupClients(path2) + + // collect all heights expected to be pruned + var path1PruneHeights, path2PruneHeights []exported.Height + path1PruneHeights = append(path1PruneHeights, path1.EndpointA.GetClientState().GetLatestHeight()) + path2PruneHeights = append(path2PruneHeights, path2.EndpointA.GetClientState().GetLatestHeight()) + + // these heights will be expired and also pruned + for i := 0; i < 3; i++ { + path1.EndpointA.UpdateClient() + path1PruneHeights = append(path1PruneHeights, path1.EndpointA.GetClientState().GetLatestHeight()) + } + for i := 0; i < 3; i++ { + path2.EndpointA.UpdateClient() + path2PruneHeights = append(path2PruneHeights, path2.EndpointA.GetClientState().GetLatestHeight()) + } + + // Increment the time by a week + suite.coordinator.IncrementTimeBy(7 * 24 * time.Hour) + + // create the consensus state that can be used as trusted height for next update + path1.EndpointA.UpdateClient() + path2.EndpointA.UpdateClient() + + clientGenState := ibcclient.ExportGenesis(suite.chainA.GetContext(), suite.chainA.App.GetIBCKeeper().ClientKeeper) + suite.Require().NotNil(clientGenState.Clients) + suite.Require().NotNil(clientGenState.ClientsConsensus) + suite.Require().NotNil(clientGenState.ClientsMetadata) + + // Increment the time by another week, then update the client. 
+ // This will cause the consensus states created before the first time increment + // to be expired + suite.coordinator.IncrementTimeBy(7 * 24 * time.Hour) + + // migrate store get expected genesis + // store migration and genesis migration should produce identical results + err := v100.MigrateStore(path1.EndpointA.Chain.GetContext(), path1.EndpointA.Chain.GetSimApp().GetKey(host.StoreKey), path1.EndpointA.Chain.App.AppCodec()) + suite.Require().NoError(err) + expectedClientGenState := ibcclient.ExportGenesis(path1.EndpointA.Chain.GetContext(), path1.EndpointA.Chain.App.GetIBCKeeper().ClientKeeper) + + migrated, err := v100.MigrateGenesis(codec.NewProtoCodec(clientCtx.InterfaceRegistry), &clientGenState, suite.coordinator.CurrentTime) + suite.Require().NoError(err) + + // check path 1 client pruning + // fmt.Println(migrated.ClientsConsensus) + for _, height := range path1PruneHeights { + for _, client := range migrated.ClientsConsensus { + if client.ClientId == path1.EndpointA.ClientID { + for _, consensusState := range client.ConsensusStates { + suite.Require().NotEqual(height, consensusState.Height) + } + } + + } + for _, client := range migrated.ClientsMetadata { + if client.ClientId == path1.EndpointA.ClientID { + for _, metadata := range client.ClientMetadata { + suite.Require().NotEqual(ibctmtypes.ProcessedTimeKey(height), metadata.Key) + suite.Require().NotEqual(ibctmtypes.ProcessedHeightKey(height), metadata.Key) + suite.Require().NotEqual(ibctmtypes.IterationKey(height), metadata.Key) + } + } + } + } + + // check path 2 client pruning + for _, height := range path2PruneHeights { + for _, client := range migrated.ClientsConsensus { + if client.ClientId == path2.EndpointA.ClientID { + for _, consensusState := range client.ConsensusStates { + suite.Require().NotEqual(height, consensusState.Height) + } + } + + } + for _, client := range migrated.ClientsMetadata { + if client.ClientId == path2.EndpointA.ClientID { + for _, metadata := range 
client.ClientMetadata { + suite.Require().NotEqual(ibctmtypes.ProcessedTimeKey(height), metadata.Key) + suite.Require().NotEqual(ibctmtypes.ProcessedHeightKey(height), metadata.Key) + suite.Require().NotEqual(ibctmtypes.IterationKey(height), metadata.Key) + } + } + + } + } + bz, err := clientCtx.JSONCodec.MarshalJSON(&expectedClientGenState) + suite.Require().NoError(err) + + // Indent the JSON bz correctly. + var jsonObj map[string]interface{} + err = json.Unmarshal(bz, &jsonObj) + suite.Require().NoError(err) + expectedIndentedBz, err := json.MarshalIndent(jsonObj, "", "\t") + suite.Require().NoError(err) + + bz, err = clientCtx.JSONCodec.MarshalJSON(migrated) + suite.Require().NoError(err) + + // Indent the JSON bz correctly. + err = json.Unmarshal(bz, &jsonObj) + suite.Require().NoError(err) + indentedBz, err := json.MarshalIndent(jsonObj, "", "\t") + suite.Require().NoError(err) + + fmt.Println(string(indentedBz)) + + fmt.Println(string(expectedIndentedBz)) + + suite.Require().Equal(string(expectedIndentedBz), string(indentedBz)) } From a24618d642cce7b35d742fadca2628926bb63417 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Colin=20Axn=C3=A9r?= <25233464+colin-axner@users.noreply.github.com> Date: Wed, 9 Jun 2021 18:03:45 +0200 Subject: [PATCH 11/21] test fix, changelog, migration docs --- CHANGELOG.md | 1 + docs/migrations/ibc-migration-043.md | 38 ++++++++++++++++++ modules/core/legacy/v100/genesis.go | 40 +++++++++++++++++++ modules/core/module.go | 2 +- .../07-tendermint/types/update.go | 1 - .../07-tendermint/types/update_test.go | 2 +- 6 files changed, 81 insertions(+), 3 deletions(-) create mode 100644 modules/core/legacy/v100/genesis.go diff --git a/CHANGELOG.md b/CHANGELOG.md index a559bed4e9c..cdbb4df2cd2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -70,6 +70,7 @@ Ref: https://keepachangelog.com/en/1.0.0/ ### Improvements +* (core/02-client) [\#205](https://github.com/cosmos/ibc-go/pull/205) Add in-place and genesis migrations from SDK v0.42.0 to ibc-go 
v1.0.0. Solo machine protobuf defintions are migrated from v1 to v2. All solo machine consensus states are pruned. All expired tendermint consensus states are pruned. * (modules/core) [\#184](https://github.com/cosmos/ibc-go/pull/184) Improve error messages. Uses unique error codes to indicate already relayed packets. * (07-tendermint) [\#182](https://github.com/cosmos/ibc-go/pull/182) Remove duplicate checks in upgrade logic. * (modules/core/04-channel) [\#7949](https://github.com/cosmos/cosmos-sdk/issues/7949) Standardized channel `Acknowledgement` moved to its own file. Codec registration redundancy removed. diff --git a/docs/migrations/ibc-migration-043.md b/docs/migrations/ibc-migration-043.md index 177bb3c7ef9..3c277bc30c5 100644 --- a/docs/migrations/ibc-migration-043.md +++ b/docs/migrations/ibc-migration-043.md @@ -27,6 +27,44 @@ Feel free to use your own method for modifying import names. NOTE: Updating to the `v0.43.0` SDK release and then running `go mod tidy` will cause a downgrade to `v0.42.0` in order to support the old IBC import paths. Update the import paths before running `go mod tidy`. +## Chain Upgrades + +Chains may choose to upgrade via an upgrade proposal or genesis upgrades. Both in-place store migrations and genesis migrations are supported. + +**WARNING**: Please read at least the quick guide for [IBC client upgrades](../ibc/upgrades/README.md) before upgrading your chain. It is highly recommended to not change the chain id during an upgrade, otherwise you must follow the IBC client upgrade instructions. + +Both in-place store migrations and genesis migrations will: +- update the solo machine client state to using the v2 solo machine protobuf defintion +- prune all solo machine consensus states +- prune all expired tendermint consensus states + +### In-Place Store Migrations + +In place store migrations will automatically be run after the binary is swapped and the application is started again during an upgrade proposal. 
+ +### Genesis Migrations + +To perform genesis migrations, the following code must be added to your existing migration code. + +```go +// add imports as necessary +import ( + "github.com/cosmos/cosmos-sdk/codec" + ibcv100"github.com/cosmos/ibc-go/modules/core/legacy/v100" + ibchost "github.com/cosmos/ibc-go/modules/core/24-host" +) + +... + +// add in migrate cmd function +newGenState, err = ibcv100.MigrateGenesis(clientCtx.InterfaceRegistry, newGenState, genDoc.GenesisTime) +if err != nil { + return err +} +``` + +**NOTE:** The genesis time MUST be updated before migrating IBC, otherwise the tendermint consensus state will not be pruned. + ## IBC Keeper Changes The IBC Keeper now takes in the Upgrade Keeper. Please add the chains' Upgrade Keeper after the Staking Keeper: diff --git a/modules/core/legacy/v100/genesis.go b/modules/core/legacy/v100/genesis.go new file mode 100644 index 00000000000..28fbc1a42a4 --- /dev/null +++ b/modules/core/legacy/v100/genesis.go @@ -0,0 +1,40 @@ +package v100 + +import ( + "time" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/codec" + genutiltypes "github.com/cosmos/cosmos-sdk/x/genutil/types" + + clientv100 "github.com/cosmos/ibc-go/modules/core/02-client/legacy/v100" + host "github.com/cosmos/ibc-go/modules/core/24-host" + "github.com/cosmos/ibc-go/modules/core/types" +) + +// MigrateGenesis accepts exported v1.0.0 IBC client genesis file and migrates it to: +// +// - Update solo machine client state protobuf definition (v1 to v2) +// - Remove all solo machine consensus states +// - Remove all expired tendermint consensus states +func MigrateGenesis(appState genutiltypes.AppMap, clientCtx client.Context, genesisBlockTime time.Time) (genutiltypes.AppMap, error) { + if appState[host.ModuleName] != nil { + // unmarshal relative source genesis application state + ibcGenState := &types.GenesisState{} + clientCtx.JSONCodec.MustUnmarshalJSON(appState[host.ModuleName], ibcGenState) + + clientGenState, err := 
clientv100.MigrateGenesis(codec.NewProtoCodec(clientCtx.InterfaceRegistry), &ibcGenState.ClientGenesis, genesisBlockTime) + if err != nil { + return nil, err + } + + ibcGenState.ClientGenesis = *clientGenState + + // delete old genesis state + delete(appState, host.ModuleName) + + // set new ibc genesis state + appState[host.ModuleName] = clientCtx.JSONCodec.MustMarshalJSON(ibcGenState) + } + return appState, nil +} diff --git a/modules/core/module.go b/modules/core/module.go index dac61e776cf..db7aaba3e33 100644 --- a/modules/core/module.go +++ b/modules/core/module.go @@ -161,7 +161,7 @@ func (am AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONCodec) json.Raw } // ConsensusVersion implements AppModule/ConsensusVersion. -func (AppModule) ConsensusVersion() uint64 { return 1 } +func (AppModule) ConsensusVersion() uint64 { return 2 } // BeginBlock returns the begin blocker for the ibc module. func (am AppModule) BeginBlock(ctx sdk.Context, req abci.RequestBeginBlock) { diff --git a/modules/light-clients/07-tendermint/types/update.go b/modules/light-clients/07-tendermint/types/update.go index c70746b4f74..c2e6788fed1 100644 --- a/modules/light-clients/07-tendermint/types/update.go +++ b/modules/light-clients/07-tendermint/types/update.go @@ -134,7 +134,6 @@ func (cs ClientState) CheckHeaderAndUpdateState( } // if pruneHeight is set, delete consensus state and metadata if pruneHeight != nil { - deleteConsensusState(clientStore, pruneHeight) deleteConsensusMetadata(clientStore, pruneHeight) } diff --git a/modules/light-clients/07-tendermint/types/update_test.go b/modules/light-clients/07-tendermint/types/update_test.go index 6626e73b5da..db074eee5d2 100644 --- a/modules/light-clients/07-tendermint/types/update_test.go +++ b/modules/light-clients/07-tendermint/types/update_test.go @@ -428,7 +428,7 @@ func (suite *TendermintTestSuite) TestPruneConsensusState() { suite.Require().Equal(uint64(0), processTime, "processed time metadata not pruned") 
suite.Require().False(ok) processHeight, ok := types.GetProcessedHeight(clientStore, pruneHeight) - suite.Require().Equal(uint64(0), processHeight, "processed height metadata not pruned") + suite.Require().Nil(processHeight, "processed height metadata not pruned") suite.Require().False(ok) // check iteration key metadata is pruned From 6d9bcdda50155c1903ddf430db5b2b49b80e7b1e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?colin=20axn=C3=A9r?= <25233464+colin-axner@users.noreply.github.com> Date: Wed, 9 Jun 2021 18:23:11 +0200 Subject: [PATCH 12/21] Apply suggestions from code review --- docs/migrations/ibc-migration-043.md | 5 ++--- modules/core/02-client/legacy/v100/genesis_test.go | 2 -- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/docs/migrations/ibc-migration-043.md b/docs/migrations/ibc-migration-043.md index 3c277bc30c5..755157224fa 100644 --- a/docs/migrations/ibc-migration-043.md +++ b/docs/migrations/ibc-migration-043.md @@ -31,10 +31,10 @@ Update the import paths before running `go mod tidy`. Chains may choose to upgrade via an upgrade proposal or genesis upgrades. Both in-place store migrations and genesis migrations are supported. -**WARNING**: Please read at least the quick guide for [IBC client upgrades](../ibc/upgrades/README.md) before upgrading your chain. It is highly recommended to not change the chain id during an upgrade, otherwise you must follow the IBC client upgrade instructions. +**WARNING**: Please read at least the quick guide for [IBC client upgrades](../ibc/upgrades/README.md) before upgrading your chain. It is highly recommended you do not change the chain-ID during an upgrade, otherwise you must follow the IBC client upgrade instructions. 
Both in-place store migrations and genesis migrations will: -- update the solo machine client state to using the v2 solo machine protobuf defintion +- migrate the solo machine client state from v1 to v2 protobuf definitions - prune all solo machine consensus states - prune all expired tendermint consensus states @@ -49,7 +49,6 @@ To perform genesis migrations, the following code must be added to your existing ```go // add imports as necessary import ( - "github.com/cosmos/cosmos-sdk/codec" ibcv100"github.com/cosmos/ibc-go/modules/core/legacy/v100" ibchost "github.com/cosmos/ibc-go/modules/core/24-host" ) diff --git a/modules/core/02-client/legacy/v100/genesis_test.go b/modules/core/02-client/legacy/v100/genesis_test.go index e600f58aa1f..29794f9dd58 100644 --- a/modules/core/02-client/legacy/v100/genesis_test.go +++ b/modules/core/02-client/legacy/v100/genesis_test.go @@ -203,7 +203,6 @@ func (suite *LegacyTestSuite) TestMigrateGenesisTendermint() { suite.Require().NoError(err) // check path 1 client pruning - // fmt.Println(migrated.ClientsConsensus) for _, height := range path1PruneHeights { for _, client := range migrated.ClientsConsensus { if client.ClientId == path1.EndpointA.ClientID { @@ -266,7 +265,6 @@ func (suite *LegacyTestSuite) TestMigrateGenesisTendermint() { fmt.Println(string(indentedBz)) - fmt.Println(string(expectedIndentedBz)) suite.Require().Equal(string(expectedIndentedBz), string(indentedBz)) } From a67102fec5172aaf1c67e98c87ce96c9900fb583 Mon Sep 17 00:00:00 2001 From: Aditya Date: Wed, 9 Jun 2021 14:10:19 -0400 Subject: [PATCH 13/21] Update docs/migrations/ibc-migration-043.md --- docs/migrations/ibc-migration-043.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/migrations/ibc-migration-043.md b/docs/migrations/ibc-migration-043.md index 755157224fa..d81b26041d3 100644 --- a/docs/migrations/ibc-migration-043.md +++ b/docs/migrations/ibc-migration-043.md @@ -49,7 +49,7 @@ To perform genesis migrations, the following 
code must be added to your existing ```go // add imports as necessary import ( - ibcv100"github.com/cosmos/ibc-go/modules/core/legacy/v100" + ibcv100 "github.com/cosmos/ibc-go/modules/core/legacy/v100" ibchost "github.com/cosmos/ibc-go/modules/core/24-host" ) From 7e1f40fd5122b5fb09a57d20d23a2dc587fea764 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Colin=20Axn=C3=A9r?= <25233464+colin-axner@users.noreply.github.com> Date: Thu, 10 Jun 2021 11:44:19 +0200 Subject: [PATCH 14/21] apply Aditya's review suggestions --- modules/core/02-client/legacy/v100/genesis.go | 13 +++-- .../02-client/legacy/v100/genesis_test.go | 47 ++++++++++++++++++- modules/core/02-client/legacy/v100/store.go | 44 ++++++++++++++++- .../core/02-client/legacy/v100/store_test.go | 17 +++++++ 4 files changed, 114 insertions(+), 7 deletions(-) diff --git a/modules/core/02-client/legacy/v100/genesis.go b/modules/core/02-client/legacy/v100/genesis.go index 7e94dc3c9cf..093eccc2c46 100644 --- a/modules/core/02-client/legacy/v100/genesis.go +++ b/modules/core/02-client/legacy/v100/genesis.go @@ -17,7 +17,7 @@ import ( // - Update solo machine client state protobuf definition (v1 to v2) // - Remove all solo machine consensus states // - Remove all expired tendermint consensus states -func MigrateGenesis(cdc codec.BinaryCodec, clientGenState *types.GenesisState, genesisBlockTime time.Time) (*types.GenesisState, error) { +func MigrateGenesis(cdc codec.BinaryCodec, clientGenState *types.GenesisState, genesisBlockTime time.Time, selfHeight exported.Height) (*types.GenesisState, error) { // To prune the consensus states, we will create new clientsConsensus // and clientsMetadata. These slices will be filled up with consensus states // which should not be pruned. 
No solo machine consensus states should be added @@ -102,9 +102,14 @@ func MigrateGenesis(cdc codec.BinaryCodec, clientGenState *types.GenesisState, g // only unexpired consensus state heights should be added var clientMetadata []types.GenesisMetadata for _, metadata := range identifiedGenMetadata.ClientMetadata { - if bytes.Equal(metadata.Key, ibctmtypes.IterationKey(height)) || - bytes.Equal(metadata.Key, ibctmtypes.ProcessedTimeKey(height)) || - bytes.Equal(metadata.Key, ibctmtypes.ProcessedHeightKey(height)) { + // the previous version of IBC only contained the processed time metadata + // if we find the processed time metadata for an unexpired height, add the + // iteration key and processed height keys. + if bytes.Equal(metadata.Key, ibctmtypes.ProcessedTimeKey(height)) { + clientMetadata = append(clientMetadata, types.GenesisMetadata{ + Key: ibctmtypes.ProcessedHeightKey(height), + Value: []byte(selfHeight.String()), + }) clientMetadata = append(clientMetadata, metadata) } } diff --git a/modules/core/02-client/legacy/v100/genesis_test.go b/modules/core/02-client/legacy/v100/genesis_test.go index e600f58aa1f..6ccc8a1b7a1 100644 --- a/modules/core/02-client/legacy/v100/genesis_test.go +++ b/modules/core/02-client/legacy/v100/genesis_test.go @@ -1,6 +1,7 @@ package v100_test import ( + "bytes" "encoding/json" "fmt" "time" @@ -120,8 +121,29 @@ func (suite *LegacyTestSuite) TestMigrateGenesisSolomachine() { suite.Require().NoError(err) expectedClientGenState := ibcclient.ExportGenesis(path.EndpointA.Chain.GetContext(), path.EndpointA.Chain.App.GetIBCKeeper().ClientKeeper) + // 'ExportGenesis' order metadata keys by processedheight, processedtime for all heights, then it appends all iteration keys + // In order to match the genesis migration with export genesis we must reorder the iteration keys to be last + // This isn't ideal, but it is better than modifying the genesis migration from a previous version to match the export genesis of a new version + // which 
provides no benefit except nicer testing + for i, clientMetadata := range expectedClientGenState.ClientsMetadata { + var updatedMetadata []types.GenesisMetadata + var iterationKeys []types.GenesisMetadata + for _, metadata := range clientMetadata.ClientMetadata { + if bytes.HasPrefix(metadata.Key, []byte(ibctmtypes.KeyIterateConsensusStatePrefix)) { + iterationKeys = append(iterationKeys, metadata) + } else { + updatedMetadata = append(updatedMetadata, metadata) + } + } + updatedMetadata = append(updatedMetadata, iterationKeys...) + expectedClientGenState.ClientsMetadata[i] = types.IdentifiedGenesisMetadata{ + ClientId: clientMetadata.ClientId, + ClientMetadata: updatedMetadata, + } + } + // NOTE: genesis time isn't updated since we aren't testing for tendermint consensus state pruning - migrated, err := v100.MigrateGenesis(codec.NewProtoCodec(clientCtx.InterfaceRegistry), &clientGenState, suite.coordinator.CurrentTime) + migrated, err := v100.MigrateGenesis(codec.NewProtoCodec(clientCtx.InterfaceRegistry), &clientGenState, suite.coordinator.CurrentTime, types.GetSelfHeight(suite.chainA.GetContext())) suite.Require().NoError(err) bz, err := clientCtx.JSONCodec.MarshalJSON(&expectedClientGenState) @@ -199,7 +221,28 @@ func (suite *LegacyTestSuite) TestMigrateGenesisTendermint() { suite.Require().NoError(err) expectedClientGenState := ibcclient.ExportGenesis(path1.EndpointA.Chain.GetContext(), path1.EndpointA.Chain.App.GetIBCKeeper().ClientKeeper) - migrated, err := v100.MigrateGenesis(codec.NewProtoCodec(clientCtx.InterfaceRegistry), &clientGenState, suite.coordinator.CurrentTime) + // 'ExportGenesis' order metadata keys by processedheight, processedtime for all heights, then it appends all iteration keys + // In order to match the genesis migration with export genesis we must reorder the iteration keys to be last + // This isn't ideal, but it is better than modifying the genesis migration from a previous version to match the export genesis of a new version + // 
which provides no benefit except nicer testing + for i, clientMetadata := range expectedClientGenState.ClientsMetadata { + var updatedMetadata []types.GenesisMetadata + var iterationKeys []types.GenesisMetadata + for _, metadata := range clientMetadata.ClientMetadata { + if bytes.HasPrefix(metadata.Key, []byte(ibctmtypes.KeyIterateConsensusStatePrefix)) { + iterationKeys = append(iterationKeys, metadata) + } else { + updatedMetadata = append(updatedMetadata, metadata) + } + } + updatedMetadata = append(updatedMetadata, iterationKeys...) + expectedClientGenState.ClientsMetadata[i] = types.IdentifiedGenesisMetadata{ + ClientId: clientMetadata.ClientId, + ClientMetadata: updatedMetadata, + } + } + + migrated, err := v100.MigrateGenesis(codec.NewProtoCodec(clientCtx.InterfaceRegistry), &clientGenState, suite.coordinator.CurrentTime, types.GetSelfHeight(suite.chainA.GetContext())) suite.Require().NoError(err) // check path 1 client pruning diff --git a/modules/core/02-client/legacy/v100/store.go b/modules/core/02-client/legacy/v100/store.go index 1ca2e2fa856..41178cf88d5 100644 --- a/modules/core/02-client/legacy/v100/store.go +++ b/modules/core/02-client/legacy/v100/store.go @@ -87,7 +87,16 @@ func MigrateStore(ctx sdk.Context, storeKey sdk.StoreKey, cdc codec.BinaryCodec) return sdkerrors.Wrap(err, "failed to unmarshal client state bytes into tendermint client state") } - if err = ibctmtypes.PruneAllExpiredConsensusStates(ctx, clientStore, cdc, clientState.(*ibctmtypes.ClientState)); err != nil { + tmClientState, ok := clientState.(*ibctmtypes.ClientState) + if !ok { + return sdkerrors.Wrap(types.ErrInvalidClient, "client state is not tendermint even though client id contains 07-tendermint") + } + + if err = ibctmtypes.PruneAllExpiredConsensusStates(ctx, clientStore, cdc, tmClientState); err != nil { + return err + } + + if err = addConsensusMetadata(ctx, clientStore, cdc, tmClientState); err != nil { return err } @@ -139,3 +148,36 @@ func 
pruneSolomachineConsensusStates(clientStore sdk.KVStore) { clientStore.Delete(host.ConsensusStateKey(height)) } } + +// addConsensusMetadata adds the iteration key and processed height for all unexpired tendermint consensus states +// These keys were not included in the previous release of the IBC module. +func addConsensusMetadata(ctx sdk.Context, clientStore sdk.KVStore, cdc codec.BinaryCodec, clientState *ibctmtypes.ClientState) error { + var heights []exported.Height + + metadataCb := func(height exported.Height) bool { + consState, err := ibctmtypes.GetConsensusState(clientStore, cdc, height) + // this error should never occur + if err != nil { + return true + } + + if !clientState.IsExpired(consState.Timestamp, ctx.BlockTime()) { + heights = append(heights, height) + } + + return false + } + + if err := ibctmtypes.IterateConsensusStateAscending(clientStore, metadataCb); err != nil { + return err + } + + for _, height := range heights { + // set the iteration key and processed height + // these keys were not included in the SDK v0.42.0 release + ibctmtypes.SetProcessedHeight(clientStore, height, clienttypes.GetSelfHeight(ctx)) + ibctmtypes.SetIterationKey(clientStore, height) + } + + return nil +} diff --git a/modules/core/02-client/legacy/v100/store_test.go b/modules/core/02-client/legacy/v100/store_test.go index ee9934d812d..1ab17a44b9f 100644 --- a/modules/core/02-client/legacy/v100/store_test.go +++ b/modules/core/02-client/legacy/v100/store_test.go @@ -185,4 +185,21 @@ func (suite *LegacyTestSuite) TestMigrateStoreTendermint() { expectedConsKey := ibctmtypes.GetIterationKey(clientStore, pruneHeight) suite.Require().Nil(expectedConsKey, i) } + + // ensure metadata is set for unexpired consensus state + height := path.EndpointA.GetClientState().GetLatestHeight() + consState, ok := path.EndpointA.Chain.GetConsensusState(path.EndpointA.ClientID, height) + suite.Require().True(ok) + suite.Require().NotNil(consState) + + processedTime, ok := 
ibctmtypes.GetProcessedTime(clientStore, height) + suite.Require().True(ok) + suite.Require().NotEqual(uint64(0), processedTime) + + processedHeight, ok := ibctmtypes.GetProcessedHeight(clientStore, height) + suite.Require().True(ok) + suite.Require().Equal(types.GetSelfHeight(path.EndpointA.Chain.GetContext()), processedHeight) + + consKey := ibctmtypes.GetIterationKey(clientStore, height) + suite.Require().Equal(host.ConsensusStateKey(height), consKey) } From 7edac1e68d36c94ddf590dffb5bcdd2befe73dcb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Colin=20Axn=C3=A9r?= <25233464+colin-axner@users.noreply.github.com> Date: Thu, 10 Jun 2021 11:46:39 +0200 Subject: [PATCH 15/21] fix tests --- modules/core/02-client/legacy/v100/genesis.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/modules/core/02-client/legacy/v100/genesis.go b/modules/core/02-client/legacy/v100/genesis.go index 093eccc2c46..cdfa7a8348b 100644 --- a/modules/core/02-client/legacy/v100/genesis.go +++ b/modules/core/02-client/legacy/v100/genesis.go @@ -8,6 +8,7 @@ import ( sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" "github.com/cosmos/ibc-go/modules/core/02-client/types" + host "github.com/cosmos/ibc-go/modules/core/24-host" "github.com/cosmos/ibc-go/modules/core/exported" ibctmtypes "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types" ) @@ -111,6 +112,11 @@ func MigrateGenesis(cdc codec.BinaryCodec, clientGenState *types.GenesisState, g Value: []byte(selfHeight.String()), }) clientMetadata = append(clientMetadata, metadata) + clientMetadata = append(clientMetadata, types.GenesisMetadata{ + Key: ibctmtypes.IterationKey(height), + Value: host.ConsensusStateKey(height), + }) + } } From e326f743d4310343de65249f776cf464b72610b1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Colin=20Axn=C3=A9r?= <25233464+colin-axner@users.noreply.github.com> Date: Thu, 10 Jun 2021 14:10:05 +0200 Subject: [PATCH 16/21] add genesis json unmarshal test Test that the legacy solo machines can be 
successfully unmarshalled. This requires registering an implementation for the legacy solo machine. An implemenation which panics has been added. This implementation should only be registered against a clientCtx during a migrate cli cmd. The implementation is only briefly used in order to decode the previous solo machine set in genesis. --- docs/migrations/ibc-migration-043.md | 4 +- modules/core/02-client/keeper/migrations.go | 11 +- modules/core/02-client/legacy/v100/genesis.go | 1 + .../core/02-client/legacy/v100/solomachine.go | 208 ++++++++++++++++++ modules/core/02-client/legacy/v100/store.go | 1 + modules/core/exported/client.go | 3 +- modules/core/keeper/migrations.go | 9 +- modules/core/legacy/v100/genesis.go | 11 +- modules/core/legacy/v100/genesis_test.go | 198 +++++++++++++++++ 9 files changed, 427 insertions(+), 19 deletions(-) create mode 100644 modules/core/02-client/legacy/v100/solomachine.go create mode 100644 modules/core/legacy/v100/genesis_test.go diff --git a/docs/migrations/ibc-migration-043.md b/docs/migrations/ibc-migration-043.md index d81b26041d3..6a3a3828b5a 100644 --- a/docs/migrations/ibc-migration-043.md +++ b/docs/migrations/ibc-migration-043.md @@ -56,13 +56,13 @@ import ( ... // add in migrate cmd function -newGenState, err = ibcv100.MigrateGenesis(clientCtx.InterfaceRegistry, newGenState, genDoc.GenesisTime) +newGenState, err = ibcv100.MigrateGenesis(clientCtx.InterfaceRegistry, newGenState, genDoc) if err != nil { return err } ``` -**NOTE:** The genesis time MUST be updated before migrating IBC, otherwise the tendermint consensus state will not be pruned. +**NOTE:** The genesis chain-id, time and height MUST be updated before migrating IBC, otherwise the tendermint consensus state will not be pruned. 
## IBC Keeper Changes diff --git a/modules/core/02-client/keeper/migrations.go b/modules/core/02-client/keeper/migrations.go index 3076b628129..5f2088d6ccc 100644 --- a/modules/core/02-client/keeper/migrations.go +++ b/modules/core/02-client/keeper/migrations.go @@ -17,12 +17,11 @@ func NewMigrator(keeper Keeper) Migrator { } // Migrate1to2 migrates from version 1 to 2. -// This migration prunes: -// - solo machine consensus states -// - expired tendermint consensus states -// -// This migration migrates: -// - solo machine client states from v1 to v2 protobuf definition +// This migration +// - migrates solo machine client states from v1 to v2 protobuf definition +// - prunes solo machine consensus states +// - prunes expired tendermint consensus states +// - adds iteration and processed height keys for unexpired tendermint consensus states func (m Migrator) Migrate1to2(ctx sdk.Context) error { return v100.MigrateStore(ctx, m.keeper.storeKey, m.keeper.cdc) } diff --git a/modules/core/02-client/legacy/v100/genesis.go b/modules/core/02-client/legacy/v100/genesis.go index cdfa7a8348b..b62ea64632f 100644 --- a/modules/core/02-client/legacy/v100/genesis.go +++ b/modules/core/02-client/legacy/v100/genesis.go @@ -18,6 +18,7 @@ import ( // - Update solo machine client state protobuf definition (v1 to v2) // - Remove all solo machine consensus states // - Remove all expired tendermint consensus states +// - Adds ProcessedHeight and Iteration keys for unexpired tendermint consensus states func MigrateGenesis(cdc codec.BinaryCodec, clientGenState *types.GenesisState, genesisBlockTime time.Time, selfHeight exported.Height) (*types.GenesisState, error) { // To prune the consensus states, we will create new clientsConsensus // and clientsMetadata. 
These slices will be filled up with consensus states diff --git a/modules/core/02-client/legacy/v100/solomachine.go b/modules/core/02-client/legacy/v100/solomachine.go new file mode 100644 index 00000000000..80b062faff1 --- /dev/null +++ b/modules/core/02-client/legacy/v100/solomachine.go @@ -0,0 +1,208 @@ +package v100 + +import ( + ics23 "github.com/confio/ics23/go" + "github.com/cosmos/cosmos-sdk/codec" + codectypes "github.com/cosmos/cosmos-sdk/codec/types" + cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types" + sdk "github.com/cosmos/cosmos-sdk/types" + + "github.com/cosmos/ibc-go/modules/core/exported" +) + +// NOTE: this is a mock implmentation for exported.ClientState. This implementation +// should only be registered on the InterfaceRegistry during cli command genesis migration. +// This implementation is only used to successfully unmarshal the previous solo machine +// client state and consensus state and migrate them to the new implementations. When the proto +// codec unmarshals, it calls UnpackInterfaces() to create a cached value of the any. The +// UnpackInterfaces function for IdenitifiedClientState will attempt to unpack the any to +// exported.ClientState. If the solomachine v1 type is not registered against the exported.ClientState +// the unmarshal will fail. This implementation will panic on every interface function. +// The same is done for the ConsensusState. + +// Interface implementation checks. 
+var ( + _, _ codectypes.UnpackInterfacesMessage = &ClientState{}, &ConsensusState{} + _ exported.ClientState = (*ClientState)(nil) + _ exported.ConsensusState = &ConsensusState{} +) + +func RegisterInterfaces(registry codectypes.InterfaceRegistry) { + registry.RegisterImplementations( + (*exported.ClientState)(nil), + &ClientState{}, + ) + registry.RegisterImplementations( + (*exported.ConsensusState)(nil), + &ConsensusState{}, + ) +} + +// UnpackInterfaces implements the UnpackInterfaceMessages.UnpackInterfaces method +func (cs ClientState) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error { + return cs.ConsensusState.UnpackInterfaces(unpacker) +} + +// UnpackInterfaces implements the UnpackInterfaceMessages.UnpackInterfaces method +func (cs ConsensusState) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error { + return unpacker.UnpackAny(cs.PublicKey, new(cryptotypes.PubKey)) +} + +// ClientType panics! +func (cs ClientState) ClientType() string { + panic("legacy solo machine is deprecated!") +} + +// GetLatestHeight panics! +func (cs ClientState) GetLatestHeight() exported.Height { + panic("legacy solo machine is deprecated!") +} + +// Status panics! +func (cs ClientState) Status(_ sdk.Context, _ sdk.KVStore, _ codec.BinaryCodec) exported.Status { + panic("legacy solo machine is deprecated!") +} + +// Validate panics! +func (cs ClientState) Validate() error { + panic("legacy solo machine is deprecated!") +} + +// GetProofSpecs panics! +func (cs ClientState) GetProofSpecs() []*ics23.ProofSpec { + panic("legacy solo machine is deprecated!") +} + +// ZeroCustomFields panics! +func (cs ClientState) ZeroCustomFields() exported.ClientState { + panic("legacy solo machine is deprecated!") +} + +// Initialize panics! +func (cs ClientState) Initialize(_ sdk.Context, _ codec.BinaryCodec, _ sdk.KVStore, consState exported.ConsensusState) error { + panic("legacy solo machine is deprecated!") +} + +// ExportMetadata panics! 
+func (cs ClientState) ExportMetadata(_ sdk.KVStore) []exported.GenesisMetadata { + panic("legacy solo machine is deprecated!") +} + +// CheckHeaderAndUpdateState panics! +func (cs *ClientState) CheckHeaderAndUpdateState( + _ sdk.Context, _ codec.BinaryCodec, _ sdk.KVStore, _ exported.Header, +) (exported.ClientState, exported.ConsensusState, error) { + panic("legacy solo machine is deprecated!") +} + +// CheckMisbehaviourAndUpdateState panics! +func (cs ClientState) CheckMisbehaviourAndUpdateState( + _ sdk.Context, _ codec.BinaryCodec, _ sdk.KVStore, _ exported.Misbehaviour, +) (exported.ClientState, error) { + panic("legacy solo machine is deprecated!") +} + +// CheckSubstituteAndUpdateState panics! +func (cs ClientState) CheckSubstituteAndUpdateState( + ctx sdk.Context, _ codec.BinaryCodec, _, _ sdk.KVStore, + _ exported.ClientState, +) (exported.ClientState, error) { + panic("legacy solo machine is deprecated!") +} + +// VerifyUpgradeAndUpdateState panics! +func (cs ClientState) VerifyUpgradeAndUpdateState( + _ sdk.Context, _ codec.BinaryCodec, _ sdk.KVStore, + _ exported.ClientState, _ exported.ConsensusState, _, _ []byte, +) (exported.ClientState, exported.ConsensusState, error) { + panic("legacy solo machine is deprecated!") +} + +// VerifyClientState panics! +func (cs ClientState) VerifyClientState( + store sdk.KVStore, cdc codec.BinaryCodec, + _ exported.Height, _ exported.Prefix, _ string, _ []byte, clientState exported.ClientState, +) error { + panic("legacy solo machine is deprecated!") +} + +// VerifyClientConsensusState panics! +func (cs ClientState) VerifyClientConsensusState( + sdk.KVStore, codec.BinaryCodec, + exported.Height, string, exported.Height, exported.Prefix, + []byte, exported.ConsensusState, +) error { + panic("legacy solo machine is deprecated!") +} + +// VerifyConnectionState panics! 
+func (cs ClientState) VerifyConnectionState( + sdk.KVStore, codec.BinaryCodec, exported.Height, + exported.Prefix, []byte, string, exported.ConnectionI, +) error { + panic("legacy solo machine is deprecated!") +} + +// VerifyChannelState panics! +func (cs ClientState) VerifyChannelState( + sdk.KVStore, codec.BinaryCodec, exported.Height, exported.Prefix, + []byte, string, string, exported.ChannelI, +) error { + panic("legacy solo machine is deprecated!") +} + +// VerifyPacketCommitment panics! +func (cs ClientState) VerifyPacketCommitment( + sdk.Context, sdk.KVStore, codec.BinaryCodec, exported.Height, + uint64, uint64, exported.Prefix, []byte, + string, string, uint64, []byte, +) error { + panic("legacy solo machine is deprecated!") +} + +// VerifyPacketAcknowledgement panics! +func (cs ClientState) VerifyPacketAcknowledgement( + sdk.Context, sdk.KVStore, codec.BinaryCodec, exported.Height, + uint64, uint64, exported.Prefix, []byte, + string, string, uint64, []byte, +) error { + panic("legacy solo machine is deprecated!") +} + +// VerifyPacketReceiptAbsence panics! +func (cs ClientState) VerifyPacketReceiptAbsence( + sdk.Context, sdk.KVStore, codec.BinaryCodec, exported.Height, + uint64, uint64, exported.Prefix, []byte, + string, string, uint64, +) error { + panic("legacy solo machine is deprecated!") +} + +// VerifyNextSequenceRecv panics! +func (cs ClientState) VerifyNextSequenceRecv( + sdk.Context, sdk.KVStore, codec.BinaryCodec, exported.Height, + uint64, uint64, exported.Prefix, []byte, + string, string, uint64, +) error { + panic("legacy solo machine is deprecated!") +} + +// ClientType panics! +func (ConsensusState) ClientType() string { + panic("legacy solo machine is deprecated!") +} + +// GetTimestamp panics! +func (cs ConsensusState) GetTimestamp() uint64 { + panic("legacy solo machine is deprecated!") +} + +// GetRoot panics! 
+func (cs ConsensusState) GetRoot() exported.Root { + panic("legacy solo machine is deprecated!") +} + +// ValidateBasic panics! +func (cs ConsensusState) ValidateBasic() error { + panic("legacy solo machine is deprecated!") +} diff --git a/modules/core/02-client/legacy/v100/store.go b/modules/core/02-client/legacy/v100/store.go index 41178cf88d5..9dfb68c7e1e 100644 --- a/modules/core/02-client/legacy/v100/store.go +++ b/modules/core/02-client/legacy/v100/store.go @@ -24,6 +24,7 @@ import ( // - Migrating solo machine client states from v1 to v2 protobuf definition // - Pruning all solo machine consensus states // - Pruning expired tendermint consensus states +// - Adds ProcessedHeight and Iteration keys for unexpired tendermint consensus states func MigrateStore(ctx sdk.Context, storeKey sdk.StoreKey, cdc codec.BinaryCodec) (err error) { store := ctx.KVStore(storeKey) iterator := sdk.KVStorePrefixIterator(store, host.KeyClientStorePrefix) diff --git a/modules/core/exported/client.go b/modules/core/exported/client.go index 1578900af21..de4cbe48c8a 100644 --- a/modules/core/exported/client.go +++ b/modules/core/exported/client.go @@ -2,10 +2,9 @@ package exported import ( ics23 "github.com/confio/ics23/go" - proto "github.com/gogo/protobuf/proto" - "github.com/cosmos/cosmos-sdk/codec" sdk "github.com/cosmos/cosmos-sdk/types" + proto "github.com/gogo/protobuf/proto" ) // Status represents the status of a client diff --git a/modules/core/keeper/migrations.go b/modules/core/keeper/migrations.go index 801a91bc42f..c6691005bd3 100644 --- a/modules/core/keeper/migrations.go +++ b/modules/core/keeper/migrations.go @@ -18,11 +18,10 @@ func NewMigrator(keeper Keeper) Migrator { // Migrate1to2 migrates from version 1 to 2. 
// This migration prunes: -// - solo machine consensus states -// - expired tendermint consensus states -// -// This migration migrates: -// - solo machine client state from protobuf definition v1 to v2 +// - migrates solo machine client state from protobuf definition v1 to v2 +// - prunes solo machine consensus states +// - prunes expired tendermint consensus states +// - adds ProcessedHeight and Iteration keys for unexpired tendermint consensus states func (m Migrator) Migrate1to2(ctx sdk.Context) error { clientMigrator := clientkeeper.NewMigrator(m.keeper.ClientKeeper) if err := clientMigrator.Migrate1to2(ctx); err != nil { diff --git a/modules/core/legacy/v100/genesis.go b/modules/core/legacy/v100/genesis.go index 28fbc1a42a4..9ea57a3087d 100644 --- a/modules/core/legacy/v100/genesis.go +++ b/modules/core/legacy/v100/genesis.go @@ -1,13 +1,13 @@ package v100 import ( - "time" - "github.com/cosmos/cosmos-sdk/client" "github.com/cosmos/cosmos-sdk/codec" genutiltypes "github.com/cosmos/cosmos-sdk/x/genutil/types" + tmtypes "github.com/tendermint/tendermint/types" clientv100 "github.com/cosmos/ibc-go/modules/core/02-client/legacy/v100" + clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types" host "github.com/cosmos/ibc-go/modules/core/24-host" "github.com/cosmos/ibc-go/modules/core/types" ) @@ -17,13 +17,16 @@ import ( // - Update solo machine client state protobuf definition (v1 to v2) // - Remove all solo machine consensus states // - Remove all expired tendermint consensus states -func MigrateGenesis(appState genutiltypes.AppMap, clientCtx client.Context, genesisBlockTime time.Time) (genutiltypes.AppMap, error) { +func MigrateGenesis(appState genutiltypes.AppMap, clientCtx client.Context, genDoc tmtypes.GenesisDoc) (genutiltypes.AppMap, error) { if appState[host.ModuleName] != nil { + // ensure legacy solo machines are registered + clientv100.RegisterInterfaces(clientCtx.InterfaceRegistry) + // unmarshal relative source genesis application state 
ibcGenState := &types.GenesisState{} clientCtx.JSONCodec.MustUnmarshalJSON(appState[host.ModuleName], ibcGenState) - clientGenState, err := clientv100.MigrateGenesis(codec.NewProtoCodec(clientCtx.InterfaceRegistry), &ibcGenState.ClientGenesis, genesisBlockTime) + clientGenState, err := clientv100.MigrateGenesis(codec.NewProtoCodec(clientCtx.InterfaceRegistry), &ibcGenState.ClientGenesis, genDoc.GenesisTime, clienttypes.NewHeight(clienttypes.ParseChainID(genDoc.ChainID), uint64(genDoc.InitialHeight))) if err != nil { return nil, err } diff --git a/modules/core/legacy/v100/genesis_test.go b/modules/core/legacy/v100/genesis_test.go new file mode 100644 index 00000000000..7f27e3e2407 --- /dev/null +++ b/modules/core/legacy/v100/genesis_test.go @@ -0,0 +1,198 @@ +package v100_test + +import ( + "bytes" + "testing" + + "github.com/cosmos/cosmos-sdk/client" + codectypes "github.com/cosmos/cosmos-sdk/codec/types" + genutiltypes "github.com/cosmos/cosmos-sdk/x/genutil/types" + "github.com/stretchr/testify/suite" + tmtypes "github.com/tendermint/tendermint/types" + + ibcclient "github.com/cosmos/ibc-go/modules/core/02-client" + clientv100 "github.com/cosmos/ibc-go/modules/core/02-client/legacy/v100" + clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types" + host "github.com/cosmos/ibc-go/modules/core/24-host" + "github.com/cosmos/ibc-go/modules/core/legacy/v100" + "github.com/cosmos/ibc-go/modules/core/types" + ibctmtypes "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types" + ibctesting "github.com/cosmos/ibc-go/testing" + "github.com/cosmos/ibc-go/testing/simapp" +) + +type LegacyTestSuite struct { + suite.Suite + + coordinator *ibctesting.Coordinator + + // testing chains used for convenience and readability + chainA *ibctesting.TestChain + chainB *ibctesting.TestChain +} + +// TestLegacyTestSuite runs all the tests within this package. 
+func TestLegacyTestSuite(t *testing.T) { + suite.Run(t, new(LegacyTestSuite)) +} + +// SetupTest creates a coordinator with 2 test chains. +func (suite *LegacyTestSuite) SetupTest() { + suite.coordinator = ibctesting.NewCoordinator(suite.T(), 2) + suite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(0)) + suite.chainB = suite.coordinator.GetChain(ibctesting.GetChainID(1)) + // commit some blocks so that QueryProof returns valid proof (cannot return valid query if height <= 1) + suite.coordinator.CommitNBlocks(suite.chainA, 2) + suite.coordinator.CommitNBlocks(suite.chainB, 2) +} + +// NOTE: this test is mainly copied from 02-client/legacy/v100 +func (suite *LegacyTestSuite) TestMigrateGenesisSolomachine() { + path := ibctesting.NewPath(suite.chainA, suite.chainB) + encodingConfig := simapp.MakeTestEncodingConfig() + clientCtx := client.Context{}. + WithInterfaceRegistry(encodingConfig.InterfaceRegistry). + WithTxConfig(encodingConfig.TxConfig). + WithJSONCodec(encodingConfig.Marshaler) + + // create multiple legacy solo machine clients + solomachine := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "06-solomachine-0", "testing", 1) + solomachineMulti := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "06-solomachine-1", "testing", 4) + + // create tendermint clients + suite.coordinator.SetupClients(path) + clientGenState := ibcclient.ExportGenesis(path.EndpointA.Chain.GetContext(), path.EndpointA.Chain.App.GetIBCKeeper().ClientKeeper) + + // manually generate old proto buf definitions and set in genesis + // NOTE: we cannot use 'ExportGenesis' for the solo machines since we are + // using client states and consensus states which do not implement the exported.ClientState + // and exported.ConsensusState interface + var clients []clienttypes.IdentifiedClientState + for _, sm := range []*ibctesting.Solomachine{solomachine, solomachineMulti} { + clientState := sm.ClientState() + + var seq uint64 + if clientState.IsFrozen { + seq = 1 + } + 
+ // generate old client state proto defintion + legacyClientState := &clientv100.ClientState{ + Sequence: clientState.Sequence, + FrozenSequence: seq, + ConsensusState: &clientv100.ConsensusState{ + PublicKey: clientState.ConsensusState.PublicKey, + Diversifier: clientState.ConsensusState.Diversifier, + Timestamp: clientState.ConsensusState.Timestamp, + }, + AllowUpdateAfterProposal: clientState.AllowUpdateAfterProposal, + } + + // set client state + any, err := codectypes.NewAnyWithValue(legacyClientState) + suite.Require().NoError(err) + suite.Require().NotNil(any) + client := clienttypes.IdentifiedClientState{ + ClientId: sm.ClientID, + ClientState: any, + } + clients = append(clients, client) + + // set in store for ease of determining expected genesis + clientStore := path.EndpointA.Chain.App.GetIBCKeeper().ClientKeeper.ClientStore(path.EndpointA.Chain.GetContext(), sm.ClientID) + bz, err := path.EndpointA.Chain.App.AppCodec().MarshalInterface(legacyClientState) + suite.Require().NoError(err) + clientStore.Set(host.ClientStateKey(), bz) + + // set some consensus states + height1 := clienttypes.NewHeight(0, 1) + height2 := clienttypes.NewHeight(1, 2) + height3 := clienttypes.NewHeight(0, 123) + + any, err = codectypes.NewAnyWithValue(legacyClientState.ConsensusState) + suite.Require().NoError(err) + suite.Require().NotNil(any) + consensusState1 := clienttypes.ConsensusStateWithHeight{ + Height: height1, + ConsensusState: any, + } + consensusState2 := clienttypes.ConsensusStateWithHeight{ + Height: height2, + ConsensusState: any, + } + consensusState3 := clienttypes.ConsensusStateWithHeight{ + Height: height3, + ConsensusState: any, + } + + clientConsensusState := clienttypes.ClientConsensusStates{ + ClientId: sm.ClientID, + ConsensusStates: []clienttypes.ConsensusStateWithHeight{consensusState1, consensusState2, consensusState3}, + } + + clientGenState.ClientsConsensus = append(clientGenState.ClientsConsensus, clientConsensusState) + + // set in store for ease 
of determining expected genesis + bz, err = path.EndpointA.Chain.App.AppCodec().MarshalInterface(legacyClientState.ConsensusState) + suite.Require().NoError(err) + clientStore.Set(host.ConsensusStateKey(height1), bz) + clientStore.Set(host.ConsensusStateKey(height2), bz) + clientStore.Set(host.ConsensusStateKey(height3), bz) + } + // solo machine clients must come before tendermint in expected + clientGenState.Clients = append(clients, clientGenState.Clients...) + + // migrate store get expected genesis + // store migration and genesis migration should produce identical results + err := clientv100.MigrateStore(path.EndpointA.Chain.GetContext(), path.EndpointA.Chain.GetSimApp().GetKey(host.StoreKey), path.EndpointA.Chain.App.AppCodec()) + suite.Require().NoError(err) + expectedClientGenState := ibcclient.ExportGenesis(path.EndpointA.Chain.GetContext(), path.EndpointA.Chain.App.GetIBCKeeper().ClientKeeper) + + // 'ExportGenesis' order metadata keys by processedheight, processedtime for all heights, then it appends all iteration keys + // In order to match the genesis migration with export genesis we must reorder the iteration keys to be last + // This isn't ideal, but it is better than modifying the genesis migration from a previous version to match the export genesis of a new version + // which provides no benefit except nicer testing + for i, clientMetadata := range expectedClientGenState.ClientsMetadata { + var updatedMetadata []clienttypes.GenesisMetadata + var iterationKeys []clienttypes.GenesisMetadata + for _, metadata := range clientMetadata.ClientMetadata { + if bytes.HasPrefix(metadata.Key, []byte(ibctmtypes.KeyIterateConsensusStatePrefix)) { + iterationKeys = append(iterationKeys, metadata) + } else { + updatedMetadata = append(updatedMetadata, metadata) + } + } + updatedMetadata = append(updatedMetadata, iterationKeys...) 
+ expectedClientGenState.ClientsMetadata[i] = clienttypes.IdentifiedGenesisMetadata{ + ClientId: clientMetadata.ClientId, + ClientMetadata: updatedMetadata, + } + } + + // NOTE: these lines are added in comparison to 02-client/legacy/v100 + // generate appState with old ibc genesis state + appState := genutiltypes.AppMap{} + ibcGenState := types.DefaultGenesisState() + ibcGenState.ClientGenesis = clientGenState + clientv100.RegisterInterfaces(clientCtx.InterfaceRegistry) + appState[host.ModuleName] = clientCtx.JSONCodec.MustMarshalJSON(ibcGenState) + genDoc := tmtypes.GenesisDoc{ + ChainID: suite.chainA.ChainID, + GenesisTime: suite.coordinator.CurrentTime, + InitialHeight: suite.chainA.GetContext().BlockHeight(), + } + + // NOTE: genesis time isn't updated since we aren't testing for tendermint consensus state pruning + migrated, err := v100.MigrateGenesis(appState, clientCtx, genDoc) + suite.Require().NoError(err) + + expectedAppState := genutiltypes.AppMap{} + expectedIBCGenState := types.DefaultGenesisState() + expectedIBCGenState.ClientGenesis = expectedClientGenState + + bz, err := clientCtx.JSONCodec.MarshalJSON(expectedIBCGenState) + suite.Require().NoError(err) + expectedAppState[host.ModuleName] = bz + + suite.Require().Equal(expectedAppState, migrated) +} From 7ceca613e1a53a6339506de523eeefedab480c45 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Colin=20Axn=C3=A9r?= <25233464+colin-axner@users.noreply.github.com> Date: Tue, 15 Jun 2021 11:29:58 +0200 Subject: [PATCH 17/21] add migration support for max expected time per block --- docs/ibc/proto-docs.md | 2 +- docs/migrations/ibc-migration-043.md | 27 +++++++++++++++++-- .../core/03-connection/types/connection.pb.go | 6 ++--- modules/core/03-connection/types/params.go | 2 +- modules/core/legacy/v100/genesis.go | 13 ++++++++- modules/core/legacy/v100/genesis_test.go | 3 ++- proto/ibc/core/connection/v1/connection.proto | 6 ++--- 7 files changed, 47 insertions(+), 12 deletions(-) diff --git 
a/docs/ibc/proto-docs.md b/docs/ibc/proto-docs.md index b3937fc3d0e..cae068cf68c 100644 --- a/docs/ibc/proto-docs.md +++ b/docs/ibc/proto-docs.md @@ -2425,7 +2425,7 @@ Params defines the set of Connection parameters. | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | -| `max_expected_time_per_block` | [uint64](#uint64) | | maximum expected time per block, used to enforce block delay. This parameter should reflect the largest amount of time that the chain might reasonably take to produce the next block under normal operating conditions. A safe choice is 3-5x the expected time per block. | +| `max_expected_time_per_block` | [uint64](#uint64) | | maximum expected time per block (in nanoseconds), used to enforce block delay. This parameter should reflect the largest amount of time that the chain might reasonably take to produce the next block under normal operating conditions. A safe choice is 3-5x the expected time per block. | diff --git a/docs/migrations/ibc-migration-043.md b/docs/migrations/ibc-migration-043.md index 6a3a3828b5a..547f3e54eb0 100644 --- a/docs/migrations/ibc-migration-043.md +++ b/docs/migrations/ibc-migration-043.md @@ -38,9 +38,29 @@ Both in-place store migrations and genesis migrations will: - prune all solo machine consensus states - prune all expired tendermint consensus states +Chains must set a new connection parameter during either in place store migrations or genesis migration. The new parameter, max expected block time, is used to enforce packet processing delays on the receiving end of an IBC packet flow. Checkout the [docs](https://github.com/cosmos/ibc-go/blob/release/v1.0.x/docs/ibc/proto-docs.md#params-2) for more information. + ### In-Place Store Migrations -In place store migrations will automatically be run after the binary is swapped and the application is started again during an upgrade proposal. +The new chain binary will need to run migrations in the upgrade handler. 
The fromVM (previous module version) for the IBC module should be 1. This will allow migrations to be run for IBC updating the version from 1 to 2. + +Ex: +```go +app.UpgradeKeeper.SetUpgradeHandler("my-upgrade-proposal", + func(ctx sdk.Context, _ upgradetypes.Plan, _ module.VersionMap) (module.VersionMap, error) { + // set max expected block time parameter. Replace the default with your expected value + // https://github.com/cosmos/ibc-go/blob/release/v1.0.x/docs/ibc/proto-docs.md#params-2 + app.IBCKeeper.ConnectionKeeper.SetParams(ctx, ibcconnectiontypes.DefaultParams()) + + fromVM := map[string]uint64{ + ... // other modules + "ibc": 1, + ... + } + return app.mm.RunMigrations(ctx, app.configurator, fromVM) + }) + +``` ### Genesis Migrations @@ -56,7 +76,9 @@ import ( ... // add in migrate cmd function -newGenState, err = ibcv100.MigrateGenesis(clientCtx.InterfaceRegistry, newGenState, genDoc) +// expectedTimePerBlock is a new connection parameter +// https://github.com/cosmos/ibc-go/blob/release/v1.0.x/docs/ibc/proto-docs.md#params-2 +newGenState, err = ibcv100.MigrateGenesis(clientCtx.InterfaceRegistry, newGenState, genDoc, expectedTimePerBlock) if err != nil { return err } @@ -64,6 +86,7 @@ if err != nil { **NOTE:** The genesis chain-id, time and height MUST be updated before migrating IBC, otherwise the tendermint consensus state will not be pruned. + ## IBC Keeper Changes The IBC Keeper now takes in the Upgrade Keeper. Please add the chains' Upgrade Keeper after the Staking Keeper: diff --git a/modules/core/03-connection/types/connection.pb.go b/modules/core/03-connection/types/connection.pb.go index 07577489103..6fbe5ba997f 100644 --- a/modules/core/03-connection/types/connection.pb.go +++ b/modules/core/03-connection/types/connection.pb.go @@ -356,9 +356,9 @@ var xxx_messageInfo_Version proto.InternalMessageInfo // Params defines the set of Connection parameters. type Params struct { - // maximum expected time per block, used to enforce block delay. 
This parameter should reflect the largest amount of - // time that the chain might reasonably take to produce the next block under normal operating conditions. A safe - // choice is 3-5x the expected time per block. + // maximum expected time per block (in nanoseconds), used to enforce block delay. This parameter should reflect the + // largest amount of time that the chain might reasonably take to produce the next block under normal operating + // conditions. A safe choice is 3-5x the expected time per block. MaxExpectedTimePerBlock uint64 `protobuf:"varint,1,opt,name=max_expected_time_per_block,json=maxExpectedTimePerBlock,proto3" json:"max_expected_time_per_block,omitempty" yaml:"max_expected_time_per_block"` } diff --git a/modules/core/03-connection/types/params.go b/modules/core/03-connection/types/params.go index 904bde60415..35677062fdb 100644 --- a/modules/core/03-connection/types/params.go +++ b/modules/core/03-connection/types/params.go @@ -7,7 +7,7 @@ import ( paramtypes "github.com/cosmos/cosmos-sdk/x/params/types" ) -// DefaultTimePerBlock is the default value for maximum expected time per block. +// DefaultTimePerBlock is the default value for maximum expected time per block (in nanoseconds). 
const DefaultTimePerBlock = 30 * time.Second // KeyMaxExpectedTimePerBlock is store's key for MaxExpectedTimePerBlock parameter diff --git a/modules/core/legacy/v100/genesis.go b/modules/core/legacy/v100/genesis.go index 9ea57a3087d..42932613499 100644 --- a/modules/core/legacy/v100/genesis.go +++ b/modules/core/legacy/v100/genesis.go @@ -8,6 +8,7 @@ import ( clientv100 "github.com/cosmos/ibc-go/modules/core/02-client/legacy/v100" clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types" + connectiontypes "github.com/cosmos/ibc-go/modules/core/03-connection/types" host "github.com/cosmos/ibc-go/modules/core/24-host" "github.com/cosmos/ibc-go/modules/core/types" ) @@ -17,7 +18,7 @@ import ( // - Update solo machine client state protobuf definition (v1 to v2) // - Remove all solo machine consensus states // - Remove all expired tendermint consensus states -func MigrateGenesis(appState genutiltypes.AppMap, clientCtx client.Context, genDoc tmtypes.GenesisDoc) (genutiltypes.AppMap, error) { +func MigrateGenesis(appState genutiltypes.AppMap, clientCtx client.Context, genDoc tmtypes.GenesisDoc, maxExpectedTimePerBlock uint64) (genutiltypes.AppMap, error) { if appState[host.ModuleName] != nil { // ensure legacy solo machines are registered clientv100.RegisterInterfaces(clientCtx.InterfaceRegistry) @@ -33,6 +34,16 @@ func MigrateGenesis(appState genutiltypes.AppMap, clientCtx client.Context, genD ibcGenState.ClientGenesis = *clientGenState + // set max expected time per block + connectionGenesis := connectiontypes.GenesisState{ + Connections: ibcGenState.ConnectionGenesis.Connections, + ClientConnectionPaths: ibcGenState.ConnectionGenesis.ClientConnectionPaths, + NextConnectionSequence: ibcGenState.ConnectionGenesis.NextConnectionSequence, + Params: connectiontypes.NewParams(maxExpectedTimePerBlock), + } + + ibcGenState.ConnectionGenesis = connectionGenesis + // delete old genesis state delete(appState, host.ModuleName) diff --git 
a/modules/core/legacy/v100/genesis_test.go b/modules/core/legacy/v100/genesis_test.go index 7f27e3e2407..67e3c01f52d 100644 --- a/modules/core/legacy/v100/genesis_test.go +++ b/modules/core/legacy/v100/genesis_test.go @@ -13,6 +13,7 @@ import ( ibcclient "github.com/cosmos/ibc-go/modules/core/02-client" clientv100 "github.com/cosmos/ibc-go/modules/core/02-client/legacy/v100" clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types" + connectiontypes "github.com/cosmos/ibc-go/modules/core/03-connection/types" host "github.com/cosmos/ibc-go/modules/core/24-host" "github.com/cosmos/ibc-go/modules/core/legacy/v100" "github.com/cosmos/ibc-go/modules/core/types" @@ -183,7 +184,7 @@ func (suite *LegacyTestSuite) TestMigrateGenesisSolomachine() { } // NOTE: genesis time isn't updated since we aren't testing for tendermint consensus state pruning - migrated, err := v100.MigrateGenesis(appState, clientCtx, genDoc) + migrated, err := v100.MigrateGenesis(appState, clientCtx, genDoc, uint64(connectiontypes.DefaultTimePerBlock)) suite.Require().NoError(err) expectedAppState := genutiltypes.AppMap{} diff --git a/proto/ibc/core/connection/v1/connection.proto b/proto/ibc/core/connection/v1/connection.proto index e09f1529d92..72c0ff7daa0 100644 --- a/proto/ibc/core/connection/v1/connection.proto +++ b/proto/ibc/core/connection/v1/connection.proto @@ -107,8 +107,8 @@ message Version { // Params defines the set of Connection parameters. message Params { - // maximum expected time per block, used to enforce block delay. This parameter should reflect the largest amount of - // time that the chain might reasonably take to produce the next block under normal operating conditions. A safe - // choice is 3-5x the expected time per block. + // maximum expected time per block (in nanoseconds), used to enforce block delay. This parameter should reflect the + // largest amount of time that the chain might reasonably take to produce the next block under normal operating + // conditions. 
A safe choice is 3-5x the expected time per block. uint64 max_expected_time_per_block = 1 [(gogoproto.moretags) = "yaml:\"max_expected_time_per_block\""]; } From 0fcd0c4f996ba989c97dd7ef08e291016f34ea1d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Colin=20Axn=C3=A9r?= <25233464+colin-axner@users.noreply.github.com> Date: Tue, 15 Jun 2021 11:38:04 +0200 Subject: [PATCH 18/21] fix docs --- docs/migrations/ibc-migration-043.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/migrations/ibc-migration-043.md b/docs/migrations/ibc-migration-043.md index b52864f1b1e..82154645013 100644 --- a/docs/migrations/ibc-migration-043.md +++ b/docs/migrations/ibc-migration-043.md @@ -78,9 +78,9 @@ import ( // add in migrate cmd function // expectedTimePerBlock is a new connection parameter // https://github.com/cosmos/ibc-go/blob/release/v1.0.x/docs/ibc/proto-docs.md#params-2 -newGenState, err = ibcv100.MigrateGenesis(clientCtx.InterfaceRegistry, newGenState, genDoc, expectedTimePerBlock) +newGenState, err = ibcv100.MigrateGenesis(newGenState, clientCtx, *genDoc, expectedTimePerBlock) if err != nil { - return err + return err } ``` From 900b907c37fbeb051db934b9a3cd5b1bd37f4049 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Colin=20Axn=C3=A9r?= <25233464+colin-axner@users.noreply.github.com> Date: Wed, 16 Jun 2021 17:31:26 +0200 Subject: [PATCH 19/21] fix bug found by Aditya The genesis client metadata was being set independently for each unexpired height. 
It needed to be moved outside the unexpired for loop --- modules/core/02-client/legacy/v100/genesis.go | 26 ++++++++------- .../02-client/legacy/v100/genesis_test.go | 32 +++++++++++-------- modules/core/legacy/v100/genesis_test.go | 25 ++------------- 3 files changed, 35 insertions(+), 48 deletions(-) diff --git a/modules/core/02-client/legacy/v100/genesis.go b/modules/core/02-client/legacy/v100/genesis.go index b62ea64632f..8df031cfe53 100644 --- a/modules/core/02-client/legacy/v100/genesis.go +++ b/modules/core/02-client/legacy/v100/genesis.go @@ -90,6 +90,9 @@ func MigrateGenesis(cdc codec.BinaryCodec, clientGenState *types.GenesisState, g }) } + // collect metadata for unexpired consensus states + var clientMetadata []types.GenesisMetadata + // remove all expired tendermint consensus state metadata by adding only // unexpired consensus state metadata for _, consState := range unexpiredConsensusStates { @@ -100,9 +103,8 @@ func MigrateGenesis(cdc codec.BinaryCodec, clientGenState *types.GenesisState, g // obtain height for consensus state being pruned height := consState.Height - // iterate through metadata and find metadata which should be pruned - // only unexpired consensus state heights should be added - var clientMetadata []types.GenesisMetadata + // iterate through metadata and find metadata for current unexpired height + // only unexpired consensus state metadata should be added for _, metadata := range identifiedGenMetadata.ClientMetadata { // the previous version of IBC only contained the processed time metadata // if we find the processed time metadata for an unexpired height, add the @@ -112,7 +114,7 @@ func MigrateGenesis(cdc codec.BinaryCodec, clientGenState *types.GenesisState, g Key: ibctmtypes.ProcessedHeightKey(height), Value: []byte(selfHeight.String()), }) - clientMetadata = append(clientMetadata, metadata) + clientMetadata = append(clientMetadata, metadata) // processed time clientMetadata = append(clientMetadata, types.GenesisMetadata{ Key: 
ibctmtypes.IterationKey(height), Value: host.ConsensusStateKey(height), @@ -121,15 +123,17 @@ } } - // if we have metadata for unexipred consensus states, add it to consensusMetadata - if len(clientMetadata) != 0 { - clientsMetadata = append(clientsMetadata, types.IdentifiedGenesisMetadata{ - ClientId: client.ClientId, - ClientMetadata: clientMetadata, - }) - } } } + + } + + // if we have metadata for unexpired consensus states, add it to consensusMetadata + if len(clientMetadata) != 0 { + clientsMetadata = append(clientsMetadata, types.IdentifiedGenesisMetadata{ + ClientId: client.ClientId, + ClientMetadata: clientMetadata, + }) } default: diff --git a/modules/core/02-client/legacy/v100/genesis_test.go b/modules/core/02-client/legacy/v100/genesis_test.go index c27b4f7a6dc..9ba19771e1f 100644 --- a/modules/core/02-client/legacy/v100/genesis_test.go +++ b/modules/core/02-client/legacy/v100/genesis_test.go @@ -34,6 +34,8 @@ func (suite *LegacyTestSuite) TestMigrateGenesisSolomachine() { // create tendermint clients suite.coordinator.SetupClients(path) + err := path.EndpointA.UpdateClient() + suite.Require().NoError(err) clientGenState := ibcclient.ExportGenesis(path.EndpointA.Chain.GetContext(), path.EndpointA.Chain.App.GetIBCKeeper().ClientKeeper) // manually generate old proto buf definitions and set in genesis @@ -117,15 +119,19 @@ func (suite *LegacyTestSuite) TestMigrateGenesisSolomachine() { // migrate store get expected genesis // store migration and genesis migration should produce identical results - err := v100.MigrateStore(path.EndpointA.Chain.GetContext(), path.EndpointA.Chain.GetSimApp().GetKey(host.StoreKey), path.EndpointA.Chain.App.AppCodec()) + err = v100.MigrateStore(path.EndpointA.Chain.GetContext(), path.EndpointA.Chain.GetSimApp().GetKey(host.StoreKey), path.EndpointA.Chain.App.AppCodec()) suite.Require().NoError(err) expectedClientGenState := 
ibcclient.ExportGenesis(path.EndpointA.Chain.GetContext(), path.EndpointA.Chain.App.GetIBCKeeper().ClientKeeper) + // NOTE: genesis time isn't updated since we aren't testing for tendermint consensus state pruning + migrated, err := v100.MigrateGenesis(codec.NewProtoCodec(clientCtx.InterfaceRegistry), &clientGenState, suite.coordinator.CurrentTime, types.GetSelfHeight(suite.chainA.GetContext())) + suite.Require().NoError(err) + // 'ExportGenesis' order metadata keys by processedheight, processedtime for all heights, then it appends all iteration keys - // In order to match the genesis migration with export genesis we must reorder the iteration keys to be last + // In order to match the genesis migration with export genesis (from store migrations) we must reorder the iteration keys to be last // This isn't ideal, but it is better than modifying the genesis migration from a previous version to match the export genesis of a new version // which provides no benefit except nicer testing - for i, clientMetadata := range expectedClientGenState.ClientsMetadata { + for i, clientMetadata := range migrated.ClientsMetadata { var updatedMetadata []types.GenesisMetadata var iterationKeys []types.GenesisMetadata for _, metadata := range clientMetadata.ClientMetadata { @@ -136,16 +142,12 @@ func (suite *LegacyTestSuite) TestMigrateGenesisSolomachine() { } } updatedMetadata = append(updatedMetadata, iterationKeys...) 
- expectedClientGenState.ClientsMetadata[i] = types.IdentifiedGenesisMetadata{ + migrated.ClientsMetadata[i] = types.IdentifiedGenesisMetadata{ ClientId: clientMetadata.ClientId, ClientMetadata: updatedMetadata, } } - // NOTE: genesis time isn't updated since we aren't testing for tendermint consensus state pruning - migrated, err := v100.MigrateGenesis(codec.NewProtoCodec(clientCtx.InterfaceRegistry), &clientGenState, suite.coordinator.CurrentTime, types.GetSelfHeight(suite.chainA.GetContext())) - suite.Require().NoError(err) - bz, err := clientCtx.JSONCodec.MarshalJSON(&expectedClientGenState) suite.Require().NoError(err) @@ -203,6 +205,8 @@ func (suite *LegacyTestSuite) TestMigrateGenesisTendermint() { // create the consensus state that can be used as trusted height for next update path1.EndpointA.UpdateClient() + path1.EndpointA.UpdateClient() + path2.EndpointA.UpdateClient() path2.EndpointA.UpdateClient() clientGenState := ibcclient.ExportGenesis(suite.chainA.GetContext(), suite.chainA.App.GetIBCKeeper().ClientKeeper) @@ -221,11 +225,15 @@ func (suite *LegacyTestSuite) TestMigrateGenesisTendermint() { suite.Require().NoError(err) expectedClientGenState := ibcclient.ExportGenesis(path1.EndpointA.Chain.GetContext(), path1.EndpointA.Chain.App.GetIBCKeeper().ClientKeeper) + migrated, err := v100.MigrateGenesis(codec.NewProtoCodec(clientCtx.InterfaceRegistry), &clientGenState, suite.coordinator.CurrentTime, types.GetSelfHeight(suite.chainA.GetContext())) + suite.Require().NoError(err) + // 'ExportGenesis' order metadata keys by processedheight, processedtime for all heights, then it appends all iteration keys // In order to match the genesis migration with export genesis we must reorder the iteration keys to be last // This isn't ideal, but it is better than modifying the genesis migration from a previous version to match the export genesis of a new version // which provides no benefit except nicer testing - for i, clientMetadata := range 
expectedClientGenState.ClientsMetadata { + for i, clientMetadata := range migrated.ClientsMetadata { + i := i var updatedMetadata []types.GenesisMetadata var iterationKeys []types.GenesisMetadata for _, metadata := range clientMetadata.ClientMetadata { @@ -236,15 +244,12 @@ func (suite *LegacyTestSuite) TestMigrateGenesisTendermint() { } } updatedMetadata = append(updatedMetadata, iterationKeys...) - expectedClientGenState.ClientsMetadata[i] = types.IdentifiedGenesisMetadata{ + migrated.ClientsMetadata[i] = types.IdentifiedGenesisMetadata{ ClientId: clientMetadata.ClientId, ClientMetadata: updatedMetadata, } } - migrated, err := v100.MigrateGenesis(codec.NewProtoCodec(clientCtx.InterfaceRegistry), &clientGenState, suite.coordinator.CurrentTime, types.GetSelfHeight(suite.chainA.GetContext())) - suite.Require().NoError(err) - // check path 1 client pruning for _, height := range path1PruneHeights { for _, client := range migrated.ClientsConsensus { @@ -308,6 +313,5 @@ func (suite *LegacyTestSuite) TestMigrateGenesisTendermint() { fmt.Println(string(indentedBz)) - suite.Require().Equal(string(expectedIndentedBz), string(indentedBz)) } diff --git a/modules/core/legacy/v100/genesis_test.go b/modules/core/legacy/v100/genesis_test.go index 67e3c01f52d..d4e53d9fba2 100644 --- a/modules/core/legacy/v100/genesis_test.go +++ b/modules/core/legacy/v100/genesis_test.go @@ -1,7 +1,6 @@ package v100_test import ( - "bytes" "testing" "github.com/cosmos/cosmos-sdk/client" @@ -17,7 +16,6 @@ import ( host "github.com/cosmos/ibc-go/modules/core/24-host" "github.com/cosmos/ibc-go/modules/core/legacy/v100" "github.com/cosmos/ibc-go/modules/core/types" - ibctmtypes "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types" ibctesting "github.com/cosmos/ibc-go/testing" "github.com/cosmos/ibc-go/testing/simapp" ) @@ -61,6 +59,8 @@ func (suite *LegacyTestSuite) TestMigrateGenesisSolomachine() { solomachineMulti := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, 
"06-solomachine-1", "testing", 4) // create tendermint clients + // NOTE: only 1 set of metadata is created, we aren't testing ordering + // The purpose of this test is to ensure the genesis states can be marshalled/unmarshalled suite.coordinator.SetupClients(path) clientGenState := ibcclient.ExportGenesis(path.EndpointA.Chain.GetContext(), path.EndpointA.Chain.App.GetIBCKeeper().ClientKeeper) @@ -149,27 +149,6 @@ func (suite *LegacyTestSuite) TestMigrateGenesisSolomachine() { suite.Require().NoError(err) expectedClientGenState := ibcclient.ExportGenesis(path.EndpointA.Chain.GetContext(), path.EndpointA.Chain.App.GetIBCKeeper().ClientKeeper) - // 'ExportGenesis' order metadata keys by processedheight, processedtime for all heights, then it appends all iteration keys - // In order to match the genesis migration with export genesis we must reorder the iteration keys to be last - // This isn't ideal, but it is better than modifying the genesis migration from a previous version to match the export genesis of a new version - // which provides no benefit except nicer testing - for i, clientMetadata := range expectedClientGenState.ClientsMetadata { - var updatedMetadata []clienttypes.GenesisMetadata - var iterationKeys []clienttypes.GenesisMetadata - for _, metadata := range clientMetadata.ClientMetadata { - if bytes.HasPrefix(metadata.Key, []byte(ibctmtypes.KeyIterateConsensusStatePrefix)) { - iterationKeys = append(iterationKeys, metadata) - } else { - updatedMetadata = append(updatedMetadata, metadata) - } - } - updatedMetadata = append(updatedMetadata, iterationKeys...) 
- expectedClientGenState.ClientsMetadata[i] = clienttypes.IdentifiedGenesisMetadata{ - ClientId: clientMetadata.ClientId, - ClientMetadata: updatedMetadata, - } - } - // NOTE: these lines are added in comparison to 02-client/legacy/v100 // generate appState with old ibc genesis state appState := genutiltypes.AppMap{} From fed9443e27eb84d4f57dc907e5407f2fd8b74c73 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Colin=20Axn=C3=A9r?= <25233464+colin-axner@users.noreply.github.com> Date: Wed, 16 Jun 2021 17:34:27 +0200 Subject: [PATCH 20/21] remove unnecessary code --- modules/core/02-client/legacy/v100/genesis_test.go | 1 - 1 file changed, 1 deletion(-) diff --git a/modules/core/02-client/legacy/v100/genesis_test.go b/modules/core/02-client/legacy/v100/genesis_test.go index 9ba19771e1f..aec6bc16d73 100644 --- a/modules/core/02-client/legacy/v100/genesis_test.go +++ b/modules/core/02-client/legacy/v100/genesis_test.go @@ -233,7 +233,6 @@ func (suite *LegacyTestSuite) TestMigrateGenesisTendermint() { // This isn't ideal, but it is better than modifying the genesis migration from a previous version to match the export genesis of a new version // which provides no benefit except nicer testing for i, clientMetadata := range migrated.ClientsMetadata { - i := i var updatedMetadata []types.GenesisMetadata var iterationKeys []types.GenesisMetadata for _, metadata := range clientMetadata.ClientMetadata { From e595b11ba01be6b725c2331b6fabbf1ade49d93c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Colin=20Axn=C3=A9r?= <25233464+colin-axner@users.noreply.github.com> Date: Thu, 17 Jun 2021 13:36:05 +0200 Subject: [PATCH 21/21] apply Aditya review suggestions, fix bug There was a bug in adding consensus metadata since it relied on the iteration key not yet set. This is fixed by using traditional iteration using the consensus state key, setting metadata for all consensus states, and then pruning expired consensus states. 
The store test has been updated to create two tendermint clients --- modules/core/02-client/legacy/v100/genesis.go | 22 +-- .../02-client/legacy/v100/genesis_test.go | 5 - modules/core/02-client/legacy/v100/store.go | 32 ++-- .../core/02-client/legacy/v100/store_test.go | 138 +++++++++++------- 4 files changed, 109 insertions(+), 88 deletions(-) diff --git a/modules/core/02-client/legacy/v100/genesis.go b/modules/core/02-client/legacy/v100/genesis.go index 8df031cfe53..65aa4210e13 100644 --- a/modules/core/02-client/legacy/v100/genesis.go +++ b/modules/core/02-client/legacy/v100/genesis.go @@ -110,15 +110,19 @@ func MigrateGenesis(cdc codec.BinaryCodec, clientGenState *types.GenesisState, g // if we find the processed time metadata for an unexpired height, add the // iteration key and processed height keys. if bytes.Equal(metadata.Key, ibctmtypes.ProcessedTimeKey(height)) { - clientMetadata = append(clientMetadata, types.GenesisMetadata{ - Key: ibctmtypes.ProcessedHeightKey(height), - Value: []byte(selfHeight.String()), - }) - clientMetadata = append(clientMetadata, metadata) // processed time - clientMetadata = append(clientMetadata, types.GenesisMetadata{ - Key: ibctmtypes.IterationKey(height), - Value: host.ConsensusStateKey(height), - }) + clientMetadata = append(clientMetadata, + // set the processed height using the current self height + // this is safe, it may cause delays in packet processing if there + // is a non zero connection delay time + types.GenesisMetadata{ + Key: ibctmtypes.ProcessedHeightKey(height), + Value: []byte(selfHeight.String()), + }, + metadata, // processed time + types.GenesisMetadata{ + Key: ibctmtypes.IterationKey(height), + Value: host.ConsensusStateKey(height), + }) } } diff --git a/modules/core/02-client/legacy/v100/genesis_test.go b/modules/core/02-client/legacy/v100/genesis_test.go index aec6bc16d73..0c3235c6582 100644 --- a/modules/core/02-client/legacy/v100/genesis_test.go +++ 
b/modules/core/02-client/legacy/v100/genesis_test.go @@ -3,7 +3,6 @@ package v100_test import ( "bytes" "encoding/json" - "fmt" "time" "github.com/cosmos/cosmos-sdk/client" @@ -167,8 +166,6 @@ func (suite *LegacyTestSuite) TestMigrateGenesisSolomachine() { indentedBz, err := json.MarshalIndent(jsonObj, "", "\t") suite.Require().NoError(err) - fmt.Println(string(indentedBz)) - suite.Require().Equal(string(expectedIndentedBz), string(indentedBz)) } @@ -310,7 +307,5 @@ func (suite *LegacyTestSuite) TestMigrateGenesisTendermint() { indentedBz, err := json.MarshalIndent(jsonObj, "", "\t") suite.Require().NoError(err) - fmt.Println(string(indentedBz)) - suite.Require().Equal(string(expectedIndentedBz), string(indentedBz)) } diff --git a/modules/core/02-client/legacy/v100/store.go b/modules/core/02-client/legacy/v100/store.go index 9dfb68c7e1e..842086f956b 100644 --- a/modules/core/02-client/legacy/v100/store.go +++ b/modules/core/02-client/legacy/v100/store.go @@ -93,11 +93,12 @@ func MigrateStore(ctx sdk.Context, storeKey sdk.StoreKey, cdc codec.BinaryCodec) return sdkerrors.Wrap(types.ErrInvalidClient, "client state is not tendermint even though client id contains 07-tendermint") } - if err = ibctmtypes.PruneAllExpiredConsensusStates(ctx, clientStore, cdc, tmClientState); err != nil { + // add iteration keys so pruning will be successful + if err = addConsensusMetadata(ctx, clientStore, cdc, tmClientState); err != nil { return err } - if err = addConsensusMetadata(ctx, clientStore, cdc, tmClientState); err != nil { + if err = ibctmtypes.PruneAllExpiredConsensusStates(ctx, clientStore, cdc, tmClientState); err != nil { return err } @@ -150,27 +151,22 @@ func pruneSolomachineConsensusStates(clientStore sdk.KVStore) { } } -// addConsensusMetadata adds the iteration key and processed height for all unexpired tendermint consensus states -// These keys were not included in the previous release of the IBC module. 
+// addConsensusMetadata adds the iteration key and processed height for all tendermint consensus states +// These keys were not included in the previous release of the IBC module. Adding the iteration keys allows +// for pruning iteration. func addConsensusMetadata(ctx sdk.Context, clientStore sdk.KVStore, cdc codec.BinaryCodec, clientState *ibctmtypes.ClientState) error { var heights []exported.Height + iterator := sdk.KVStorePrefixIterator(clientStore, []byte(host.KeyConsensusStatePrefix)) - metadataCb := func(height exported.Height) bool { - consState, err := ibctmtypes.GetConsensusState(clientStore, cdc, height) - // this error should never occur - if err != nil { - return true - } - - if !clientState.IsExpired(consState.Timestamp, ctx.BlockTime()) { - heights = append(heights, height) + defer iterator.Close() + for ; iterator.Valid(); iterator.Next() { + keySplit := strings.Split(string(iterator.Key()), "/") + // consensus key is in the format "consensusStates/" + if len(keySplit) != 2 { + continue } - return false - } - - if err := ibctmtypes.IterateConsensusStateAscending(clientStore, metadataCb); err != nil { - return err + heights = append(heights, types.MustParseHeight(keySplit[1])) } for _, height := range heights { diff --git a/modules/core/02-client/legacy/v100/store_test.go b/modules/core/02-client/legacy/v100/store_test.go index 1ab17a44b9f..1b9856da183 100644 --- a/modules/core/02-client/legacy/v100/store_test.go +++ b/modules/core/02-client/legacy/v100/store_test.go @@ -118,88 +118,114 @@ func (suite *LegacyTestSuite) TestMigrateStoreSolomachine() { // ensure all expired consensus states are removed from tendermint client stores func (suite *LegacyTestSuite) TestMigrateStoreTendermint() { // create path and setup clients - path := ibctesting.NewPath(suite.chainA, suite.chainB) - suite.coordinator.SetupClients(path) + path1 := ibctesting.NewPath(suite.chainA, suite.chainB) + suite.coordinator.SetupClients(path1) - // collect all heights expected to 
be pruned - var pruneHeights []exported.Height - pruneHeights = append(pruneHeights, path.EndpointA.GetClientState().GetLatestHeight()) + path2 := ibctesting.NewPath(suite.chainA, suite.chainB) + suite.coordinator.SetupClients(path2) + pruneHeightMap := make(map[*ibctesting.Path][]exported.Height) + unexpiredHeightMap := make(map[*ibctesting.Path][]exported.Height) - // these heights will be expired and also pruned - for i := 0; i < 3; i++ { - path.EndpointA.UpdateClient() + for _, path := range []*ibctesting.Path{path1, path2} { + // collect all heights expected to be pruned + var pruneHeights []exported.Height pruneHeights = append(pruneHeights, path.EndpointA.GetClientState().GetLatestHeight()) - } - // double chedck all information is currently stored - for _, pruneHeight := range pruneHeights { - consState, ok := path.EndpointA.Chain.GetConsensusState(path.EndpointA.ClientID, pruneHeight) - suite.Require().True(ok) - suite.Require().NotNil(consState) + // these heights will be expired and also pruned + for i := 0; i < 3; i++ { + path.EndpointA.UpdateClient() + pruneHeights = append(pruneHeights, path.EndpointA.GetClientState().GetLatestHeight()) + } - ctx := path.EndpointA.Chain.GetContext() - clientStore := path.EndpointA.Chain.App.GetIBCKeeper().ClientKeeper.ClientStore(ctx, path.EndpointA.ClientID) + // double chedck all information is currently stored + for _, pruneHeight := range pruneHeights { + consState, ok := path.EndpointA.Chain.GetConsensusState(path.EndpointA.ClientID, pruneHeight) + suite.Require().True(ok) + suite.Require().NotNil(consState) - processedTime, ok := ibctmtypes.GetProcessedTime(clientStore, pruneHeight) - suite.Require().True(ok) - suite.Require().NotNil(processedTime) + ctx := path.EndpointA.Chain.GetContext() + clientStore := path.EndpointA.Chain.App.GetIBCKeeper().ClientKeeper.ClientStore(ctx, path.EndpointA.ClientID) - processedHeight, ok := ibctmtypes.GetProcessedHeight(clientStore, pruneHeight) - suite.Require().True(ok) - 
suite.Require().NotNil(processedHeight) + processedTime, ok := ibctmtypes.GetProcessedTime(clientStore, pruneHeight) + suite.Require().True(ok) + suite.Require().NotNil(processedTime) - expectedConsKey := ibctmtypes.GetIterationKey(clientStore, pruneHeight) - suite.Require().NotNil(expectedConsKey) + processedHeight, ok := ibctmtypes.GetProcessedHeight(clientStore, pruneHeight) + suite.Require().True(ok) + suite.Require().NotNil(processedHeight) + + expectedConsKey := ibctmtypes.GetIterationKey(clientStore, pruneHeight) + suite.Require().NotNil(expectedConsKey) + } + pruneHeightMap[path] = pruneHeights } // Increment the time by a week suite.coordinator.IncrementTimeBy(7 * 24 * time.Hour) - // create the consensus state that can be used as trusted height for next update - path.EndpointA.UpdateClient() + for _, path := range []*ibctesting.Path{path1, path2} { + // create the consensus state that can be used as trusted height for next update + var unexpiredHeights []exported.Height + path.EndpointA.UpdateClient() + unexpiredHeights = append(unexpiredHeights, path.EndpointA.GetClientState().GetLatestHeight()) + path.EndpointA.UpdateClient() + unexpiredHeights = append(unexpiredHeights, path.EndpointA.GetClientState().GetLatestHeight()) + + // remove processed height and iteration keys since these were missing from previous version of ibc module + clientStore := path.EndpointA.Chain.App.GetIBCKeeper().ClientKeeper.ClientStore(path.EndpointA.Chain.GetContext(), path.EndpointA.ClientID) + for _, height := range unexpiredHeights { + clientStore.Delete(ibctmtypes.ProcessedHeightKey(height)) + clientStore.Delete(ibctmtypes.IterationKey(height)) + } + + unexpiredHeightMap[path] = unexpiredHeights + } // Increment the time by another week, then update the client. 
// This will cause the consensus states created before the first time increment // to be expired suite.coordinator.IncrementTimeBy(7 * 24 * time.Hour) - err := v100.MigrateStore(path.EndpointA.Chain.GetContext(), path.EndpointA.Chain.GetSimApp().GetKey(host.StoreKey), path.EndpointA.Chain.App.AppCodec()) + err := v100.MigrateStore(path1.EndpointA.Chain.GetContext(), path1.EndpointA.Chain.GetSimApp().GetKey(host.StoreKey), path1.EndpointA.Chain.App.AppCodec()) suite.Require().NoError(err) - ctx := path.EndpointA.Chain.GetContext() - clientStore := path.EndpointA.Chain.App.GetIBCKeeper().ClientKeeper.ClientStore(ctx, path.EndpointA.ClientID) + for _, path := range []*ibctesting.Path{path1, path2} { + ctx := path.EndpointA.Chain.GetContext() + clientStore := path.EndpointA.Chain.App.GetIBCKeeper().ClientKeeper.ClientStore(ctx, path.EndpointA.ClientID) - // ensure everything has been pruned - for i, pruneHeight := range pruneHeights { - consState, ok := path.EndpointA.Chain.GetConsensusState(path.EndpointA.ClientID, pruneHeight) - suite.Require().False(ok, i) - suite.Require().Nil(consState, i) + // ensure everything has been pruned + for i, pruneHeight := range pruneHeightMap[path] { + consState, ok := path.EndpointA.Chain.GetConsensusState(path.EndpointA.ClientID, pruneHeight) + suite.Require().False(ok, i) + suite.Require().Nil(consState, i) - processedTime, ok := ibctmtypes.GetProcessedTime(clientStore, pruneHeight) - suite.Require().False(ok, i) - suite.Require().Equal(uint64(0), processedTime, i) + processedTime, ok := ibctmtypes.GetProcessedTime(clientStore, pruneHeight) + suite.Require().False(ok, i) + suite.Require().Equal(uint64(0), processedTime, i) - processedHeight, ok := ibctmtypes.GetProcessedHeight(clientStore, pruneHeight) - suite.Require().False(ok, i) - suite.Require().Nil(processedHeight, i) + processedHeight, ok := ibctmtypes.GetProcessedHeight(clientStore, pruneHeight) + suite.Require().False(ok, i) + suite.Require().Nil(processedHeight, i) - 
expectedConsKey := ibctmtypes.GetIterationKey(clientStore, pruneHeight) - suite.Require().Nil(expectedConsKey, i) - } + expectedConsKey := ibctmtypes.GetIterationKey(clientStore, pruneHeight) + suite.Require().Nil(expectedConsKey, i) + } - // ensure metadata is set for unexpired consensus state - height := path.EndpointA.GetClientState().GetLatestHeight() - consState, ok := path.EndpointA.Chain.GetConsensusState(path.EndpointA.ClientID, height) - suite.Require().True(ok) - suite.Require().NotNil(consState) + // ensure metadata is set for unexpired consensus state + for _, height := range unexpiredHeightMap[path] { + consState, ok := path.EndpointA.Chain.GetConsensusState(path.EndpointA.ClientID, height) + suite.Require().True(ok) + suite.Require().NotNil(consState) - processedTime, ok := ibctmtypes.GetProcessedTime(clientStore, height) - suite.Require().True(ok) - suite.Require().NotEqual(uint64(0), processedTime) + processedTime, ok := ibctmtypes.GetProcessedTime(clientStore, height) + suite.Require().True(ok) + suite.Require().NotEqual(uint64(0), processedTime) - processedHeight, ok := ibctmtypes.GetProcessedHeight(clientStore, height) - suite.Require().True(ok) - suite.Require().Equal(types.GetSelfHeight(path.EndpointA.Chain.GetContext()), processedHeight) + processedHeight, ok := ibctmtypes.GetProcessedHeight(clientStore, height) + suite.Require().True(ok) + suite.Require().Equal(types.GetSelfHeight(path.EndpointA.Chain.GetContext()), processedHeight) - consKey := ibctmtypes.GetIterationKey(clientStore, height) - suite.Require().Equal(host.ConsensusStateKey(height), consKey) + consKey := ibctmtypes.GetIterationKey(clientStore, height) + suite.Require().Equal(host.ConsensusStateKey(height), consKey) + } + } }