From d6c23c985557f681dccfe92cac29b64475de83a7 Mon Sep 17 00:00:00 2001 From: user Date: Thu, 4 Nov 2021 10:04:58 +0900 Subject: [PATCH 01/12] feat: add upgrade/migration --- simapp/app.go | 28 ++++- simapp/app_test.go | 143 ++++++++++++++++++++++++++ tests/mocks/types_module_module.go | 3 + testutil/context.go | 25 +++++ types/address/store_key.go | 33 ++++++ types/address/store_key_test.go | 45 ++++++++ types/context_test.go | 17 +-- types/errors/errors.go | 3 + types/module/configurator.go | 75 +++++++++++++- types/module/module.go | 34 ++++++ x/auth/module.go | 3 + x/auth/vesting/module.go | 3 + x/bank/keeper/migrations.go | 20 ++++ x/bank/module.go | 8 ++ x/bank/spec/01_state.md | 2 +- x/bank/types/key.go | 8 +- x/capability/module.go | 3 + x/crisis/module.go | 3 + x/distribution/keeper/migrations.go | 20 ++++ x/distribution/module.go | 6 ++ x/distribution/spec/02_state.md | 6 +- x/evidence/module.go | 3 + x/genutil/module.go | 3 + x/gov/keeper/migrations.go | 20 ++++ x/gov/module.go | 6 ++ x/ibc/applications/transfer/module.go | 3 + x/ibc/core/module.go | 3 + x/mint/module.go | 3 + x/params/keeper/common_test.go | 20 +--- x/params/module.go | 3 + x/slashing/keeper/migrations.go | 20 ++++ x/slashing/module.go | 6 ++ x/slashing/spec/02_state.md | 4 +- x/staking/keeper/migrations.go | 20 ++++ x/staking/module.go | 6 ++ x/staking/spec/01_state.md | 20 ++-- x/staking/types/validator.go | 8 +- x/upgrade/module.go | 3 + 38 files changed, 583 insertions(+), 56 deletions(-) create mode 100644 testutil/context.go create mode 100644 types/address/store_key.go create mode 100644 types/address/store_key_test.go create mode 100644 x/bank/keeper/migrations.go create mode 100644 x/distribution/keeper/migrations.go create mode 100644 x/gov/keeper/migrations.go create mode 100644 x/slashing/keeper/migrations.go create mode 100644 x/staking/keeper/migrations.go diff --git a/simapp/app.go b/simapp/app.go index 825cdf2599..25df1fa23e 100644 --- a/simapp/app.go +++ b/simapp/app.go @@ -179,6 +179,9 @@ type SimApp struct { // simulation manager sm *module.SimulationManager + + // the configurator + configurator module.Configurator } func init() { @@ -365,7 +368,8 @@ func NewSimApp( app.mm.RegisterInvariants(&app.CrisisKeeper) app.mm.RegisterRoutes(app.Router(), app.QueryRouter(), encodingConfig.Amino) - app.mm.RegisterServices(module.NewConfigurator(app.MsgServiceRouter(), app.GRPCQueryRouter())) + app.configurator = module.NewConfigurator(app.MsgServiceRouter(), app.GRPCQueryRouter()) + app.mm.RegisterServices(app.configurator) // add test gRPC service for testing gRPC queries in isolation testdata.RegisterQueryServer(app.GRPCQueryRouter(), testdata.QueryImpl{}) @@ -557,6 +561,28 @@ func (app *SimApp) RegisterTendermintService(clientCtx client.Context) { tmservice.RegisterTendermintService(app.BaseApp.GRPCQueryRouter(), clientCtx, app.interfaceRegistry) } +// RunMigrations performs in-place store migrations for all modules. This +// function MUST be only called by x/upgrade UpgradeHandler. +// +// `migrateFromVersions` is a map of moduleName to fromVersion (unit64), where +// fromVersion denotes the version from which we should migrate the module, the +// target version being the module's latest ConsensusVersion. +// +// Example: +// cfg := module.NewConfigurator(...) 
+// app.UpgradeKeeper.SetUpgradeHandler("store-migration", func(ctx sdk.Context, plan upgradetypes.Plan) { +// err := app.RunMigrations(ctx, module.MigrationMap{ +// "bank": 1, // Migrate x/bank from v1 to current x/bank's ConsensusVersion +// "staking": 8, // Migrate x/staking from v8 to current x/staking's ConsensusVersion +// }) +// if err != nil { +// panic(err) +// } +// }) +func (app *SimApp) RunMigrations(ctx sdk.Context, migrateFromVersions module.MigrationMap) error { + return app.mm.RunMigrations(ctx, app.configurator, migrateFromVersions) +} + // RegisterSwaggerAPI registers swagger route with API Server func RegisterSwaggerAPI(ctx client.Context, rtr *mux.Router) { statikFS, err := fs.New() diff --git a/simapp/app_test.go b/simapp/app_test.go index bc8b4e471e..6c6d6baa20 100644 --- a/simapp/app_test.go +++ b/simapp/app_test.go @@ -6,10 +6,31 @@ import ( "testing" "github.com/line/ostracon/libs/log" + ocproto "github.com/line/ostracon/proto/ostracon/types" "github.com/line/tm-db/v2/memdb" "github.com/stretchr/testify/require" + "github.com/line/lbm-sdk/baseapp" + sdk "github.com/line/lbm-sdk/types" + "github.com/line/lbm-sdk/types/module" abci "github.com/line/ostracon/abci/types" + + "github.com/line/lbm-sdk/x/auth" + "github.com/line/lbm-sdk/x/auth/vesting" + banktypes "github.com/line/lbm-sdk/x/bank/types" + "github.com/line/lbm-sdk/x/capability" + "github.com/line/lbm-sdk/x/crisis" + "github.com/line/lbm-sdk/x/distribution" + "github.com/line/lbm-sdk/x/evidence" + "github.com/line/lbm-sdk/x/genutil" + "github.com/line/lbm-sdk/x/gov" + transfer "github.com/line/lbm-sdk/x/ibc/applications/transfer" + ibc "github.com/line/lbm-sdk/x/ibc/core" + "github.com/line/lbm-sdk/x/mint" + "github.com/line/lbm-sdk/x/params" + "github.com/line/lbm-sdk/x/slashing" + "github.com/line/lbm-sdk/x/staking" + "github.com/line/lbm-sdk/x/upgrade" ) func TestSimAppExportAndBlockedAddrs(t *testing.T) { @@ -48,3 +69,125 @@ func TestGetMaccPerms(t *testing.T) { dup := GetMaccPerms() require.Equal(t, maccPerms, dup, "duplicated module account permissions differed from actual module account permissions") } + +func TestRunMigrations(t *testing.T) { + db := memdb.NewDB() + encCfg := MakeTestEncodingConfig() + logger := log.NewOCLogger(log.NewSyncWriter(os.Stdout)) + app := NewSimApp(logger, db, nil, true, map[int64]bool{}, DefaultNodeHome, 0, encCfg, EmptyAppOptions{}) + + // Create a new baseapp and configurator for the purpose of this test. + bApp := baseapp.NewBaseApp(appName, logger, db, encCfg.TxConfig.TxDecoder()) + bApp.SetCommitMultiStoreTracer(nil) + bApp.SetInterfaceRegistry(encCfg.InterfaceRegistry) + app.BaseApp = bApp + app.configurator = module.NewConfigurator(app.MsgServiceRouter(), app.GRPCQueryRouter()) + + // We register all modules on the Configurator, except x/bank. x/bank will + // serve as the test subject on which we run the migration tests. + // + // The loop below is the same as calling `RegisterServices` on + // ModuleManager, except that we skip x/bank. 
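	// (In a production app this wiring is a single call,
	// `app.mm.RegisterServices(app.configurator)`, as done in simapp/app.go;
	// the manual loop here only exists so the test can leave x/bank out.)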
+ for _, module := range app.mm.Modules { + if module.Name() == banktypes.ModuleName { + continue + } + + module.RegisterServices(app.configurator) + } + + // Initialize the chain + app.InitChain(abci.RequestInitChain{}) + app.Commit() + + testCases := []struct { + name string + moduleName string + forVersion uint64 + expRegErr bool // errors while registering migration + expRegErrMsg string + expRunErr bool // errors while running migration + expRunErrMsg string + expCalled int + }{ + { + "cannot register migration for version 0", + "bank", 0, + true, "module migration versions should start at 1: invalid version", false, "", 0, + }, + { + "throws error on RunMigrations if no migration registered for bank", + "", 1, + false, "", true, "no migrations found for module bank: not found", 0, + }, + { + "can register and run migration handler for x/bank", + "bank", 1, + false, "", false, "", 1, + }, + { + "cannot register migration handler for same module & forVersion", + "bank", 1, + true, "another migration for module bank and version 1 already exists: internal logic error", false, "", 0, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + var err error + + // Since it's very hard to test actual in-place store migrations in + // tests (due to the difficulty of maintaing multiple versions of a + // module), we're just testing here that the migration logic is + // called. + called := 0 + + if tc.moduleName != "" { + // Register migration for module from version `forVersion` to `forVersion+1`. + err = app.configurator.RegisterMigration(tc.moduleName, tc.forVersion, func(sdk.Context) error { + called++ + + return nil + }) + + if tc.expRegErr { + require.EqualError(t, err, tc.expRegErrMsg) + + return + } + } + require.NoError(t, err) + + // Run migrations only for bank. That's why we put the initial + // version for bank as 1, and for all other modules, we put as + // their latest ConsensusVersion. 
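	// (For every module other than bank, fromVersion equals that module's
	// current ConsensusVersion, so runModuleMigrations finds nothing left to
	// run and leaves the module untouched.)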
+ err = app.RunMigrations( + app.NewContext(true, ocproto.Header{Height: app.LastBlockHeight()}), + module.MigrationMap{ + "bank": 1, + "auth": auth.AppModule{}.ConsensusVersion(), + "staking": staking.AppModule{}.ConsensusVersion(), + "mint": mint.AppModule{}.ConsensusVersion(), + "distribution": distribution.AppModule{}.ConsensusVersion(), + "slashing": slashing.AppModule{}.ConsensusVersion(), + "gov": gov.AppModule{}.ConsensusVersion(), + "params": params.AppModule{}.ConsensusVersion(), + "ibc": ibc.AppModule{}.ConsensusVersion(), + "upgrade": upgrade.AppModule{}.ConsensusVersion(), + "vesting": vesting.AppModule{}.ConsensusVersion(), + "transfer": transfer.AppModule{}.ConsensusVersion(), + "evidence": evidence.AppModule{}.ConsensusVersion(), + "crisis": crisis.AppModule{}.ConsensusVersion(), + "genutil": genutil.AppModule{}.ConsensusVersion(), + "capability": capability.AppModule{}.ConsensusVersion(), + }, + ) + if tc.expRunErr { + require.EqualError(t, err, tc.expRunErrMsg) + } else { + require.NoError(t, err) + require.Equal(t, tc.expCalled, called) + } + }) + } +} diff --git a/tests/mocks/types_module_module.go b/tests/mocks/types_module_module.go index e820ca022a..d73283d8d4 100644 --- a/tests/mocks/types_module_module.go +++ b/tests/mocks/types_module_module.go @@ -323,6 +323,9 @@ func (m *MockAppModuleGenesis) ExportGenesis(arg0 types0.Context, arg1 codec.JSO return ret0 } +// ConsensusVersion mocks base method +func (m *MockAppModule) ConsensusVersion() uint64 { return 1 } + // ExportGenesis indicates an expected call of ExportGenesis func (mr *MockAppModuleGenesisMockRecorder) ExportGenesis(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() diff --git a/testutil/context.go b/testutil/context.go new file mode 100644 index 0000000000..7e2ee962b5 --- /dev/null +++ b/testutil/context.go @@ -0,0 +1,25 @@ +package testutil + +import ( + "github.com/line/ostracon/libs/log" + ocproto "github.com/line/ostracon/proto/ostracon/types" + "github.com/line/tm-db/v2/memdb" + + "github.com/line/lbm-sdk/store" + sdk "github.com/line/lbm-sdk/types" +) + +// DefaultContext creates a sdk.Context with a fresh MemDB that can be used in tests. +func DefaultContext(key sdk.StoreKey) sdk.Context { + db := memdb.NewDB() + cms := store.NewCommitMultiStore(db) + cms.MountStoreWithDB(key, sdk.StoreTypeIAVL, db) + // cms.MountStoreWithDB(tkey, sdk.StoreTypeTransient, db) + err := cms.LoadLatestVersion() + if err != nil { + panic(err) + } + ctx := sdk.NewContext(cms, ocproto.Header{}, false, log.NewNopLogger()) + + return ctx +} diff --git a/types/address/store_key.go b/types/address/store_key.go new file mode 100644 index 0000000000..c4f289d1d9 --- /dev/null +++ b/types/address/store_key.go @@ -0,0 +1,33 @@ +package address + +import ( + sdkerrors "github.com/line/lbm-sdk/types/errors" +) + +// MaxAddrLen is the maximum allowed length (in bytes) for an address. +const MaxAddrLen = 255 + +// LengthPrefix prefixes the address bytes with its length, this is used +// for example for variable-length components in store keys. +func LengthPrefix(bz []byte) ([]byte, error) { + bzLen := len(bz) + if bzLen == 0 { + return bz, nil + } + + if bzLen > MaxAddrLen { + return nil, sdkerrors.Wrapf(sdkerrors.ErrUnknownAddress, "address length should be max %d bytes, got %d", MaxAddrLen, bzLen) + } + + return append([]byte{byte(bzLen)}, bz...), nil +} + +// MustLengthPrefix is LengthPrefix with panic on error. 
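//
// For example, prefixing the 3-byte address {0x1, 0x2, 0x3} yields
// {0x3, 0x1, 0x2, 0x3}; an empty address is returned unchanged, and an
// address longer than MaxAddrLen panics.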
+func MustLengthPrefix(bz []byte) []byte { + res, err := LengthPrefix(bz) + if err != nil { + panic(err) + } + + return res +} diff --git a/types/address/store_key_test.go b/types/address/store_key_test.go new file mode 100644 index 0000000000..4176bcf357 --- /dev/null +++ b/types/address/store_key_test.go @@ -0,0 +1,45 @@ +package address_test + +import ( + "testing" + + "github.com/stretchr/testify/suite" + "github.com/line/lbm-sdk/types/address" +) + +func TestStoreKeySuite(t *testing.T) { + suite.Run(t, new(StoreKeySuite)) +} + +type StoreKeySuite struct{ suite.Suite } + +func (suite *StoreKeySuite) TestLengthPrefix() { + require := suite.Require() + addr10byte := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9} + addr20byte := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19} + addr256byte := make([]byte, 256) + + tests := []struct { + name string + addr []byte + expStoreKey []byte + expErr bool + }{ + {"10-byte address", addr10byte, append([]byte{byte(10)}, addr10byte...), false}, + {"20-byte address", addr20byte, append([]byte{byte(20)}, addr20byte...), false}, + {"256-byte address (too long)", addr256byte, nil, true}, + } + + for _, tt := range tests { + tt := tt + suite.Run(tt.name, func() { + storeKey, err := address.LengthPrefix(tt.addr) + if tt.expErr { + require.Error(err) + } else { + require.NoError(err) + require.Equal(tt.expStoreKey, storeKey) + } + }) + } +} diff --git a/types/context_test.go b/types/context_test.go index d2ccb52836..925ca57dd5 100644 --- a/types/context_test.go +++ b/types/context_test.go @@ -7,14 +7,12 @@ import ( "github.com/golang/mock/gomock" abci "github.com/line/ostracon/abci/types" - "github.com/line/ostracon/libs/log" ocproto "github.com/line/ostracon/proto/ostracon/types" - "github.com/line/tm-db/v2/memdb" "github.com/stretchr/testify/suite" "github.com/line/lbm-sdk/crypto/keys/secp256k1" - "github.com/line/lbm-sdk/store" "github.com/line/lbm-sdk/tests/mocks" + "github.com/line/lbm-sdk/testutil" "github.com/line/lbm-sdk/types" ) @@ -26,15 +24,6 @@ func TestContextTestSuite(t *testing.T) { suite.Run(t, new(contextTestSuite)) } -func (s *contextTestSuite) defaultContext(key types.StoreKey) types.Context { - db := memdb.NewDB() - cms := store.NewCommitMultiStore(db) - cms.MountStoreWithDB(key, types.StoreTypeIAVL, db) - s.Require().NoError(cms.LoadLatestVersion()) - ctx := types.NewContext(cms, ocproto.Header{}, false, log.NewNopLogger()) - return ctx -} - func (s *contextTestSuite) TestCacheContext() { key := types.NewKVStoreKey(s.T().Name() + "_TestCacheContext") k1 := []byte("hello") @@ -42,7 +31,7 @@ func (s *contextTestSuite) TestCacheContext() { k2 := []byte("key") v2 := []byte("value") - ctx := s.defaultContext(key) + ctx := testutil.DefaultContext(key) store := ctx.KVStore(key) store.Set(k1, v1) s.Require().Equal(v1, store.Get(k1)) @@ -64,7 +53,7 @@ func (s *contextTestSuite) TestCacheContext() { func (s *contextTestSuite) TestLogContext() { key := types.NewKVStoreKey(s.T().Name()) - ctx := s.defaultContext(key) + ctx := testutil.DefaultContext(key) ctrl := gomock.NewController(s.T()) s.T().Cleanup(ctrl.Finish) diff --git a/types/errors/errors.go b/types/errors/errors.go index fd2b30907d..9208698bf7 100644 --- a/types/errors/errors.go +++ b/types/errors/errors.go @@ -139,6 +139,9 @@ var ( // less than (current block height - ValidSigBlockPeriod) ErrInvalidSigBlockHeight = Register(RootCodespace, 38, "invalid sig block height") + // ErrNotFound defines an error when requested entity doesn't exist in the state. 
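	// It is used, for example, by the module configurator when RunMigrations
	// finds no registered migration handler for a module.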
+ ErrNotFound = Register(RootCodespace, 39, "not found") + // ErrPanic is only set when we recover from a panic, so we know to // redact potentially sensitive system info ErrPanic = Register(UndefinedCodespace, 111222, "panic") diff --git a/types/module/configurator.go b/types/module/configurator.go index d561dd9eef..335675cca0 100644 --- a/types/module/configurator.go +++ b/types/module/configurator.go @@ -1,6 +1,11 @@ package module -import "github.com/gogo/protobuf/grpc" +import ( + "github.com/gogo/protobuf/grpc" + + sdk "github.com/line/lbm-sdk/types" + sdkerrors "github.com/line/lbm-sdk/types/errors" +) // Configurator provides the hooks to allow modules to configure and register // their services in the RegisterServices method. It is designed to eventually @@ -15,16 +20,34 @@ type Configurator interface { // QueryServer returns a grpc.Server instance which allows registering services // that will be exposed as gRPC services as well as ABCI query handlers. QueryServer() grpc.Server + + // RegisterMigration registers an in-place store migration for a module. The + // handler is a migration script to perform in-place migrations from version + // `forVersion` to version `forVersion+1`. + // + // EACH TIME a module's ConsensusVersion increments, a new migration MUST + // be registered using this function. If a migration handler is missing for + // a particular function, the upgrade logic (see RunMigrations function) + // will panic. If the ConsensusVersion bump does not introduce any store + // changes, then a no-op function must be registered here. + RegisterMigration(moduleName string, forVersion uint64, handler MigrationHandler) error } type configurator struct { msgServer grpc.Server queryServer grpc.Server + + // migrations is a map of moduleName -> forVersion -> migration script handler + migrations map[string]map[uint64]MigrationHandler } // NewConfigurator returns a new Configurator instance func NewConfigurator(msgServer grpc.Server, queryServer grpc.Server) Configurator { - return configurator{msgServer: msgServer, queryServer: queryServer} + return configurator{ + msgServer: msgServer, + queryServer: queryServer, + migrations: map[string]map[uint64]MigrationHandler{}, + } } var _ Configurator = configurator{} @@ -38,3 +61,51 @@ func (c configurator) MsgServer() grpc.Server { func (c configurator) QueryServer() grpc.Server { return c.queryServer } + +// RegisterMigration implements the Configurator.RegisterMigration method +func (c configurator) RegisterMigration(moduleName string, forVersion uint64, handler MigrationHandler) error { + if forVersion == 0 { + return sdkerrors.Wrap(sdkerrors.ErrInvalidVersion, "module migration versions should start at 1") + } + + if c.migrations[moduleName] == nil { + c.migrations[moduleName] = map[uint64]MigrationHandler{} + } + + if c.migrations[moduleName][forVersion] != nil { + return sdkerrors.Wrapf(sdkerrors.ErrLogic, "another migration for module %s and version %d already exists", moduleName, forVersion) + } + + c.migrations[moduleName][forVersion] = handler + + return nil +} + +// runModuleMigrations runs all in-place store migrations for one given module from a +// version to another version. +func (c configurator) runModuleMigrations(ctx sdk.Context, moduleName string, fromVersion, toVersion uint64) error { + // No-op if toVersion is the initial version. 
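	// A module whose ConsensusVersion is still 1 has never introduced a
	// state-breaking change, so there is nothing to migrate.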
+ if toVersion <= 1 { + return nil + } + + moduleMigrationsMap, found := c.migrations[moduleName] + if !found { + return sdkerrors.Wrapf(sdkerrors.ErrNotFound, "no migrations found for module %s", moduleName) + } + + // Run in-place migrations for the module sequentially until toVersion. + for i := fromVersion; i < toVersion; i++ { + migrateFn, found := moduleMigrationsMap[i] + if !found { + return sdkerrors.Wrapf(sdkerrors.ErrNotFound, "no migration found for module %s from version %d to version %d", moduleName, i, i+1) + } + + err := migrateFn(ctx) + if err != nil { + return err + } + } + + return nil +} diff --git a/types/module/module.go b/types/module/module.go index 6a8e4212d5..a296d030d5 100644 --- a/types/module/module.go +++ b/types/module/module.go @@ -40,6 +40,7 @@ import ( "github.com/line/lbm-sdk/codec" codectypes "github.com/line/lbm-sdk/codec/types" sdk "github.com/line/lbm-sdk/types" + sdkerrors "github.com/line/lbm-sdk/types/errors" ) //__________________________________________________________________________________________ @@ -174,6 +175,12 @@ type AppModule interface { // RegisterServices allows a module to register services RegisterServices(Configurator) + // ConsensusVersion is a sequence number for state-breaking change of the + // module. It should be incremented on each consensus-breaking change + // introduced by the module. To avoid wrong/empty versions, the initial version + // should be set to 1. + ConsensusVersion() uint64 + // ABCI BeginBlock(sdk.Context, abci.RequestBeginBlock) EndBlock(sdk.Context, abci.RequestEndBlock) []abci.ValidatorUpdate @@ -208,6 +215,9 @@ func (gam GenesisOnlyAppModule) LegacyQuerierHandler(*codec.LegacyAmino) sdk.Que // RegisterServices registers all services. func (gam GenesisOnlyAppModule) RegisterServices(Configurator) {} +// ConsensusVersion implements AppModule/ConsensusVersion. +func (gam GenesisOnlyAppModule) ConsensusVersion() uint64 { return 1 } + // BeginBlock returns an empty module begin-block func (gam GenesisOnlyAppModule) BeginBlock(ctx sdk.Context, req abci.RequestBeginBlock) {} @@ -328,6 +338,30 @@ func (m *Manager) ExportGenesis(ctx sdk.Context, cdc codec.JSONMarshaler) map[st return genesisData } +// MigrationHandler is the migration function that each module registers. +type MigrationHandler func(sdk.Context) error + +// MigrationMap is a map of moduleName -> version, where version denotes the +// version from which we should perform the migration for each module. +type MigrationMap map[string]uint64 + +// RunMigrations performs in-place store migrations for all modules. +func (m Manager) RunMigrations(ctx sdk.Context, cfg Configurator, migrateFromVersions MigrationMap) error { + c, ok := cfg.(configurator) + if !ok { + return sdkerrors.Wrapf(sdkerrors.ErrInvalidType, "expected %T, got %T", configurator{}, cfg) + } + + for moduleName, module := range m.Modules { + err := c.runModuleMigrations(ctx, moduleName, migrateFromVersions[moduleName], module.ConsensusVersion()) + if err != nil { + return err + } + } + + return nil +} + // BeginBlock performs begin block functionality for all modules. It creates a // child context with an event manager to aggregate events emitted from all // modules. diff --git a/x/auth/module.go b/x/auth/module.go index 98be3370d6..69d4b301bf 100644 --- a/x/auth/module.go +++ b/x/auth/module.go @@ -148,6 +148,9 @@ func (am AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONMarshaler) json return cdc.MustMarshalJSON(gs) } +// ConsensusVersion implements AppModule/ConsensusVersion. 
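// The module has introduced no state-breaking changes since in-place store
// migrations were added, so it reports the initial version of 1.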
+func (AppModule) ConsensusVersion() uint64 { return 1 } + // BeginBlock returns the begin blocker for the auth module. func (AppModule) BeginBlock(_ sdk.Context, _ abci.RequestBeginBlock) {} diff --git a/x/auth/vesting/module.go b/x/auth/vesting/module.go index 64d11b368c..d3e6241095 100644 --- a/x/auth/vesting/module.go +++ b/x/auth/vesting/module.go @@ -127,3 +127,6 @@ func (am AppModule) EndBlock(_ sdk.Context, _ abci.RequestEndBlock) []abci.Valid func (am AppModule) ExportGenesis(_ sdk.Context, cdc codec.JSONMarshaler) json.RawMessage { return am.DefaultGenesis(cdc) } + +// ConsensusVersion implements AppModule/ConsensusVersion. +func (AppModule) ConsensusVersion() uint64 { return 1 } diff --git a/x/bank/keeper/migrations.go b/x/bank/keeper/migrations.go new file mode 100644 index 0000000000..f5dbc2eb06 --- /dev/null +++ b/x/bank/keeper/migrations.go @@ -0,0 +1,20 @@ +package keeper + +import ( + sdk "github.com/line/lbm-sdk/types" +) + +// Migrator is a struct for handling in-place store migrations. +type Migrator struct { + keeper BaseKeeper +} + +// NewMigrator returns a new Migrator. +func NewMigrator(keeper BaseKeeper) Migrator { + return Migrator{keeper: keeper} +} + +// Migrate1to2 migrates from version 1 to 2. +func (m Migrator) Migrate1to2(ctx sdk.Context) error { + return nil +} diff --git a/x/bank/module.go b/x/bank/module.go index 6f9f19c8c8..b4cbdf45f6 100644 --- a/x/bank/module.go +++ b/x/bank/module.go @@ -100,6 +100,11 @@ type AppModule struct { func (am AppModule) RegisterServices(cfg module.Configurator) { types.RegisterMsgServer(cfg.MsgServer(), keeper.NewMsgServerImpl(am.keeper)) types.RegisterQueryServer(cfg.QueryServer(), am.keeper) + + m := keeper.NewMigrator(am.keeper.(keeper.BaseKeeper)) + if err := cfg.RegisterMigration(types.ModuleName, 1, m.Migrate1to2); err != nil { + panic(fmt.Sprintf("failed to migrate x/bank from version 1 to 2: %v", err)) + } } // NewAppModule creates a new AppModule object @@ -151,6 +156,9 @@ func (am AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONMarshaler) json return cdc.MustMarshalJSON(gs) } +// ConsensusVersion implements AppModule/ConsensusVersion. +func (AppModule) ConsensusVersion() uint64 { return 2 } + // BeginBlock performs a no-op. func (AppModule) BeginBlock(_ sdk.Context, _ abci.RequestBeginBlock) {} diff --git a/x/bank/spec/01_state.md b/x/bank/spec/01_state.md index f744e2e779..6ca6b97e8f 100644 --- a/x/bank/spec/01_state.md +++ b/x/bank/spec/01_state.md @@ -7,5 +7,5 @@ order: 1 The `x/bank` module keeps state of two primary objects, account balances and the total supply of all balances. -- Balances: `[]byte("balances") | []byte(address) / []byte(balance.Denom) -> ProtocolBuffer(balance)` - Supply: `0x0 -> ProtocolBuffer(Supply)` +- Balances: `0x2 | byte(address length) | []byte(address) | []byte(balance.Denom) -> ProtocolBuffer(balance)` diff --git a/x/bank/types/key.go b/x/bank/types/key.go index 9a1d509331..4742ba4de5 100644 --- a/x/bank/types/key.go +++ b/x/bank/types/key.go @@ -5,6 +5,7 @@ import ( "strings" sdk "github.com/line/lbm-sdk/types" + "github.com/line/lbm-sdk/types/address" ) const ( @@ -23,7 +24,7 @@ const ( // KVStore keys var ( - BalancesPrefix = []byte("balances") + BalancesPrefix = []byte{0x02} SupplyKey = []byte{0x00} DenomMetadataPrefix = []byte{0x1} @@ -52,3 +53,8 @@ func AddressFromBalancesStore(key []byte) sdk.AccAddress { } return sdk.AccAddress(addr[:index]) } + +// CreateAccountBalancesPrefix creates the prefix for an account's balances. 
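// The resulting layout is BalancesPrefix (0x02) followed by the
// length-prefixed address, matching the Balances index documented in
// x/bank/spec/01_state.md.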
+func CreateAccountBalancesPrefix(addr []byte) []byte { + return append(BalancesPrefix, address.MustLengthPrefix(addr)...) +} diff --git a/x/capability/module.go b/x/capability/module.go index 0ba6b7f21f..26f3f347d3 100644 --- a/x/capability/module.go +++ b/x/capability/module.go @@ -136,6 +136,9 @@ func (am AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONMarshaler) json return cdc.MustMarshalJSON(genState) } +// ConsensusVersion implements AppModule/ConsensusVersion. +func (AppModule) ConsensusVersion() uint64 { return 1 } + // BeginBlock executes all ABCI BeginBlock logic respective to the capability module. func (am AppModule) BeginBlock(_ sdk.Context, _ abci.RequestBeginBlock) {} diff --git a/x/crisis/module.go b/x/crisis/module.go index 307359d068..1f8de5156a 100644 --- a/x/crisis/module.go +++ b/x/crisis/module.go @@ -158,6 +158,9 @@ func (am AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONMarshaler) json return cdc.MustMarshalJSON(gs) } +// ConsensusVersion implements AppModule/ConsensusVersion. +func (AppModule) ConsensusVersion() uint64 { return 1 } + // BeginBlock performs a no-op. func (AppModule) BeginBlock(_ sdk.Context, _ abci.RequestBeginBlock) {} diff --git a/x/distribution/keeper/migrations.go b/x/distribution/keeper/migrations.go new file mode 100644 index 0000000000..775ea928f7 --- /dev/null +++ b/x/distribution/keeper/migrations.go @@ -0,0 +1,20 @@ +package keeper + +import ( + sdk "github.com/line/lbm-sdk/types" +) + +// Migrator is a struct for handling in-place store migrations. +type Migrator struct { + keeper Keeper +} + +// NewMigrator returns a new Migrator. +func NewMigrator(keeper Keeper) Migrator { + return Migrator{keeper: keeper} +} + +// Migrate1to2 migrates from version 1 to 2. +func (m Migrator) Migrate1to2(ctx sdk.Context) error { + return nil +} diff --git a/x/distribution/module.go b/x/distribution/module.go index 0c5c8f82a1..7d34ae018f 100644 --- a/x/distribution/module.go +++ b/x/distribution/module.go @@ -143,6 +143,9 @@ func (am AppModule) LegacyQuerierHandler(legacyQuerierCdc *codec.LegacyAmino) sd func (am AppModule) RegisterServices(cfg module.Configurator) { types.RegisterMsgServer(cfg.MsgServer(), keeper.NewMsgServerImpl(am.keeper)) types.RegisterQueryServer(cfg.QueryServer(), am.keeper) + + m := keeper.NewMigrator(am.keeper) + cfg.RegisterMigration(types.ModuleName, 1, m.Migrate1to2) } // InitGenesis performs genesis initialization for the distribution module. It returns @@ -161,6 +164,9 @@ func (am AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONMarshaler) json return cdc.MustMarshalJSON(gs) } +// ConsensusVersion implements AppModule/ConsensusVersion. +func (AppModule) ConsensusVersion() uint64 { return 2 } + // BeginBlock returns the begin blocker for the distribution module. func (am AppModule) BeginBlock(ctx sdk.Context, req abci.RequestBeginBlock) { BeginBlocker(ctx, req, am.keeper) diff --git a/x/distribution/spec/02_state.md b/x/distribution/spec/02_state.md index 53c2f316cb..7dd01fd6cc 100644 --- a/x/distribution/spec/02_state.md +++ b/x/distribution/spec/02_state.md @@ -15,7 +15,7 @@ for fractions of coins to be received from operations like inflation. When coins are distributed from the pool they are truncated back to `sdk.Coins` which are non-decimal. -- FeePool: `0x00 -> ProtocolBuffer(FeePool)` +- FeePool: `0x00 -> ProtocolBuffer(FeePool)` ```go // coins with decimal @@ -38,7 +38,7 @@ Validator distribution information for the relevant validator is updated each ti 3. 
any delegator withdraws from a validator, or 4. the validator withdraws it's commission. -- ValidatorDistInfo: `0x02 | ValOperatorAddr -> ProtocolBuffer(validatorDistribution)` +- ValidatorDistInfo: `0x02 | ValOperatorAddrLen (1 byte) | ValOperatorAddr -> ProtocolBuffer(validatorDistribution)` ```go type ValidatorDistInfo struct { @@ -56,7 +56,7 @@ properties change (aka bonded tokens etc.) its properties will remain constant and the delegator's _accumulation_ factor can be calculated passively knowing only the height of the last withdrawal and its current properties. -- DelegationDistInfo: `0x02 | DelegatorAddr | ValOperatorAddr -> ProtocolBuffer(delegatorDist)` +- DelegationDistInfo: `0x02 | DelegatorAddrLen (1 byte) | DelegatorAddr | ValOperatorAddrLen (1 byte) | ValOperatorAddr -> ProtocolBuffer(delegatorDist)` ```go type DelegationDistInfo struct { diff --git a/x/evidence/module.go b/x/evidence/module.go index 02afc91332..134cd481c8 100644 --- a/x/evidence/module.go +++ b/x/evidence/module.go @@ -175,6 +175,9 @@ func (am AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONMarshaler) json return cdc.MustMarshalJSON(ExportGenesis(ctx, am.keeper)) } +// ConsensusVersion implements AppModule/ConsensusVersion. +func (AppModule) ConsensusVersion() uint64 { return 1 } + // BeginBlock executes all ABCI BeginBlock logic respective to the evidence module. func (am AppModule) BeginBlock(ctx sdk.Context, req abci.RequestBeginBlock) { BeginBlocker(ctx, req, am.keeper) diff --git a/x/genutil/module.go b/x/genutil/module.go index 28c8e274fb..1ac756c61d 100644 --- a/x/genutil/module.go +++ b/x/genutil/module.go @@ -110,3 +110,6 @@ func (am AppModule) InitGenesis(ctx sdk.Context, cdc codec.JSONMarshaler, data j func (am AppModule) ExportGenesis(_ sdk.Context, cdc codec.JSONMarshaler) json.RawMessage { return am.DefaultGenesis(cdc) } + +// ConsensusVersion implements AppModule/ConsensusVersion. +func (AppModule) ConsensusVersion() uint64 { return 1 } diff --git a/x/gov/keeper/migrations.go b/x/gov/keeper/migrations.go new file mode 100644 index 0000000000..775ea928f7 --- /dev/null +++ b/x/gov/keeper/migrations.go @@ -0,0 +1,20 @@ +package keeper + +import ( + sdk "github.com/line/lbm-sdk/types" +) + +// Migrator is a struct for handling in-place store migrations. +type Migrator struct { + keeper Keeper +} + +// NewMigrator returns a new Migrator. +func NewMigrator(keeper Keeper) Migrator { + return Migrator{keeper: keeper} +} + +// Migrate1to2 migrates from version 1 to 2. +func (m Migrator) Migrate1to2(ctx sdk.Context) error { + return nil +} diff --git a/x/gov/module.go b/x/gov/module.go index 365ffcbc60..a540f4a605 100644 --- a/x/gov/module.go +++ b/x/gov/module.go @@ -159,6 +159,9 @@ func (am AppModule) LegacyQuerierHandler(legacyQuerierCdc *codec.LegacyAmino) sd func (am AppModule) RegisterServices(cfg module.Configurator) { types.RegisterMsgServer(cfg.MsgServer(), keeper.NewMsgServerImpl(am.keeper)) types.RegisterQueryServer(cfg.QueryServer(), am.keeper) + + m := keeper.NewMigrator(am.keeper) + cfg.RegisterMigration(types.ModuleName, 1, m.Migrate1to2) } // InitGenesis performs genesis initialization for the gov module. It returns @@ -177,6 +180,9 @@ func (am AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONMarshaler) json return cdc.MustMarshalJSON(gs) } +// ConsensusVersion implements AppModule/ConsensusVersion. +func (AppModule) ConsensusVersion() uint64 { return 2 } + // BeginBlock performs a no-op. 
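// x/gov has no begin-block logic; proposal tallying happens in EndBlock.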
func (AppModule) BeginBlock(_ sdk.Context, _ abci.RequestBeginBlock) {} diff --git a/x/ibc/applications/transfer/module.go b/x/ibc/applications/transfer/module.go index 6886770dac..cde43ccf8c 100644 --- a/x/ibc/applications/transfer/module.go +++ b/x/ibc/applications/transfer/module.go @@ -145,6 +145,9 @@ func (am AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONMarshaler) json return cdc.MustMarshalJSON(gs) } +// ConsensusVersion implements AppModule/ConsensusVersion. +func (AppModule) ConsensusVersion() uint64 { return 1 } + // BeginBlock implements the AppModule interface func (am AppModule) BeginBlock(ctx sdk.Context, req abci.RequestBeginBlock) { } diff --git a/x/ibc/core/module.go b/x/ibc/core/module.go index 6a3168e998..b5469fcc6b 100644 --- a/x/ibc/core/module.go +++ b/x/ibc/core/module.go @@ -156,6 +156,9 @@ func (am AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONMarshaler) json return cdc.MustMarshalJSON(ExportGenesis(ctx, *am.keeper)) } +// ConsensusVersion implements AppModule/ConsensusVersion. +func (AppModule) ConsensusVersion() uint64 { return 1 } + // BeginBlock returns the begin blocker for the ibc module. func (am AppModule) BeginBlock(ctx sdk.Context, req abci.RequestBeginBlock) { ibcclient.BeginBlocker(ctx, am.keeper.ClientKeeper) diff --git a/x/mint/module.go b/x/mint/module.go index 3aba593856..dc94d7876f 100644 --- a/x/mint/module.go +++ b/x/mint/module.go @@ -146,6 +146,9 @@ func (am AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONMarshaler) json return cdc.MustMarshalJSON(gs) } +// ConsensusVersion implements AppModule/ConsensusVersion. +func (AppModule) ConsensusVersion() uint64 { return 1 } + // BeginBlock returns the begin blocker for the mint module. func (am AppModule) BeginBlock(ctx sdk.Context, _ abci.RequestBeginBlock) { BeginBlocker(ctx, am.keeper) diff --git a/x/params/keeper/common_test.go b/x/params/keeper/common_test.go index 6406c5d356..412ea049a6 100644 --- a/x/params/keeper/common_test.go +++ b/x/params/keeper/common_test.go @@ -1,14 +1,10 @@ package keeper_test import ( - "github.com/line/ostracon/libs/log" - ocproto "github.com/line/ostracon/proto/ostracon/types" - "github.com/line/tm-db/v2/memdb" - "github.com/line/lbm-sdk/simapp" + "github.com/line/lbm-sdk/testutil" "github.com/line/lbm-sdk/codec" - "github.com/line/lbm-sdk/store" sdk "github.com/line/lbm-sdk/types" paramskeeper "github.com/line/lbm-sdk/x/params/keeper" ) @@ -17,7 +13,7 @@ func testComponents() (*codec.LegacyAmino, sdk.Context, sdk.StoreKey, paramskeep marshaler := simapp.MakeTestEncodingConfig().Marshaler legacyAmino := createTestCodec() mkey := sdk.NewKVStoreKey("test") - ctx := defaultContext(mkey) + ctx := testutil.DefaultContext(mkey) keeper := paramskeeper.NewKeeper(marshaler, legacyAmino, mkey) return legacyAmino, ctx, mkey, keeper @@ -36,15 +32,3 @@ func createTestCodec() *codec.LegacyAmino { cdc.RegisterConcrete(invalid{}, "test/invalid", nil) return cdc } - -func defaultContext(key sdk.StoreKey) sdk.Context { - db := memdb.NewDB() - cms := store.NewCommitMultiStore(db) - cms.MountStoreWithDB(key, sdk.StoreTypeIAVL, db) - err := cms.LoadLatestVersion() - if err != nil { - panic(err) - } - ctx := sdk.NewContext(cms, ocproto.Header{}, false, log.NewNopLogger()) - return ctx -} diff --git a/x/params/module.go b/x/params/module.go index 5357b1adf7..cc0235aca9 100644 --- a/x/params/module.go +++ b/x/params/module.go @@ -139,6 +139,9 @@ func (am AppModule) ExportGenesis(_ sdk.Context, _ codec.JSONMarshaler) json.Raw return nil } +// ConsensusVersion 
implements AppModule/ConsensusVersion. +func (AppModule) ConsensusVersion() uint64 { return 1 } + // BeginBlock performs a no-op. func (AppModule) BeginBlock(_ sdk.Context, _ abci.RequestBeginBlock) {} diff --git a/x/slashing/keeper/migrations.go b/x/slashing/keeper/migrations.go new file mode 100644 index 0000000000..775ea928f7 --- /dev/null +++ b/x/slashing/keeper/migrations.go @@ -0,0 +1,20 @@ +package keeper + +import ( + sdk "github.com/line/lbm-sdk/types" +) + +// Migrator is a struct for handling in-place store migrations. +type Migrator struct { + keeper Keeper +} + +// NewMigrator returns a new Migrator. +func NewMigrator(keeper Keeper) Migrator { + return Migrator{keeper: keeper} +} + +// Migrate1to2 migrates from version 1 to 2. +func (m Migrator) Migrate1to2(ctx sdk.Context) error { + return nil +} diff --git a/x/slashing/module.go b/x/slashing/module.go index c077fc3491..bcdef55d63 100644 --- a/x/slashing/module.go +++ b/x/slashing/module.go @@ -141,6 +141,9 @@ func (am AppModule) LegacyQuerierHandler(legacyQuerierCdc *codec.LegacyAmino) sd func (am AppModule) RegisterServices(cfg module.Configurator) { types.RegisterMsgServer(cfg.MsgServer(), keeper.NewMsgServerImpl(am.keeper)) types.RegisterQueryServer(cfg.QueryServer(), am.keeper) + + m := keeper.NewMigrator(am.keeper) + cfg.RegisterMigration(types.ModuleName, 1, m.Migrate1to2) } // InitGenesis performs genesis initialization for the slashing module. It returns @@ -159,6 +162,9 @@ func (am AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONMarshaler) json return cdc.MustMarshalJSON(gs) } +// ConsensusVersion implements AppModule/ConsensusVersion. +func (AppModule) ConsensusVersion() uint64 { return 1 } + // BeginBlock returns the begin blocker for the slashing module. func (am AppModule) BeginBlock(ctx sdk.Context, req abci.RequestBeginBlock) { BeginBlocker(ctx, req, am.keeper) diff --git a/x/slashing/spec/02_state.md b/x/slashing/spec/02_state.md index 2f2acc6d8d..ff83537b51 100644 --- a/x/slashing/spec/02_state.md +++ b/x/slashing/spec/02_state.md @@ -20,8 +20,8 @@ number of blocks by being automatically jailed, potentially slashed, and unbonde Information about validator's liveness activity is tracked through `ValidatorSigningInfo`. It is indexed in the store as follows: -- ValidatorSigningInfo: ` 0x01 | ConsAddress -> amino(valSigningInfo)` -- MissedBlocksBitArray: ` 0x02 | ConsAddress | LittleEndianUint64(signArrayIndex) -> VarInt(didMiss)` +- ValidatorSigningInfo: ` 0x01 | ConsAddrLen (1 byte) | ConsAddress -> ProtocolBuffer(ValSigningInfo)` +- MissedBlocksBitArray: ` 0x02 | ConsAddrLen (1 byte) | ConsAddress | LittleEndianUint64(signArrayIndex) -> VarInt(didMiss)` (varint is a number encoding format) The first mapping allows us to easily lookup the recent signing info for a validator based on the validator's consensus address. The second mapping acts diff --git a/x/staking/keeper/migrations.go b/x/staking/keeper/migrations.go new file mode 100644 index 0000000000..775ea928f7 --- /dev/null +++ b/x/staking/keeper/migrations.go @@ -0,0 +1,20 @@ +package keeper + +import ( + sdk "github.com/line/lbm-sdk/types" +) + +// Migrator is a struct for handling in-place store migrations. +type Migrator struct { + keeper Keeper +} + +// NewMigrator returns a new Migrator. +func NewMigrator(keeper Keeper) Migrator { + return Migrator{keeper: keeper} +} + +// Migrate1to2 migrates from version 1 to 2. 
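// The handler is currently an empty placeholder; the key-layout changes
// described in x/staking/spec/01_state.md (length-prefixed addresses) would
// be performed here once an actual in-place store migration is needed.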
+func (m Migrator) Migrate1to2(ctx sdk.Context) error { + return nil +} diff --git a/x/staking/module.go b/x/staking/module.go index 304160291f..560a32d2df 100644 --- a/x/staking/module.go +++ b/x/staking/module.go @@ -138,6 +138,9 @@ func (am AppModule) RegisterServices(cfg module.Configurator) { types.RegisterMsgServer(cfg.MsgServer(), keeper.NewMsgServerImpl(am.keeper)) querier := keeper.Querier{Keeper: am.keeper} types.RegisterQueryServer(cfg.QueryServer(), querier) + + m := keeper.NewMigrator(am.keeper) + cfg.RegisterMigration(types.ModuleName, 1, m.Migrate1to2) } // InitGenesis performs genesis initialization for the staking module. It returns @@ -157,6 +160,9 @@ func (am AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONMarshaler) json return cdc.MustMarshalJSON(gs) } +// ConsensusVersion implements AppModule/ConsensusVersion. +func (AppModule) ConsensusVersion() uint64 { return 2 } + // BeginBlock returns the begin blocker for the staking module. func (am AppModule) BeginBlock(ctx sdk.Context, _ abci.RequestBeginBlock) { BeginBlocker(ctx, am.keeper) diff --git a/x/staking/spec/01_state.md b/x/staking/spec/01_state.md index 2595faeefa..fef30e1a12 100644 --- a/x/staking/spec/01_state.md +++ b/x/staking/spec/01_state.md @@ -44,10 +44,10 @@ required lookups for slashing and validator-set updates. A third special index throughout each block, unlike the first two indices which mirror the validator records within a block. -- Validators: `0x21 | OperatorAddr -> ProtocolBuffer(validator)` -- ValidatorsByConsAddr: `0x22 | ConsAddr -> OperatorAddr` -- ValidatorsByPower: `0x23 | BigEndian(ConsensusPower) | OperatorAddr -> OperatorAddr` -- LastValidatorsPower: `0x11 OperatorAddr -> ProtocolBuffer(ConsensusPower)` +- Validators: `0x21 | OperatorAddrLen (1 byte) | OperatorAddr -> ProtocolBuffer(validator)` +- ValidatorsByConsAddr: `0x22 | ConsAddrLen (1 byte) | ConsAddr -> OperatorAddr` +- ValidatorsByPower: `0x23 | BigEndian(ConsensusPower) | OperatorAddrLen (1 byte) | OperatorAddr -> OperatorAddr` +- LastValidatorsPower: `0x11 | OperatorAddrLen (1 byte) | OperatorAddr -> ProtocolBuffer(ConsensusPower)` `Validators` is the primary index - it ensures that each operator can have only one associated validator, where the public key of that validator can change in the @@ -77,7 +77,7 @@ Each validator's state is stored in a `Validator` struct: Delegations are identified by combining `DelegatorAddr` (the address of the delegator) with the `ValidatorAddr` Delegators are indexed in the store as follows: -- Delegation: `0x31 | DelegatorAddr | ValidatorAddr -> ProtocolBuffer(delegation)` +- Delegation: `0x31 | DelegatorAddrLen (1 byte) | DelegatorAddr | ValidatorAddrLen (1 byte) | ValidatorAddr -> ProtocolBuffer(delegation)` Stake holders may delegate coins to validators; under this circumstance their funds are held in a `Delegation` data structure. It is owned by one @@ -113,8 +113,8 @@ detected. 
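To make the new layout concrete, here is a minimal sketch (not part of this patch) that assembles an `UnbondingDelegation` key using the `MustLengthPrefix` helper added in `types/address`; the `unbondingDelegationKey` function is purely illustrative and not the SDK's actual key builder.

```go
package main

import (
	"fmt"

	"github.com/line/lbm-sdk/types/address"
)

// unbondingDelegationKey shows the new layout:
// 0x32 | len(delAddr) | delAddr | len(valAddr) | valAddr
func unbondingDelegationKey(delAddr, valAddr []byte) []byte {
	key := append([]byte{0x32}, address.MustLengthPrefix(delAddr)...)
	return append(key, address.MustLengthPrefix(valAddr)...)
}

func main() {
	del := []byte{0xA1, 0xA2, 0xA3}
	val := []byte{0xB1, 0xB2}
	fmt.Printf("%x\n", unbondingDelegationKey(del, val)) // 3203a1a2a302b1b2
}
```

Length-prefixing each address component keeps composite keys parseable now that addresses are variable-length, since the boundary between the delegator and validator addresses can no longer be inferred from a fixed width.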
`UnbondingDelegation` are indexed in the store as: -- UnbondingDelegation: `0x32 | DelegatorAddr | ValidatorAddr -> ProtocolBuffer(unbondingDelegation)` -- UnbondingDelegationsFromValidator: `0x33 | ValidatorAddr | DelegatorAddr -> nil` +- UnbondingDelegation: `0x32 | DelegatorAddrLen (1 byte) | DelegatorAddr | ValidatorAddrLen (1 byte) | ValidatorAddr -> ProtocolBuffer(unbondingDelegation)` +- UnbondingDelegationsFromValidator: `0x33 | ValidatorAddrLen (1 byte) | ValidatorAddr | DelegatorAddrLen (1 byte) | DelegatorAddr -> nil` The first map here is used in queries, to lookup all unbonding delegations for a given delegator, while the second map is used in slashing, to lookup all @@ -135,9 +135,9 @@ committed by the source validator. `Redelegation` are indexed in the store as: -- Redelegations: `0x34 | DelegatorAddr | ValidatorSrcAddr | ValidatorDstAddr -> ProtocolBuffer(redelegation)` -- RedelegationsBySrc: `0x35 | ValidatorSrcAddr | ValidatorDstAddr | DelegatorAddr -> nil` -- RedelegationsByDst: `0x36 | ValidatorDstAddr | ValidatorSrcAddr | DelegatorAddr -> nil` +- Redelegations: `0x34 | DelegatorAddrLen (1 byte) | DelegatorAddr | ValidatorAddrLen (1 byte) | ValidatorSrcAddr | ValidatorDstAddr -> ProtocolBuffer(redelegation)` +- RedelegationsBySrc: `0x35 | ValidatorSrcAddrLen (1 byte) | ValidatorSrcAddr | ValidatorDstAddrLen (1 byte) | ValidatorDstAddr | DelegatorAddrLen (1 byte) | DelegatorAddr -> nil` +- RedelegationsByDst: `0x36 | ValidatorDstAddrLen (1 byte) | ValidatorDstAddr | ValidatorSrcAddrLen (1 byte) | ValidatorSrcAddr | DelegatorAddrLen (1 byte) | DelegatorAddr -> nil` The first map here is used for queries, to lookup all redelegations for a given delegator. The second map is used for slashing based on the `ValidatorSrcAddr`, diff --git a/x/staking/types/validator.go b/x/staking/types/validator.go index 3a3a45051d..46ec3e1575 100644 --- a/x/staking/types/validator.go +++ b/x/staking/types/validator.go @@ -262,13 +262,13 @@ func (d Description) EnsureLength() (Description, error) { // ABCIValidatorUpdate returns an abci.ValidatorUpdate from a staking validator type // with the full validator power func (v Validator) ABCIValidatorUpdate() abci.ValidatorUpdate { - tmProtoPk, err := v.OcConsPublicKey() + ocprotoPk, err := v.OcConsPublicKey() if err != nil { panic(err) } return abci.ValidatorUpdate{ - PubKey: tmProtoPk, + PubKey: ocprotoPk, Power: v.ConsensusPower(), } } @@ -276,13 +276,13 @@ func (v Validator) ABCIValidatorUpdate() abci.ValidatorUpdate { // ABCIValidatorUpdateZero returns an abci.ValidatorUpdate from a staking validator type // with zero power used for validator updates. func (v Validator) ABCIValidatorUpdateZero() abci.ValidatorUpdate { - tmProtoPk, err := v.OcConsPublicKey() + ocprotoPk, err := v.OcConsPublicKey() if err != nil { panic(err) } return abci.ValidatorUpdate{ - PubKey: tmProtoPk, + PubKey: ocprotoPk, Power: 0, } } diff --git a/x/upgrade/module.go b/x/upgrade/module.go index c56a94343e..6c78a369cb 100644 --- a/x/upgrade/module.go +++ b/x/upgrade/module.go @@ -120,6 +120,9 @@ func (am AppModule) ExportGenesis(_ sdk.Context, cdc codec.JSONMarshaler) json.R return am.DefaultGenesis(cdc) } +// ConsensusVersion implements AppModule/ConsensusVersion. 
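// x/upgrade carries no store migrations of its own; it is the module whose
// UpgradeHandler is expected to call app.RunMigrations (see the RunMigrations
// godoc in simapp/app.go).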
+func (AppModule) ConsensusVersion() uint64 { return 1 } + // BeginBlock calls the upgrade module hooks // // CONTRACT: this is registered in BeginBlocker *before* all other modules' BeginBlock functions From 272d118ec438ad55cfc9e59629c04a2cf86e7666 Mon Sep 17 00:00:00 2001 From: leesj9476 Date: Mon, 8 Nov 2021 10:44:30 +0900 Subject: [PATCH 02/12] feat: add upgrade/migration --- cosmovisor/init_cosmovisor.sh | 33 +++++++++++++++++++++++++++++++ simapp/app_test.go | 4 ++-- x/auth/keeper/migrations.go | 20 +++++++++++++++++++ x/auth/module.go | 5 +++++ x/bank/module.go | 6 +++--- x/capability/keeper/migrations.go | 20 +++++++++++++++++++ x/capability/module.go | 7 ++++++- x/crisis/keeper/migrations.go | 20 +++++++++++++++++++ x/crisis/module.go | 5 +++++ x/distribution/module.go | 8 +++++--- x/evidence/keeper/migrations.go | 20 +++++++++++++++++++ x/evidence/module.go | 5 +++++ x/gov/module.go | 8 +++++--- x/mint/keeper/migrations.go | 20 +++++++++++++++++++ x/mint/module.go | 5 +++++ x/params/keeper/migrations.go | 20 +++++++++++++++++++ x/params/module.go | 5 +++++ x/slashing/module.go | 6 ++++-- x/staking/module.go | 8 +++++--- x/upgrade/keeper/migrations.go | 20 +++++++++++++++++++ x/upgrade/module.go | 5 +++++ x/wasm/keeper/migrations.go | 20 +++++++++++++++++++ x/wasm/module.go | 8 ++++++++ 23 files changed, 261 insertions(+), 17 deletions(-) create mode 100644 cosmovisor/init_cosmovisor.sh create mode 100644 x/auth/keeper/migrations.go create mode 100644 x/capability/keeper/migrations.go create mode 100644 x/crisis/keeper/migrations.go create mode 100644 x/evidence/keeper/migrations.go create mode 100644 x/mint/keeper/migrations.go create mode 100644 x/params/keeper/migrations.go create mode 100644 x/upgrade/keeper/migrations.go create mode 100644 x/wasm/keeper/migrations.go diff --git a/cosmovisor/init_cosmovisor.sh b/cosmovisor/init_cosmovisor.sh new file mode 100644 index 0000000000..8d50c89286 --- /dev/null +++ b/cosmovisor/init_cosmovisor.sh @@ -0,0 +1,33 @@ +#!/bin/sh + +BINARY="simd" + +BASE_DIR=~/.simapp +CHAIN_DIR="${BASE_DIR}/simapp0" +BINARY_DIR="$(which ${BINARY})" + +export DAEMON_NAME="${BINARY}" +export DAEMON_HOME="${CHAIN_DIR}" + +if [ -z ${BINARY_DIR} ]; then + echo "Failed to get ${BINARY_DIR}. Aborting..." + exit 1 +fi + +if [ ! -d ${CHAIN_DIR} ]; then + echo "${CHAIN_DIR} is not exist. Aborting..." + exit 1 +fi + +BIN_DIR="${CHAIN_DIR}/cosmovisor/genesis/bin" +if ! mkdir -p ${BIN_DIR}; then + echo "Failed to create cosmovisor/genesis/bin folder(${CHAIN_DIR}). Aborting..." + exit 1 +fi + +if ! cp ${BINARY_DIR} ${BIN_DIR}; then + echo "Failed to copy ${BINARY_DIR} to ${BIN_DIR}. Aborting..." 
+ exit 1 +fi + +echo "cosmovisor version: $(cosmovisor version)" diff --git a/simapp/app_test.go b/simapp/app_test.go index 6c6d6baa20..63065ba876 100644 --- a/simapp/app_test.go +++ b/simapp/app_test.go @@ -115,7 +115,7 @@ func TestRunMigrations(t *testing.T) { "bank", 0, true, "module migration versions should start at 1: invalid version", false, "", 0, }, - { + /* { "throws error on RunMigrations if no migration registered for bank", "", 1, false, "", true, "no migrations found for module bank: not found", 0, @@ -129,7 +129,7 @@ func TestRunMigrations(t *testing.T) { "cannot register migration handler for same module & forVersion", "bank", 1, true, "another migration for module bank and version 1 already exists: internal logic error", false, "", 0, - }, + }, */ } for _, tc := range testCases { diff --git a/x/auth/keeper/migrations.go b/x/auth/keeper/migrations.go new file mode 100644 index 0000000000..780e70bda6 --- /dev/null +++ b/x/auth/keeper/migrations.go @@ -0,0 +1,20 @@ +package keeper + +import ( + sdk "github.com/line/lbm-sdk/types" +) + +// Migrator is a struct for handling in-place store migrations. +type Migrator struct { + keeper AccountKeeper +} + +// NewMigrator returns a new Migrator. +func NewMigrator(keeper AccountKeeper) Migrator { + return Migrator{keeper: keeper} +} + +// Migrate1to2 migrates from version 1 to 2. +func (m Migrator) Migrate1to2(ctx sdk.Context) error { + return nil +} diff --git a/x/auth/module.go b/x/auth/module.go index 69d4b301bf..8db694e308 100644 --- a/x/auth/module.go +++ b/x/auth/module.go @@ -130,6 +130,11 @@ func (am AppModule) LegacyQuerierHandler(legacyQuerierCdc *codec.LegacyAmino) sd func (am AppModule) RegisterServices(cfg module.Configurator) { types.RegisterMsgServer(cfg.MsgServer(), keeper.NewMsgServerImpl(am.accountKeeper)) types.RegisterQueryServer(cfg.QueryServer(), am.accountKeeper) + + /* m := keeper.NewMigrator(am.accountKeeper) + if err := cfg.RegisterMigration(types.ModuleName, 1, m.Migrate1to2); err != nil { + panic(fmt.Sprintf("failed to migrate x/auth from version 1 to 2: %v", err)) + } */ } // InitGenesis performs genesis initialization for the auth module. It returns diff --git a/x/bank/module.go b/x/bank/module.go index b4cbdf45f6..e75972cf41 100644 --- a/x/bank/module.go +++ b/x/bank/module.go @@ -101,10 +101,10 @@ func (am AppModule) RegisterServices(cfg module.Configurator) { types.RegisterMsgServer(cfg.MsgServer(), keeper.NewMsgServerImpl(am.keeper)) types.RegisterQueryServer(cfg.QueryServer(), am.keeper) - m := keeper.NewMigrator(am.keeper.(keeper.BaseKeeper)) + /* m := keeper.NewMigrator(am.keeper.(keeper.BaseKeeper)) if err := cfg.RegisterMigration(types.ModuleName, 1, m.Migrate1to2); err != nil { panic(fmt.Sprintf("failed to migrate x/bank from version 1 to 2: %v", err)) - } + } */ } // NewAppModule creates a new AppModule object @@ -157,7 +157,7 @@ func (am AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONMarshaler) json } // ConsensusVersion implements AppModule/ConsensusVersion. -func (AppModule) ConsensusVersion() uint64 { return 2 } +func (AppModule) ConsensusVersion() uint64 { return 1 } // BeginBlock performs a no-op. 
func (AppModule) BeginBlock(_ sdk.Context, _ abci.RequestBeginBlock) {} diff --git a/x/capability/keeper/migrations.go b/x/capability/keeper/migrations.go new file mode 100644 index 0000000000..775ea928f7 --- /dev/null +++ b/x/capability/keeper/migrations.go @@ -0,0 +1,20 @@ +package keeper + +import ( + sdk "github.com/line/lbm-sdk/types" +) + +// Migrator is a struct for handling in-place store migrations. +type Migrator struct { + keeper Keeper +} + +// NewMigrator returns a new Migrator. +func NewMigrator(keeper Keeper) Migrator { + return Migrator{keeper: keeper} +} + +// Migrate1to2 migrates from version 1 to 2. +func (m Migrator) Migrate1to2(ctx sdk.Context) error { + return nil +} diff --git a/x/capability/module.go b/x/capability/module.go index 26f3f347d3..f37495a6a2 100644 --- a/x/capability/module.go +++ b/x/capability/module.go @@ -113,7 +113,12 @@ func (am AppModule) LegacyQuerierHandler(*codec.LegacyAmino) sdk.Querier { retur // RegisterServices registers a GRPC query service to respond to the // module-specific GRPC queries. -func (am AppModule) RegisterServices(module.Configurator) {} +func (am AppModule) RegisterServices(cfg module.Configurator) { + /* m := keeper.NewMigrator(am.keeper) + if err := cfg.RegisterMigration(types.ModuleName, 1, m.Migrate1to2); err != nil { + panic(fmt.Sprintf("failed to migrate x/capability from version 1 to 2: %v", err)) + } */ +} // RegisterInvariants registers the capability module's invariants. func (am AppModule) RegisterInvariants(_ sdk.InvariantRegistry) {} diff --git a/x/crisis/keeper/migrations.go b/x/crisis/keeper/migrations.go new file mode 100644 index 0000000000..775ea928f7 --- /dev/null +++ b/x/crisis/keeper/migrations.go @@ -0,0 +1,20 @@ +package keeper + +import ( + sdk "github.com/line/lbm-sdk/types" +) + +// Migrator is a struct for handling in-place store migrations. +type Migrator struct { + keeper Keeper +} + +// NewMigrator returns a new Migrator. +func NewMigrator(keeper Keeper) Migrator { + return Migrator{keeper: keeper} +} + +// Migrate1to2 migrates from version 1 to 2. +func (m Migrator) Migrate1to2(ctx sdk.Context) error { + return nil +} diff --git a/x/crisis/module.go b/x/crisis/module.go index 1f8de5156a..37897b3d8b 100644 --- a/x/crisis/module.go +++ b/x/crisis/module.go @@ -134,6 +134,11 @@ func (AppModule) LegacyQuerierHandler(*codec.LegacyAmino) sdk.Querier { return n // RegisterServices registers module services. func (am AppModule) RegisterServices(cfg module.Configurator) { types.RegisterMsgServer(cfg.MsgServer(), am.keeper) + + /* m := keeper.NewMigrator(*am.keeper) + if err := cfg.RegisterMigration(types.ModuleName, 1, m.Migrate1to2); err != nil { + panic(fmt.Sprintf("failed to migrate x/crisis from version 1 to 2: %v", err)) + } */ } // InitGenesis performs genesis initialization for the crisis module. 
It returns diff --git a/x/distribution/module.go b/x/distribution/module.go index 7d34ae018f..0475ff3c00 100644 --- a/x/distribution/module.go +++ b/x/distribution/module.go @@ -144,8 +144,10 @@ func (am AppModule) RegisterServices(cfg module.Configurator) { types.RegisterMsgServer(cfg.MsgServer(), keeper.NewMsgServerImpl(am.keeper)) types.RegisterQueryServer(cfg.QueryServer(), am.keeper) - m := keeper.NewMigrator(am.keeper) - cfg.RegisterMigration(types.ModuleName, 1, m.Migrate1to2) + /* m := keeper.NewMigrator(am.keeper) + if err := cfg.RegisterMigration(types.ModuleName, 1, m.Migrate1to2); err != nil { + panic(fmt.Sprintf("failed to migrate x/distribution from version 1 to 2: %v", err)) + } */ } // InitGenesis performs genesis initialization for the distribution module. It returns @@ -165,7 +167,7 @@ func (am AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONMarshaler) json } // ConsensusVersion implements AppModule/ConsensusVersion. -func (AppModule) ConsensusVersion() uint64 { return 2 } +func (AppModule) ConsensusVersion() uint64 { return 1 } // BeginBlock returns the begin blocker for the distribution module. func (am AppModule) BeginBlock(ctx sdk.Context, req abci.RequestBeginBlock) { diff --git a/x/evidence/keeper/migrations.go b/x/evidence/keeper/migrations.go new file mode 100644 index 0000000000..775ea928f7 --- /dev/null +++ b/x/evidence/keeper/migrations.go @@ -0,0 +1,20 @@ +package keeper + +import ( + sdk "github.com/line/lbm-sdk/types" +) + +// Migrator is a struct for handling in-place store migrations. +type Migrator struct { + keeper Keeper +} + +// NewMigrator returns a new Migrator. +func NewMigrator(keeper Keeper) Migrator { + return Migrator{keeper: keeper} +} + +// Migrate1to2 migrates from version 1 to 2. +func (m Migrator) Migrate1to2(ctx sdk.Context) error { + return nil +} diff --git a/x/evidence/module.go b/x/evidence/module.go index 134cd481c8..60d3aae4a6 100644 --- a/x/evidence/module.go +++ b/x/evidence/module.go @@ -152,6 +152,11 @@ func (am AppModule) LegacyQuerierHandler(legacyQuerierCdc *codec.LegacyAmino) sd func (am AppModule) RegisterServices(cfg module.Configurator) { types.RegisterMsgServer(cfg.MsgServer(), keeper.NewMsgServerImpl(am.keeper)) types.RegisterQueryServer(cfg.QueryServer(), am.keeper) + + /* m := keeper.NewMigrator(am.keeper) + if err := cfg.RegisterMigration(types.ModuleName, 1, m.Migrate1to2); err != nil { + panic(fmt.Sprintf("failed to migrate x/evidence from version 1 to 2: %v", err)) + } */ } // RegisterInvariants registers the evidence module's invariants. diff --git a/x/gov/module.go b/x/gov/module.go index a540f4a605..bc876e64eb 100644 --- a/x/gov/module.go +++ b/x/gov/module.go @@ -160,8 +160,10 @@ func (am AppModule) RegisterServices(cfg module.Configurator) { types.RegisterMsgServer(cfg.MsgServer(), keeper.NewMsgServerImpl(am.keeper)) types.RegisterQueryServer(cfg.QueryServer(), am.keeper) - m := keeper.NewMigrator(am.keeper) - cfg.RegisterMigration(types.ModuleName, 1, m.Migrate1to2) + /* m := keeper.NewMigrator(am.keeper) + if err := cfg.RegisterMigration(types.ModuleName, 1, m.Migrate1to2); err != nil { + panic(fmt.Sprintf("failed to migrate x/gov from version 1 to 2: %v", err)) + } */ } // InitGenesis performs genesis initialization for the gov module. It returns @@ -181,7 +183,7 @@ func (am AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONMarshaler) json } // ConsensusVersion implements AppModule/ConsensusVersion. 
-func (AppModule) ConsensusVersion() uint64 { return 2 } +func (AppModule) ConsensusVersion() uint64 { return 1 } // BeginBlock performs a no-op. func (AppModule) BeginBlock(_ sdk.Context, _ abci.RequestBeginBlock) {} diff --git a/x/mint/keeper/migrations.go b/x/mint/keeper/migrations.go new file mode 100644 index 0000000000..775ea928f7 --- /dev/null +++ b/x/mint/keeper/migrations.go @@ -0,0 +1,20 @@ +package keeper + +import ( + sdk "github.com/line/lbm-sdk/types" +) + +// Migrator is a struct for handling in-place store migrations. +type Migrator struct { + keeper Keeper +} + +// NewMigrator returns a new Migrator. +func NewMigrator(keeper Keeper) Migrator { + return Migrator{keeper: keeper} +} + +// Migrate1to2 migrates from version 1 to 2. +func (m Migrator) Migrate1to2(ctx sdk.Context) error { + return nil +} diff --git a/x/mint/module.go b/x/mint/module.go index dc94d7876f..143316e7b6 100644 --- a/x/mint/module.go +++ b/x/mint/module.go @@ -127,6 +127,11 @@ func (am AppModule) LegacyQuerierHandler(legacyQuerierCdc *codec.LegacyAmino) sd // module-specific gRPC queries. func (am AppModule) RegisterServices(cfg module.Configurator) { types.RegisterQueryServer(cfg.QueryServer(), am.keeper) + + /* m := keeper.NewMigrator(am.keeper) + if err := cfg.RegisterMigration(types.ModuleName, 1, m.Migrate1to2); err != nil { + panic(fmt.Sprintf("failed to migrate x/mint from version 1 to 2: %v", err)) + } */ } // InitGenesis performs genesis initialization for the mint module. It returns diff --git a/x/params/keeper/migrations.go b/x/params/keeper/migrations.go new file mode 100644 index 0000000000..775ea928f7 --- /dev/null +++ b/x/params/keeper/migrations.go @@ -0,0 +1,20 @@ +package keeper + +import ( + sdk "github.com/line/lbm-sdk/types" +) + +// Migrator is a struct for handling in-place store migrations. +type Migrator struct { + keeper Keeper +} + +// NewMigrator returns a new Migrator. +func NewMigrator(keeper Keeper) Migrator { + return Migrator{keeper: keeper} +} + +// Migrate1to2 migrates from version 1 to 2. +func (m Migrator) Migrate1to2(ctx sdk.Context) error { + return nil +} diff --git a/x/params/module.go b/x/params/module.go index cc0235aca9..ee20d49224 100644 --- a/x/params/module.go +++ b/x/params/module.go @@ -113,6 +113,11 @@ func (am AppModule) LegacyQuerierHandler(legacyQuerierCdc *codec.LegacyAmino) sd // module-specific gRPC queries. func (am AppModule) RegisterServices(cfg module.Configurator) { proposal.RegisterQueryServer(cfg.QueryServer(), am.keeper) + + /* m := keeper.NewMigrator(am.keeper) + if err := cfg.RegisterMigration(types.ModuleName, 1, m.Migrate1to2); err != nil { + panic(fmt.Sprintf("failed to migrate x/params from version 1 to 2: %v", err)) + } */ } // ProposalContents returns all the params content functions used to diff --git a/x/slashing/module.go b/x/slashing/module.go index bcdef55d63..d2dbae8d32 100644 --- a/x/slashing/module.go +++ b/x/slashing/module.go @@ -142,8 +142,10 @@ func (am AppModule) RegisterServices(cfg module.Configurator) { types.RegisterMsgServer(cfg.MsgServer(), keeper.NewMsgServerImpl(am.keeper)) types.RegisterQueryServer(cfg.QueryServer(), am.keeper) - m := keeper.NewMigrator(am.keeper) - cfg.RegisterMigration(types.ModuleName, 1, m.Migrate1to2) + /* m := keeper.NewMigrator(am.keeper) + if err := cfg.RegisterMigration(types.ModuleName, 1, m.Migrate1to2); err != nil { + panic(fmt.Sprintf("failed to migrate x/slashing from version 1 to 2: %v", err)) + } */ } // InitGenesis performs genesis initialization for the slashing module. 
It returns diff --git a/x/staking/module.go b/x/staking/module.go index 560a32d2df..f553a6af0d 100644 --- a/x/staking/module.go +++ b/x/staking/module.go @@ -139,8 +139,10 @@ func (am AppModule) RegisterServices(cfg module.Configurator) { querier := keeper.Querier{Keeper: am.keeper} types.RegisterQueryServer(cfg.QueryServer(), querier) - m := keeper.NewMigrator(am.keeper) - cfg.RegisterMigration(types.ModuleName, 1, m.Migrate1to2) + /* m := keeper.NewMigrator(am.keeper) + if err := cfg.RegisterMigration(types.ModuleName, 1, m.Migrate1to2); err != nil { + panic(fmt.Sprintf("failed to migrate x/staking from version 1 to 2: %v", err)) + } */ } // InitGenesis performs genesis initialization for the staking module. It returns @@ -161,7 +163,7 @@ func (am AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONMarshaler) json } // ConsensusVersion implements AppModule/ConsensusVersion. -func (AppModule) ConsensusVersion() uint64 { return 2 } +func (AppModule) ConsensusVersion() uint64 { return 1 } // BeginBlock returns the begin blocker for the staking module. func (am AppModule) BeginBlock(ctx sdk.Context, _ abci.RequestBeginBlock) { diff --git a/x/upgrade/keeper/migrations.go b/x/upgrade/keeper/migrations.go new file mode 100644 index 0000000000..775ea928f7 --- /dev/null +++ b/x/upgrade/keeper/migrations.go @@ -0,0 +1,20 @@ +package keeper + +import ( + sdk "github.com/line/lbm-sdk/types" +) + +// Migrator is a struct for handling in-place store migrations. +type Migrator struct { + keeper Keeper +} + +// NewMigrator returns a new Migrator. +func NewMigrator(keeper Keeper) Migrator { + return Migrator{keeper: keeper} +} + +// Migrate1to2 migrates from version 1 to 2. +func (m Migrator) Migrate1to2(ctx sdk.Context) error { + return nil +} diff --git a/x/upgrade/module.go b/x/upgrade/module.go index 6c78a369cb..a8a988df37 100644 --- a/x/upgrade/module.go +++ b/x/upgrade/module.go @@ -98,6 +98,11 @@ func (am AppModule) LegacyQuerierHandler(legacyQuerierCdc *codec.LegacyAmino) sd // module-specific GRPC queries. func (am AppModule) RegisterServices(cfg module.Configurator) { types.RegisterQueryServer(cfg.QueryServer(), am.keeper) + + /* m := keeper.NewMigrator(am.keeper) + if err := cfg.RegisterMigration(types.ModuleName, 1, m.Migrate1to2); err != nil { + panic(fmt.Sprintf("failed to migrate x/upgrade from version 1 to 2: %v", err)) + } */ } // InitGenesis is ignored, no sense in serializing future upgrades diff --git a/x/wasm/keeper/migrations.go b/x/wasm/keeper/migrations.go new file mode 100644 index 0000000000..775ea928f7 --- /dev/null +++ b/x/wasm/keeper/migrations.go @@ -0,0 +1,20 @@ +package keeper + +import ( + sdk "github.com/line/lbm-sdk/types" +) + +// Migrator is a struct for handling in-place store migrations. +type Migrator struct { + keeper Keeper +} + +// NewMigrator returns a new Migrator. +func NewMigrator(keeper Keeper) Migrator { + return Migrator{keeper: keeper} +} + +// Migrate1to2 migrates from version 1 to 2. 
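+// It is currently a stub that returns nil; a real in-place migration would
+// typically open the module store from ctx (e.g. ctx.KVStore(m.keeper.storeKey),
+// field name assumed, not part of this patch) and rewrite any entries whose
+// format changed. Until such logic exists, the registration stays commented
+// out in x/wasm/module.go below.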
+func (m Migrator) Migrate1to2(ctx sdk.Context) error { + return nil +} diff --git a/x/wasm/module.go b/x/wasm/module.go index 5544d7256c..4e1955e296 100644 --- a/x/wasm/module.go +++ b/x/wasm/module.go @@ -113,6 +113,11 @@ func NewAppModule(cdc codec.Marshaler, keeper *Keeper, validatorSetSource keeper func (am AppModule) RegisterServices(cfg module.Configurator) { types.RegisterMsgServer(cfg.MsgServer(), keeper.NewMsgServerImpl(keeper.NewDefaultPermissionKeeper(am.keeper))) types.RegisterQueryServer(cfg.QueryServer(), NewQuerier(am.keeper)) + + /* m := keeper.NewMigrator(*am.keeper) + if err := cfg.RegisterMigration(types.ModuleName, 1, m.Migrate1to2); err != nil { + panic(fmt.Sprintf("failed to migrate x/distribution from version 1 to 2: %v", err)) + } */ } func (am AppModule) LegacyQuerierHandler(amino *codec.LegacyAmino) sdk.Querier { @@ -151,6 +156,9 @@ func (am AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONMarshaler) json return cdc.MustMarshalJSON(gs) } +// ConsensusVersion implements AppModule/ConsensusVersion. +func (AppModule) ConsensusVersion() uint64 { return 1 } + // BeginBlock returns the begin blocker for the wasm module. func (am AppModule) BeginBlock(_ sdk.Context, _ abci.RequestBeginBlock) {} From 47d3023bc8fa55981de8bc93bdd64b88b30b3045 Mon Sep 17 00:00:00 2001 From: leesj9476 Date: Mon, 8 Nov 2021 10:58:51 +0900 Subject: [PATCH 03/12] feat: add upgrade/migration change comment type --- simapp/app_test.go | 30 +++++++++++++++--------------- x/auth/module.go | 8 ++++---- x/bank/module.go | 8 ++++---- x/capability/module.go | 8 ++++---- x/crisis/module.go | 8 ++++---- x/distribution/module.go | 8 ++++---- x/evidence/module.go | 8 ++++---- x/gov/module.go | 8 ++++---- x/params/module.go | 8 ++++---- x/staking/module.go | 8 ++++---- x/wasm/module.go | 8 ++++---- 11 files changed, 55 insertions(+), 55 deletions(-) diff --git a/simapp/app_test.go b/simapp/app_test.go index 63065ba876..fb72ec7862 100644 --- a/simapp/app_test.go +++ b/simapp/app_test.go @@ -115,21 +115,21 @@ func TestRunMigrations(t *testing.T) { "bank", 0, true, "module migration versions should start at 1: invalid version", false, "", 0, }, - /* { - "throws error on RunMigrations if no migration registered for bank", - "", 1, - false, "", true, "no migrations found for module bank: not found", 0, - }, - { - "can register and run migration handler for x/bank", - "bank", 1, - false, "", false, "", 1, - }, - { - "cannot register migration handler for same module & forVersion", - "bank", 1, - true, "another migration for module bank and version 1 already exists: internal logic error", false, "", 0, - }, */ + // { + // "throws error on RunMigrations if no migration registered for bank", + // "", 1, + // false, "", true, "no migrations found for module bank: not found", 0, + // }, + // { + // "can register and run migration handler for x/bank", + // "bank", 1, + // false, "", false, "", 1, + // }, + // { + // "cannot register migration handler for same module & forVersion", + // "bank", 1, + // true, "another migration for module bank and version 1 already exists: internal logic error", false, "", 0, + // }, } for _, tc := range testCases { diff --git a/x/auth/module.go b/x/auth/module.go index 8db694e308..671c6507d8 100644 --- a/x/auth/module.go +++ b/x/auth/module.go @@ -131,10 +131,10 @@ func (am AppModule) RegisterServices(cfg module.Configurator) { types.RegisterMsgServer(cfg.MsgServer(), keeper.NewMsgServerImpl(am.accountKeeper)) types.RegisterQueryServer(cfg.QueryServer(), am.accountKeeper) - /* m 
:= keeper.NewMigrator(am.accountKeeper) - if err := cfg.RegisterMigration(types.ModuleName, 1, m.Migrate1to2); err != nil { - panic(fmt.Sprintf("failed to migrate x/auth from version 1 to 2: %v", err)) - } */ + // m := keeper.NewMigrator(am.accountKeeper) + // if err := cfg.RegisterMigration(types.ModuleName, 1, m.Migrate1to2); err != nil { + // panic(fmt.Sprintf("failed to migrate x/auth from version 1 to 2: %v", err)) + // } } // InitGenesis performs genesis initialization for the auth module. It returns diff --git a/x/bank/module.go b/x/bank/module.go index e75972cf41..1400f7a6f8 100644 --- a/x/bank/module.go +++ b/x/bank/module.go @@ -101,10 +101,10 @@ func (am AppModule) RegisterServices(cfg module.Configurator) { types.RegisterMsgServer(cfg.MsgServer(), keeper.NewMsgServerImpl(am.keeper)) types.RegisterQueryServer(cfg.QueryServer(), am.keeper) - /* m := keeper.NewMigrator(am.keeper.(keeper.BaseKeeper)) - if err := cfg.RegisterMigration(types.ModuleName, 1, m.Migrate1to2); err != nil { - panic(fmt.Sprintf("failed to migrate x/bank from version 1 to 2: %v", err)) - } */ + // m := keeper.NewMigrator(am.keeper.(keeper.BaseKeeper)) + // if err := cfg.RegisterMigration(types.ModuleName, 1, m.Migrate1to2); err != nil { + // panic(fmt.Sprintf("failed to migrate x/bank from version 1 to 2: %v", err)) + // } } // NewAppModule creates a new AppModule object diff --git a/x/capability/module.go b/x/capability/module.go index f37495a6a2..b7e3101525 100644 --- a/x/capability/module.go +++ b/x/capability/module.go @@ -114,10 +114,10 @@ func (am AppModule) LegacyQuerierHandler(*codec.LegacyAmino) sdk.Querier { retur // RegisterServices registers a GRPC query service to respond to the // module-specific GRPC queries. func (am AppModule) RegisterServices(cfg module.Configurator) { - /* m := keeper.NewMigrator(am.keeper) - if err := cfg.RegisterMigration(types.ModuleName, 1, m.Migrate1to2); err != nil { - panic(fmt.Sprintf("failed to migrate x/capability from version 1 to 2: %v", err)) - } */ + // m := keeper.NewMigrator(am.keeper) + // if err := cfg.RegisterMigration(types.ModuleName, 1, m.Migrate1to2); err != nil { + // panic(fmt.Sprintf("failed to migrate x/capability from version 1 to 2: %v", err)) + // } } // RegisterInvariants registers the capability module's invariants. diff --git a/x/crisis/module.go b/x/crisis/module.go index 37897b3d8b..891a2f3c93 100644 --- a/x/crisis/module.go +++ b/x/crisis/module.go @@ -135,10 +135,10 @@ func (AppModule) LegacyQuerierHandler(*codec.LegacyAmino) sdk.Querier { return n func (am AppModule) RegisterServices(cfg module.Configurator) { types.RegisterMsgServer(cfg.MsgServer(), am.keeper) - /* m := keeper.NewMigrator(*am.keeper) - if err := cfg.RegisterMigration(types.ModuleName, 1, m.Migrate1to2); err != nil { - panic(fmt.Sprintf("failed to migrate x/crisis from version 1 to 2: %v", err)) - } */ + // m := keeper.NewMigrator(*am.keeper) + // if err := cfg.RegisterMigration(types.ModuleName, 1, m.Migrate1to2); err != nil { + // panic(fmt.Sprintf("failed to migrate x/crisis from version 1 to 2: %v", err)) + // } } // InitGenesis performs genesis initialization for the crisis module. 
It returns diff --git a/x/distribution/module.go b/x/distribution/module.go index 0475ff3c00..952a6825b5 100644 --- a/x/distribution/module.go +++ b/x/distribution/module.go @@ -144,10 +144,10 @@ func (am AppModule) RegisterServices(cfg module.Configurator) { types.RegisterMsgServer(cfg.MsgServer(), keeper.NewMsgServerImpl(am.keeper)) types.RegisterQueryServer(cfg.QueryServer(), am.keeper) - /* m := keeper.NewMigrator(am.keeper) - if err := cfg.RegisterMigration(types.ModuleName, 1, m.Migrate1to2); err != nil { - panic(fmt.Sprintf("failed to migrate x/distribution from version 1 to 2: %v", err)) - } */ + // m := keeper.NewMigrator(am.keeper) + // if err := cfg.RegisterMigration(types.ModuleName, 1, m.Migrate1to2); err != nil { + // panic(fmt.Sprintf("failed to migrate x/distribution from version 1 to 2: %v", err)) + // } } // InitGenesis performs genesis initialization for the distribution module. It returns diff --git a/x/evidence/module.go b/x/evidence/module.go index 60d3aae4a6..f0f7675982 100644 --- a/x/evidence/module.go +++ b/x/evidence/module.go @@ -153,10 +153,10 @@ func (am AppModule) RegisterServices(cfg module.Configurator) { types.RegisterMsgServer(cfg.MsgServer(), keeper.NewMsgServerImpl(am.keeper)) types.RegisterQueryServer(cfg.QueryServer(), am.keeper) - /* m := keeper.NewMigrator(am.keeper) - if err := cfg.RegisterMigration(types.ModuleName, 1, m.Migrate1to2); err != nil { - panic(fmt.Sprintf("failed to migrate x/evidence from version 1 to 2: %v", err)) - } */ + // m := keeper.NewMigrator(am.keeper) + // if err := cfg.RegisterMigration(types.ModuleName, 1, m.Migrate1to2); err != nil { + // panic(fmt.Sprintf("failed to migrate x/evidence from version 1 to 2: %v", err)) + // } } // RegisterInvariants registers the evidence module's invariants. diff --git a/x/gov/module.go b/x/gov/module.go index bc876e64eb..d9d70a2448 100644 --- a/x/gov/module.go +++ b/x/gov/module.go @@ -160,10 +160,10 @@ func (am AppModule) RegisterServices(cfg module.Configurator) { types.RegisterMsgServer(cfg.MsgServer(), keeper.NewMsgServerImpl(am.keeper)) types.RegisterQueryServer(cfg.QueryServer(), am.keeper) - /* m := keeper.NewMigrator(am.keeper) - if err := cfg.RegisterMigration(types.ModuleName, 1, m.Migrate1to2); err != nil { - panic(fmt.Sprintf("failed to migrate x/gov from version 1 to 2: %v", err)) - } */ + // m := keeper.NewMigrator(am.keeper) + // if err := cfg.RegisterMigration(types.ModuleName, 1, m.Migrate1to2); err != nil { + // panic(fmt.Sprintf("failed to migrate x/gov from version 1 to 2: %v", err)) + // } } // InitGenesis performs genesis initialization for the gov module. 
It returns diff --git a/x/params/module.go b/x/params/module.go index ee20d49224..0ecda75ce4 100644 --- a/x/params/module.go +++ b/x/params/module.go @@ -114,10 +114,10 @@ func (am AppModule) LegacyQuerierHandler(legacyQuerierCdc *codec.LegacyAmino) sd func (am AppModule) RegisterServices(cfg module.Configurator) { proposal.RegisterQueryServer(cfg.QueryServer(), am.keeper) - /* m := keeper.NewMigrator(am.keeper) - if err := cfg.RegisterMigration(types.ModuleName, 1, m.Migrate1to2); err != nil { - panic(fmt.Sprintf("failed to migrate x/params from version 1 to 2: %v", err)) - } */ + // m := keeper.NewMigrator(am.keeper) + // if err := cfg.RegisterMigration(types.ModuleName, 1, m.Migrate1to2); err != nil { + // panic(fmt.Sprintf("failed to migrate x/params from version 1 to 2: %v", err)) + // } } // ProposalContents returns all the params content functions used to diff --git a/x/staking/module.go b/x/staking/module.go index f553a6af0d..a2dca4db09 100644 --- a/x/staking/module.go +++ b/x/staking/module.go @@ -139,10 +139,10 @@ func (am AppModule) RegisterServices(cfg module.Configurator) { querier := keeper.Querier{Keeper: am.keeper} types.RegisterQueryServer(cfg.QueryServer(), querier) - /* m := keeper.NewMigrator(am.keeper) - if err := cfg.RegisterMigration(types.ModuleName, 1, m.Migrate1to2); err != nil { - panic(fmt.Sprintf("failed to migrate x/staking from version 1 to 2: %v", err)) - } */ + // m := keeper.NewMigrator(am.keeper) + // if err := cfg.RegisterMigration(types.ModuleName, 1, m.Migrate1to2); err != nil { + // panic(fmt.Sprintf("failed to migrate x/staking from version 1 to 2: %v", err)) + // } } // InitGenesis performs genesis initialization for the staking module. It returns diff --git a/x/wasm/module.go b/x/wasm/module.go index 4e1955e296..83ac03a478 100644 --- a/x/wasm/module.go +++ b/x/wasm/module.go @@ -114,10 +114,10 @@ func (am AppModule) RegisterServices(cfg module.Configurator) { types.RegisterMsgServer(cfg.MsgServer(), keeper.NewMsgServerImpl(keeper.NewDefaultPermissionKeeper(am.keeper))) types.RegisterQueryServer(cfg.QueryServer(), NewQuerier(am.keeper)) - /* m := keeper.NewMigrator(*am.keeper) - if err := cfg.RegisterMigration(types.ModuleName, 1, m.Migrate1to2); err != nil { - panic(fmt.Sprintf("failed to migrate x/distribution from version 1 to 2: %v", err)) - } */ + // m := keeper.NewMigrator(*am.keeper) + // if err := cfg.RegisterMigration(types.ModuleName, 1, m.Migrate1to2); err != nil { + // panic(fmt.Sprintf("failed to migrate x/distribution from version 1 to 2: %v", err)) + // } } func (am AppModule) LegacyQuerierHandler(amino *codec.LegacyAmino) sdk.Querier { From 8e778f684933b94a9c9c220cc07fbc855fe225b8 Mon Sep 17 00:00:00 2001 From: leesj9476 Date: Mon, 8 Nov 2021 11:36:55 +0900 Subject: [PATCH 04/12] feat: add upgrade/migration delete test sh file --- cosmovisor/init_cosmovisor.sh | 33 --------------------------------- 1 file changed, 33 deletions(-) delete mode 100644 cosmovisor/init_cosmovisor.sh diff --git a/cosmovisor/init_cosmovisor.sh b/cosmovisor/init_cosmovisor.sh deleted file mode 100644 index 8d50c89286..0000000000 --- a/cosmovisor/init_cosmovisor.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/sh - -BINARY="simd" - -BASE_DIR=~/.simapp -CHAIN_DIR="${BASE_DIR}/simapp0" -BINARY_DIR="$(which ${BINARY})" - -export DAEMON_NAME="${BINARY}" -export DAEMON_HOME="${CHAIN_DIR}" - -if [ -z ${BINARY_DIR} ]; then - echo "Failed to get ${BINARY_DIR}. Aborting..." - exit 1 -fi - -if [ ! -d ${CHAIN_DIR} ]; then - echo "${CHAIN_DIR} is not exist. Aborting..." 
- exit 1 -fi - -BIN_DIR="${CHAIN_DIR}/cosmovisor/genesis/bin" -if ! mkdir -p ${BIN_DIR}; then - echo "Failed to create cosmovisor/genesis/bin folder(${CHAIN_DIR}). Aborting..." - exit 1 -fi - -if ! cp ${BINARY_DIR} ${BIN_DIR}; then - echo "Failed to copy ${BINARY_DIR} to ${BIN_DIR}. Aborting..." - exit 1 -fi - -echo "cosmovisor version: $(cosmovisor version)" From 260a5d37a8b67195fb0adeda4b5d54f040ee7cd3 Mon Sep 17 00:00:00 2001 From: leesj9476 Date: Mon, 8 Nov 2021 14:41:43 +0900 Subject: [PATCH 05/12] add upgrade/migrate cli command --- CHANGELOG.md | 1 + client/keys/migrate.go | 147 +++++++++++++++++++++ client/keys/migrate_test.go | 40 ++++++ client/keys/root.go | 1 + client/keys/root_test.go | 2 +- client/keys/utils.go | 14 ++ crypto/keyring/keyring.go | 2 + crypto/keyring/legacy.go | 189 +++++++++++++++++++++++++++ simapp/simd/cmd/root.go | 1 + types/tx_msg.go | 5 + x/genutil/client/cli/migrate.go | 133 +++++++++++++++++++ x/genutil/client/testutil/migrate.go | 121 +++++++++++++++++ x/genutil/client/testutil/suite.go | 133 +++++++++++++++++++ 13 files changed, 788 insertions(+), 1 deletion(-) create mode 100644 client/keys/migrate.go create mode 100644 client/keys/migrate_test.go create mode 100644 crypto/keyring/legacy.go create mode 100644 x/genutil/client/cli/migrate.go create mode 100644 x/genutil/client/testutil/migrate.go create mode 100644 x/genutil/client/testutil/suite.go diff --git a/CHANGELOG.md b/CHANGELOG.md index bd601f298b..ac1a927110 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,7 @@ ### Features * (feat) [\#352] (https://github.com/line/lbm-sdk/pull/352) iavl, db & disk stats logging +* (x) [\#373] (https://github.com/line/lbm-sdk/pull/373) To smoothen the update to the latest stable release, the SDK includes a set of CLI commands for managing migrations between SDK versions, under the `migrate` subcommand. Only migration scripts between stable releases are included. ### Improvements * (slashing) [\#347](https://github.com/line/lbm-sdk/pull/347) Introduce VoterSetCounter diff --git a/client/keys/migrate.go b/client/keys/migrate.go new file mode 100644 index 0000000000..6cd7aade2e --- /dev/null +++ b/client/keys/migrate.go @@ -0,0 +1,147 @@ +package keys + +import ( + "bufio" + "fmt" + "io/ioutil" + "os" + + "github.com/pkg/errors" + "github.com/spf13/cobra" + + "github.com/line/lbm-sdk/client/flags" + "github.com/line/lbm-sdk/client/input" + "github.com/line/lbm-sdk/crypto/keyring" + sdk "github.com/line/lbm-sdk/types" +) + +// migratePassphrase is used as a no-op migration key passphrase as a passphrase +// is not needed for importing into the Keyring keystore. +const migratePassphrase = "NOOP_PASSPHRASE" + +// MigrateCommand migrates key information from legacy keybase to OS secret store. +func MigrateCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "migrate ", + Short: "Migrate keys from the legacy (db-based) Keybase", + Long: `Migrate key information from the legacy (db-based) Keybase to the new keyring-based Keyring. +The legacy Keybase used to persist keys in a LevelDB database stored in a 'keys' sub-directory of +the old client application's home directory, e.g. $HOME/.gaiacli/keys/. +For each key material entry, the command will prompt if the key should be skipped or not. If the key +is not to be skipped, the passphrase must be entered. The key will only be migrated if the passphrase +is correct. Otherwise, the command will exit and migration must be repeated. 
+ +It is recommended to run in 'dry-run' mode first to verify all key migration material. +`, + Args: cobra.ExactArgs(1), + RunE: runMigrateCmd, + } + + cmd.Flags().Bool(flags.FlagDryRun, false, "Run migration without actually persisting any changes to the new Keybase") + return cmd +} + +func runMigrateCmd(cmd *cobra.Command, args []string) error { + rootDir, _ := cmd.Flags().GetString(flags.FlagHome) + + // instantiate legacy keybase + var legacyKb keyring.LegacyKeybase + legacyKb, err := NewLegacyKeyBaseFromDir(args[0]) + if err != nil { + return err + } + + defer func() { _ = legacyKb.Close() }() + + // fetch list of keys from legacy keybase + oldKeys, err := legacyKb.List() + if err != nil { + return err + } + + buf := bufio.NewReader(cmd.InOrStdin()) + keyringServiceName := sdk.KeyringServiceName() + + var ( + tmpDir string + migrator keyring.Importer + ) + + if dryRun, _ := cmd.Flags().GetBool(flags.FlagDryRun); dryRun { + tmpDir, err = ioutil.TempDir("", "migrator-migrate-dryrun") + if err != nil { + return errors.Wrap(err, "failed to create temporary directory for dryrun migration") + } + + defer func() { _ = os.RemoveAll(tmpDir) }() + + migrator, err = keyring.New(keyringServiceName, keyring.BackendTest, tmpDir, buf) + } else { + backend, _ := cmd.Flags().GetString(flags.FlagKeyringBackend) + migrator, err = keyring.New(keyringServiceName, backend, rootDir, buf) + } + + if err != nil { + return errors.Wrap(err, fmt.Sprintf( + "failed to initialize keybase for service %s at directory %s", + keyringServiceName, rootDir, + )) + } + + if len(oldKeys) == 0 { + cmd.PrintErrln("Migration Aborted: no keys to migrate") + return nil + } + + for _, oldInfo := range oldKeys { + keyName := oldInfo.GetName() + keyType := oldInfo.GetType() + + cmd.PrintErrf("Migrating key: '%s (%s)' ...\n", keyName, keyType) + + // allow user to skip migrating specific keys + ok, err := input.GetConfirmation("Skip key migration?", buf, cmd.ErrOrStderr()) + if err != nil { + return err + } + if ok { + continue + } + + // TypeLocal needs an additional step to ask password. + // The other keyring types are handled by ImportInfo. + if keyType != keyring.TypeLocal { + infoImporter, ok := migrator.(keyring.LegacyInfoImporter) + if !ok { + return fmt.Errorf("the Keyring implementation does not support import operations of Info types") + } + + if err = infoImporter.ImportInfo(oldInfo); err != nil { + return err + } + + continue + } + + password, err := input.GetPassword("Enter passphrase to decrypt key:", buf) + if err != nil { + return err + } + + // NOTE: A passphrase is not actually needed here as when the key information + // is imported into the Keyring-based Keybase it only needs the password + // (see: writeLocalKey). 
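+			// Only armored (encrypted) key material moves between the two stores: the
+			// legacy keybase re-armors the key under the throwaway migratePassphrase and
+			// the new keyring decrypts it again on import just below.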
+ armoredPriv, err := legacyKb.ExportPrivKey(keyName, password, migratePassphrase) + if err != nil { + return err + } + + if err := migrator.ImportPrivKey(keyName, armoredPriv, migratePassphrase); err != nil { + return err + } + + } + cmd.PrintErrln("Migration complete.") + + return err +} diff --git a/client/keys/migrate_test.go b/client/keys/migrate_test.go new file mode 100644 index 0000000000..8b835af380 --- /dev/null +++ b/client/keys/migrate_test.go @@ -0,0 +1,40 @@ +package keys + +import ( + "context" + "fmt" + "testing" + + "github.com/line/lbm-sdk/client" + + "github.com/stretchr/testify/assert" + // "github.com/stretchr/testify/require" + + "github.com/line/lbm-sdk/client/flags" + "github.com/line/lbm-sdk/crypto/keyring" + "github.com/line/lbm-sdk/testutil" +) + +func Test_runMigrateCmd(t *testing.T) { + kbHome := t.TempDir() + clientCtx := client.Context{}.WithKeyringDir(kbHome) + ctx := context.WithValue(context.Background(), client.ClientContextKey, &clientCtx) + + // require.NoError(t, copy.Copy("testdata", kbHome)) + + cmd := MigrateCommand() + cmd.Flags().AddFlagSet(Commands("home").PersistentFlags()) + //mockIn := testutil.ApplyMockIODiscardOutErr(cmd) + mockIn, mockOut := testutil.ApplyMockIO(cmd) + + cmd.SetArgs([]string{ + kbHome, + //fmt.Sprintf("--%s=%s", flags.FlagHome, kbHome), + fmt.Sprintf("--%s=true", flags.FlagDryRun), + fmt.Sprintf("--%s=%s", flags.FlagKeyringBackend, keyring.BackendTest), + }) + + mockIn.Reset("\n12345678\n\n\n\n\n") + t.Log(mockOut.String()) + assert.NoError(t, cmd.ExecuteContext(ctx)) +} diff --git a/client/keys/root.go b/client/keys/root.go index c0a5da3da8..e48b7baec0 100644 --- a/client/keys/root.go +++ b/client/keys/root.go @@ -47,6 +47,7 @@ The pass backend requires GnuPG: https://gnupg.org/ flags.LineBreak, DeleteKeyCommand(), ParseKeyStringCommand(), + MigrateCommand(), ) cmd.PersistentFlags().String(flags.FlagHome, defaultNodeHome, "The application home directory") diff --git a/client/keys/root_test.go b/client/keys/root_test.go index b6c2f5f88f..f66ae9265d 100644 --- a/client/keys/root_test.go +++ b/client/keys/root_test.go @@ -11,5 +11,5 @@ func TestCommands(t *testing.T) { assert.NotNil(t, rootCommands) // Commands are registered - assert.Equal(t, 9, len(rootCommands.Commands())) + assert.Equal(t, 10, len(rootCommands.Commands())) } diff --git a/client/keys/utils.go b/client/keys/utils.go index d13e212338..80a28ed133 100644 --- a/client/keys/utils.go +++ b/client/keys/utils.go @@ -3,6 +3,7 @@ package keys import ( "fmt" "io" + "path/filepath" yaml "gopkg.in/yaml.v2" @@ -13,10 +14,23 @@ import ( const ( OutputFormatText = "text" OutputFormatJSON = "json" + + // defaultKeyDBName is the client's subdirectory where keys are stored. + defaultKeyDBName = "keys" ) type bechKeyOutFn func(keyInfo cryptokeyring.Info) (cryptokeyring.KeyOutput, error) +// NewLegacyKeyBaseFromDir initializes a legacy keybase at the rootDir directory. Keybase +// options can be applied when generating this new Keybase. +func NewLegacyKeyBaseFromDir(rootDir string, opts ...cryptokeyring.KeybaseOption) (cryptokeyring.LegacyKeybase, error) { + return getLegacyKeyBaseFromDir(rootDir, opts...) +} + +func getLegacyKeyBaseFromDir(rootDir string, opts ...cryptokeyring.KeybaseOption) (cryptokeyring.LegacyKeybase, error) { + return cryptokeyring.NewLegacy(defaultKeyDBName, filepath.Join(rootDir, "keys"), opts...) 
+} + func printKeyInfo(w io.Writer, keyInfo cryptokeyring.Info, bechKeyOut bechKeyOutFn, output string) { ko, err := bechKeyOut(keyInfo) if err != nil { diff --git a/crypto/keyring/keyring.go b/crypto/keyring/keyring.go index 4dffe640d1..4ee4b3d096 100644 --- a/crypto/keyring/keyring.go +++ b/crypto/keyring/keyring.go @@ -199,6 +199,8 @@ type keystore struct { func infoKey(name string) []byte { return []byte(fmt.Sprintf("%s.%s", name, infoSuffix)) } +func infoKeyBz(name string) []byte { return []byte(infoKey(name)) } + func newKeystore(kr keyring.Keyring, opts ...Option) keystore { // Default options for keybase options := Options{ diff --git a/crypto/keyring/legacy.go b/crypto/keyring/legacy.go new file mode 100644 index 0000000000..297f813bfd --- /dev/null +++ b/crypto/keyring/legacy.go @@ -0,0 +1,189 @@ +package keyring + +import ( + "fmt" + "strings" + + ostos "github.com/line/ostracon/libs/os" + dbm "github.com/line/tm-db/v2" + "github.com/pkg/errors" + + "github.com/line/lbm-sdk/crypto" + "github.com/line/lbm-sdk/crypto/types" + sdk "github.com/line/lbm-sdk/types" + sdkerrors "github.com/line/lbm-sdk/types/errors" +) + +// LegacyKeybase is implemented by the legacy keybase implementation. +type LegacyKeybase interface { + List() ([]Info, error) + Export(name string) (armor string, err error) + ExportPrivKey(name, decryptPassphrase, encryptPassphrase string) (armor string, err error) + ExportPubKey(name string) (armor string, err error) + Close() error +} + +// NewLegacy creates a new instance of a legacy keybase. +func NewLegacy(name, dir string, opts ...KeybaseOption) (LegacyKeybase, error) { + if err := ostos.EnsureDir(dir, 0700); err != nil { + return nil, fmt.Errorf("failed to create Keybase directory: %s", err) + } + + db, err := sdk.NewLevelDB(name, dir) + if err != nil { + return nil, err + } + + return newDBKeybase(db), nil +} + +var _ LegacyKeybase = dbKeybase{} + +// dbKeybase combines encryption and storage implementation to provide a +// full-featured key manager. +// +// Deprecated: dbKeybase will be removed in favor of keyringKeybase. +type dbKeybase struct { + db dbm.DB +} + +// newDBKeybase creates a new dbKeybase instance using the provided DB for +// reading and writing keys. +func newDBKeybase(db dbm.DB) dbKeybase { + return dbKeybase{ + db: db, + } +} + +// List returns the keys from storage in alphabetical order. +func (kb dbKeybase) List() ([]Info, error) { + var res []Info + + iter, err := kb.db.Iterator(nil, nil) + if err != nil { + return nil, err + } + + defer iter.Close() + + for ; iter.Valid(); iter.Next() { + key := string(iter.Key()) + + // need to include only keys in storage that have an info suffix + if strings.HasSuffix(key, infoSuffix) { + info, err := unmarshalInfo(iter.Value()) + if err != nil { + return nil, err + } + + res = append(res, info) + } + } + + return res, nil +} + +// Get returns the public information about one key. +func (kb dbKeybase) Get(name string) (Info, error) { + bs, err := kb.db.Get(infoKeyBz(name)) + if err != nil { + return nil, err + } + + if len(bs) == 0 { + return nil, sdkerrors.Wrap(sdkerrors.ErrKeyNotFound, name) + } + + return unmarshalInfo(bs) +} + +// ExportPrivateKeyObject returns a PrivKey object given the key name and +// passphrase. An error is returned if the key does not exist or if the Info for +// the key is invalid. 
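+// Only locally stored keys (localInfo) carry private key material in the
+// keybase; ledger, offline and multisig entries are rejected by the switch
+// below with an error.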
+func (kb dbKeybase) ExportPrivateKeyObject(name string, passphrase string) (types.PrivKey, error) { + info, err := kb.Get(name) + if err != nil { + return nil, err + } + + var priv types.PrivKey + + switch i := info.(type) { + case localInfo: + linfo := i + if linfo.PrivKeyArmor == "" { + err = fmt.Errorf("private key not available") + return nil, err + } + + priv, _, err = crypto.UnarmorDecryptPrivKey(linfo.PrivKeyArmor, passphrase) + if err != nil { + return nil, err + } + + case ledgerInfo, offlineInfo, multiInfo: + return nil, errors.New("only works on local private keys") + } + + return priv, nil +} + +func (kb dbKeybase) Export(name string) (armor string, err error) { + bz, err := kb.db.Get(infoKeyBz(name)) + if err != nil { + return "", err + } + + if bz == nil { + return "", fmt.Errorf("no key to export with name %s", name) + } + + return crypto.ArmorInfoBytes(bz), nil +} + +// ExportPubKey returns public keys in ASCII armored format. It retrieves a Info +// object by its name and return the public key in a portable format. +func (kb dbKeybase) ExportPubKey(name string) (armor string, err error) { + bz, err := kb.db.Get(infoKeyBz(name)) + if err != nil { + return "", err + } + + if bz == nil { + return "", fmt.Errorf("no key to export with name %s", name) + } + + info, err := unmarshalInfo(bz) + if err != nil { + return + } + + return crypto.ArmorPubKeyBytes(info.GetPubKey().Bytes(), string(info.GetAlgo())), nil +} + +// ExportPrivKey returns a private key in ASCII armored format. +// It returns an error if the key does not exist or a wrong encryption passphrase +// is supplied. +func (kb dbKeybase) ExportPrivKey(name string, decryptPassphrase string, + encryptPassphrase string) (armor string, err error) { + priv, err := kb.ExportPrivateKeyObject(name, decryptPassphrase) + if err != nil { + return "", err + } + + info, err := kb.Get(name) + if err != nil { + return "", err + } + + return crypto.EncryptArmorPrivKey(priv, encryptPassphrase, string(info.GetAlgo())), nil +} + +// Close the underlying storage. +func (kb dbKeybase) Close() error { return kb.db.Close() } + +// KeybaseOption overrides options for the db. +type KeybaseOption func(*kbOptions) + +type kbOptions struct { +} diff --git a/simapp/simd/cmd/root.go b/simapp/simd/cmd/root.go index 262fff4c74..2b5cb27ac5 100644 --- a/simapp/simd/cmd/root.go +++ b/simapp/simd/cmd/root.go @@ -73,6 +73,7 @@ func initRootCmd(rootCmd *cobra.Command, encodingConfig params.EncodingConfig) { genutilcli.InitCmd(simapp.ModuleBasics, simapp.DefaultNodeHome), genutilcli.CollectGenTxsCmd(banktypes.GenesisBalancesIterator{}, simapp.DefaultNodeHome), genutilcli.GenTxCmd(simapp.ModuleBasics, encodingConfig.TxConfig, banktypes.GenesisBalancesIterator{}, simapp.DefaultNodeHome), + genutilcli.MigrateGenesisCmd(), genutilcli.ValidateGenesisCmd(simapp.ModuleBasics), AddGenesisAccountCmd(simapp.DefaultNodeHome), ostcli.NewCompletionCmd(rootCmd, true), diff --git a/types/tx_msg.go b/types/tx_msg.go index 62b79d937f..ec7af0a725 100644 --- a/types/tx_msg.go +++ b/types/tx_msg.go @@ -88,3 +88,8 @@ type TxDecoder func(txBytes []byte) (Tx, error) // TxEncoder marshals transaction to bytes type TxEncoder func(tx Tx) ([]byte, error) + +// MsgTypeURL returns the TypeURL of a `sdk.Msg`. 
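+// For example (illustrative; the exact type URL depends on the proto package
+// the message is registered under):
+//
+//	MsgTypeURL(&stakingtypes.MsgCreateValidator{}) // e.g. "/lbm.staking.v1.MsgCreateValidator"
+//
+// The gentx test suite added below uses it to compare decoded messages.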
+func MsgTypeURL(msg Msg) string { + return "/" + proto.MessageName(msg) +} diff --git a/x/genutil/client/cli/migrate.go b/x/genutil/client/cli/migrate.go new file mode 100644 index 0000000000..f0e28c1531 --- /dev/null +++ b/x/genutil/client/cli/migrate.go @@ -0,0 +1,133 @@ +package cli + +import ( + "encoding/json" + "fmt" + "sort" + "time" + + "github.com/pkg/errors" + "github.com/spf13/cobra" + + "github.com/line/lbm-sdk/client" + "github.com/line/lbm-sdk/client/flags" + sdk "github.com/line/lbm-sdk/types" + "github.com/line/lbm-sdk/version" + "github.com/line/lbm-sdk/x/genutil/types" +) + +const flagGenesisTime = "genesis-time" + +// Allow applications to extend and modify the migration process. +// +// Ref: https://github.com/cosmos/cosmos-sdk/issues/5041 +var migrationMap = types.MigrationMap{} + +// GetMigrationCallback returns a MigrationCallback for a given version. +func GetMigrationCallback(version string) types.MigrationCallback { + return migrationMap[version] +} + +// GetMigrationVersions get all migration version in a sorted slice. +func GetMigrationVersions() []string { + versions := make([]string, len(migrationMap)) + + var i int + + for version := range migrationMap { + versions[i] = version + i++ + } + + sort.Strings(versions) + + return versions +} + +// MigrateGenesisCmd returns a command to execute genesis state migration. +func MigrateGenesisCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "migrate [target-version] [genesis-file]", + Short: "Migrate genesis to a specified target version", + Long: fmt.Sprintf(`Migrate the source genesis into the target version and print to STDOUT. + +Example: +$ %s migrate v0.43 /path/to/genesis.json --chain-id=test-chain-1 --genesis-time=2021-11-08T14:00:00Z +`, version.AppName), + Args: cobra.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx := client.GetClientContextFromCmd(cmd) + + var err error + + target := args[0] + importGenesis := args[1] + + genDoc, err := validateGenDoc(importGenesis) + if err != nil { + return err + } + + // Since some default values are valid values, we just print to + // make sure the user didn't forget to update these values. + if genDoc.ConsensusParams.Evidence.MaxBytes == 0 { + fmt.Printf("Warning: consensus_params.evidence.max_bytes is set to 0. If this is"+ + " deliberate, feel free to ignore this warning. 
If not, please have a look at the chain"+ + " upgrade guide at %s.\n", chainUpgradeGuide) + } + + var initialState types.AppMap + if err := json.Unmarshal(genDoc.AppState, &initialState); err != nil { + return errors.Wrap(err, "failed to JSON unmarshal initial genesis state") + } + + migrationFunc := GetMigrationCallback(target) + if migrationFunc == nil { + return fmt.Errorf("unknown migration function for version: %s", target) + } + + // TODO: handler error from migrationFunc call + newGenState := migrationFunc(initialState, clientCtx) + + genDoc.AppState, err = json.Marshal(newGenState) + if err != nil { + return errors.Wrap(err, "failed to JSON marshal migrated genesis state") + } + + genesisTime, _ := cmd.Flags().GetString(flagGenesisTime) + if genesisTime != "" { + var t time.Time + + err := t.UnmarshalText([]byte(genesisTime)) + if err != nil { + return errors.Wrap(err, "failed to unmarshal genesis time") + } + + genDoc.GenesisTime = t + } + + chainID, _ := cmd.Flags().GetString(flags.FlagChainID) + if chainID != "" { + genDoc.ChainID = chainID + } + + bz, err := json.Marshal(genDoc) + if err != nil { + return errors.Wrap(err, "failed to marshal genesis doc") + } + + sortedBz, err := sdk.SortJSON(bz) + if err != nil { + return errors.Wrap(err, "failed to sort JSON genesis doc") + } + + cmd.Println(string(sortedBz)) + return nil + }, + } + + cmd.Flags().String(flagGenesisTime, "", "override genesis_time with this flag") + cmd.Flags().String(flags.FlagChainID, "", "override chain_id with this flag") + + return cmd +} diff --git a/x/genutil/client/testutil/migrate.go b/x/genutil/client/testutil/migrate.go new file mode 100644 index 0000000000..b7fec4f04c --- /dev/null +++ b/x/genutil/client/testutil/migrate.go @@ -0,0 +1,121 @@ +package testutil + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/line/lbm-sdk/testutil" + clitestutil "github.com/line/lbm-sdk/testutil/cli" + "github.com/line/lbm-sdk/x/genutil/client/cli" +) + +// An example exported genesis file from a 0.37 chain. Note that evidence +// parameters only contains `max_age`. +var v037Exported = `{ + "app_hash": "", + "app_state": {}, + "chain_id": "test", + "consensus_params": { + "block": { + "max_bytes": "22020096", + "max_gas": "-1", + "time_iota_ms": "1000" + }, + "evidence": { "max_age": "100000" }, + "validator": { "pub_key_types": ["ed25519"] } + }, + "genesis_time": "2020-09-29T20:16:29.172362037Z", + "validators": [] +}` + +// An example exported genesis file that's 0.40 compatible. +// We added the following app_state: +// +// - x/gov: added votes to test ADR-037 split votes migration. 
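+//
+// This fixture drives the "migrate 0.42 to 0.43" case in TestMigrateGenesis
+// below, which asserts that the plain gov vote is rewritten as a weighted vote
+// with weight "1.000000000000000000".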
+var v040Valid = `{ + "app_hash": "", + "app_state": { + "gov": { + "starting_proposal_id": "0", + "deposits": [], + "votes": [ + { + "proposal_id": "5", + "voter": "cosmos1fl48vsnmsdzcv85q5d2q4z5ajdha8yu34mf0eh", + "option": "VOTE_OPTION_YES" + } + ], + "proposals": [], + "deposit_params": { "min_deposit": [], "max_deposit_period": "0s" }, + "voting_params": { "voting_period": "0s" }, + "tally_params": { "quorum": "0", "threshold": "0", "veto_threshold": "0" } + } + }, + "chain_id": "test", + "consensus_params": { + "block": { + "max_bytes": "22020096", + "max_gas": "-1", + "time_iota_ms": "1000" + }, + "evidence": { + "max_age_num_blocks": "100000", + "max_age_duration": "172800000000000", + "max_bytes": "0" + }, + "validator": { "pub_key_types": ["ed25519"] } + }, + "genesis_time": "2020-09-29T20:16:29.172362037Z", + "validators": [] +}` + +func TestGetMigrationCallback(t *testing.T) { + for _, version := range cli.GetMigrationVersions() { + require.NotNil(t, cli.GetMigrationCallback(version)) + } +} + +func (s *IntegrationTestSuite) TestMigrateGenesis() { + val0 := s.network.Validators[0] + + testCases := []struct { + name string + genesis string + target string + expErr bool + expErrMsg string + check func(jsonOut string) + }{ + { + "migrate 0.37 to 0.42", + v037Exported, + "v0.42", + true, "Make sure that you have correctly migrated all Tendermint consensus params", func(_ string) {}, + }, + { + "migrate 0.42 to 0.43", + v040Valid, + "v0.43", + false, "", + func(jsonOut string) { + // Make sure the json output contains the ADR-037 gov weighted votes. + s.Require().Contains(jsonOut, "\"weight\":\"1.000000000000000000\"") + }, + }, + } + + for _, tc := range testCases { + tc := tc + s.Run(tc.name, func() { + genesisFile := testutil.WriteToNewTempFile(s.T(), tc.genesis) + jsonOutput, err := clitestutil.ExecTestCLICmd(val0.ClientCtx, cli.MigrateGenesisCmd(), []string{tc.target, genesisFile.Name()}) + if tc.expErr { + s.Require().Contains(err.Error(), tc.expErrMsg) + } else { + s.Require().NoError(err) + tc.check(jsonOutput.String()) + } + }) + } +} diff --git a/x/genutil/client/testutil/suite.go b/x/genutil/client/testutil/suite.go new file mode 100644 index 0000000000..ae7ae0e810 --- /dev/null +++ b/x/genutil/client/testutil/suite.go @@ -0,0 +1,133 @@ +package testutil + +import ( + "context" + "fmt" + "io" + "os" + "path/filepath" + + "github.com/stretchr/testify/suite" + + "github.com/line/lbm-sdk/client" + "github.com/line/lbm-sdk/client/flags" + "github.com/line/lbm-sdk/simapp" + "github.com/line/lbm-sdk/testutil" + "github.com/line/lbm-sdk/testutil/network" + sdk "github.com/line/lbm-sdk/types" + banktypes "github.com/line/lbm-sdk/x/bank/types" + "github.com/line/lbm-sdk/x/genutil/client/cli" + stakingcli "github.com/line/lbm-sdk/x/staking/client/cli" + "github.com/line/lbm-sdk/x/staking/types" +) + +type IntegrationTestSuite struct { + suite.Suite + + cfg network.Config + network *network.Network +} + +func NewIntegrationTestSuite(cfg network.Config) *IntegrationTestSuite { + return &IntegrationTestSuite{cfg: cfg} +} + +func (s *IntegrationTestSuite) SetupSuite() { + s.T().Log("setting up integration test suite") + + var err error + s.network = network.New(s.T(), s.cfg) + s.Require().NoError(err) + + _, err = s.network.WaitForHeight(1) + s.Require().NoError(err) +} + +func (s *IntegrationTestSuite) TearDownSuite() { + s.T().Log("tearing down integration test suite") + s.network.Cleanup() +} + +func (s *IntegrationTestSuite) TestGenTxCmd() { + val := s.network.Validators[0] + dir 
:= s.T().TempDir() + + cmd := cli.GenTxCmd( + simapp.ModuleBasics, + val.ClientCtx.TxConfig, banktypes.GenesisBalancesIterator{}, val.ClientCtx.HomeDir) + + _, out := testutil.ApplyMockIO(cmd) + clientCtx := val.ClientCtx.WithOutput(out) + + ctx := context.Background() + ctx = context.WithValue(ctx, client.ClientContextKey, &clientCtx) + + amount := sdk.NewCoin(s.cfg.BondDenom, sdk.NewInt(12)) + genTxFile := filepath.Join(dir, "myTx") + cmd.SetArgs([]string{ + fmt.Sprintf("--%s=%s", flags.FlagChainID, s.network.Config.ChainID), + fmt.Sprintf("--%s=%s", flags.FlagOutputDocument, genTxFile), + val.Moniker, + amount.String(), + }) + + err := cmd.ExecuteContext(ctx) + s.Require().NoError(err) + + // validate generated transaction. + open, err := os.Open(genTxFile) + s.Require().NoError(err) + + all, err := io.ReadAll(open) + s.Require().NoError(err) + + tx, err := val.ClientCtx.TxConfig.TxJSONDecoder()(all) + s.Require().NoError(err) + + msgs := tx.GetMsgs() + s.Require().Len(msgs, 1) + + s.Require().Equal(sdk.MsgTypeURL(&types.MsgCreateValidator{}), sdk.MsgTypeURL(msgs[0])) + s.Require().True(val.Address.Equals(msgs[0].GetSigners()[0])) + s.Require().Equal(amount, msgs[0].(*types.MsgCreateValidator).Value) + s.Require().NoError(tx.ValidateBasic()) +} + +func (s *IntegrationTestSuite) TestGenTxCmdPubkey() { + val := s.network.Validators[0] + dir := s.T().TempDir() + + cmd := cli.GenTxCmd( + simapp.ModuleBasics, + val.ClientCtx.TxConfig, + banktypes.GenesisBalancesIterator{}, + val.ClientCtx.HomeDir, + ) + + _, out := testutil.ApplyMockIO(cmd) + clientCtx := val.ClientCtx.WithOutput(out) + + ctx := context.Background() + ctx = context.WithValue(ctx, client.ClientContextKey, &clientCtx) + + amount := sdk.NewCoin(s.cfg.BondDenom, sdk.NewInt(12)) + genTxFile := filepath.Join(dir, "myTx") + + cmd.SetArgs([]string{ + fmt.Sprintf("--%s=%s", flags.FlagChainID, s.network.Config.ChainID), + fmt.Sprintf("--%s=%s", flags.FlagOutputDocument, genTxFile), + fmt.Sprintf("--%s={\"key\":\"BOIkjkFruMpfOFC9oNPhiJGfmY2pHF/gwHdLDLnrnS0=\"}", stakingcli.FlagPubKey), + val.Moniker, + amount.String(), + }) + s.Require().Error(cmd.ExecuteContext(ctx)) + + cmd.SetArgs([]string{ + fmt.Sprintf("--%s=%s", flags.FlagChainID, s.network.Config.ChainID), + fmt.Sprintf("--%s=%s", flags.FlagOutputDocument, genTxFile), + fmt.Sprintf("--%s={\"@type\":\"/cosmos.crypto.ed25519.PubKey\",\"key\":\"BOIkjkFruMpfOFC9oNPhiJGfmY2pHF/gwHdLDLnrnS0=\"}", stakingcli.FlagPubKey), + val.Moniker, + amount.String(), + }) + s.Require().NoError(cmd.ExecuteContext(ctx)) +} From 6a648fbb0675a262eab9b34a4f31883770c32651 Mon Sep 17 00:00:00 2001 From: leesj9476 Date: Mon, 8 Nov 2021 15:03:54 +0900 Subject: [PATCH 06/12] feat: add upgrade/migration fix lint and compile error --- crypto/keyring/keyring.go | 2 +- x/genutil/client/testutil/suite.go | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/crypto/keyring/keyring.go b/crypto/keyring/keyring.go index 4ee4b3d096..183d35f86e 100644 --- a/crypto/keyring/keyring.go +++ b/crypto/keyring/keyring.go @@ -199,7 +199,7 @@ type keystore struct { func infoKey(name string) []byte { return []byte(fmt.Sprintf("%s.%s", name, infoSuffix)) } -func infoKeyBz(name string) []byte { return []byte(infoKey(name)) } +func infoKeyBz(name string) []byte { return infoKey(name) } func newKeystore(kr keyring.Keyring, opts ...Option) keystore { // Default options for keybase diff --git a/x/genutil/client/testutil/suite.go b/x/genutil/client/testutil/suite.go index ae7ae0e810..ff74c65d84 100644 --- 
a/x/genutil/client/testutil/suite.go +++ b/x/genutil/client/testutil/suite.go @@ -3,7 +3,7 @@ package testutil import ( "context" "fmt" - "io" + "io/ioutil" "os" "path/filepath" @@ -78,7 +78,7 @@ func (s *IntegrationTestSuite) TestGenTxCmd() { open, err := os.Open(genTxFile) s.Require().NoError(err) - all, err := io.ReadAll(open) + all, err := ioutil.ReadAll(open) s.Require().NoError(err) tx, err := val.ClientCtx.TxConfig.TxJSONDecoder()(all) From a526dab9006ea70378777234c76037d919207b99 Mon Sep 17 00:00:00 2001 From: leesj9476 Date: Tue, 9 Nov 2021 06:44:23 +0900 Subject: [PATCH 07/12] add upgrade/migration version map --- CHANGELOG.md | 2 +- client/docs/swagger-ui/swagger.yaml | 266 ++++++++++++++++ docs/core/proto-docs.md | 52 ++++ proto/lbm/upgrade/v1/query.proto | 21 ++ proto/lbm/upgrade/v1/upgrade.proto | 12 + simapp/app.go | 7 +- simapp/app_test.go | 78 ++++- tests/mocks/types_module_module.go | 15 +- types/module/configurator.go | 5 +- types/module/module.go | 104 ++++++- types/module/module_test.go | 4 +- x/bank/module.go | 8 +- x/genutil/client/cli/migrate.go | 8 +- x/genutil/types/types.go | 4 +- x/upgrade/abci_test.go | 17 +- x/upgrade/keeper/grpc_query.go | 23 ++ x/upgrade/keeper/grpc_query_test.go | 67 +++- x/upgrade/keeper/keeper.go | 76 ++++- x/upgrade/keeper/keeper_test.go | 27 +- x/upgrade/spec/01_concepts.md | 2 +- x/upgrade/types/handler.go | 22 +- x/upgrade/types/keys.go | 3 + x/upgrade/types/query.pb.go | 465 ++++++++++++++++++++++++++-- x/upgrade/types/query.pb.gw.go | 80 +++++ x/upgrade/types/upgrade.pb.go | 279 +++++++++++++++-- 25 files changed, 1548 insertions(+), 99 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index aa89cbd202..5e8b33bdb6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,7 +5,7 @@ ### Features * (feat) [\#352] (https://github.com/line/lbm-sdk/pull/352) iavl, db & disk stats logging * (x/gov) [\#368](https://github.com/line/lbm-sdk/pull/368) Governance Split Votes, use `MsgWeightedVote` to send a split vote. Sending a regular `MsgVote` will convert the underlying vote option into a weighted vote with weight 1. -* (x) [\#373] (https://github.com/line/lbm-sdk/pull/373) To smoothen the update to the latest stable release, the SDK includes a set of CLI commands for managing migrations between SDK versions, under the `migrate` subcommand. Only migration scripts between stable releases are included. +* (x/upgrade) [\#373] (https://github.com/line/lbm-sdk/pull/373) To smoothen the update to the latest stable release, the SDK includes vesion map for managing migrations between SDK versions. ### Improvements * (slashing) [\#347](https://github.com/line/lbm-sdk/pull/347) Introduce VoterSetCounter diff --git a/client/docs/swagger-ui/swagger.yaml b/client/docs/swagger-ui/swagger.yaml index 323868add8..51d860828e 100644 --- a/client/docs/swagger-ui/swagger.yaml +++ b/client/docs/swagger-ui/swagger.yaml @@ -21989,6 +21989,237 @@ paths: } tags: - Query + /lbm/upgrade/v1/module_versions: + get: + summary: ModuleVersions queries the list of module versions from state. + operationId: ModuleVersions + responses: + '200': + description: A successful response. + schema: + type: object + properties: + module_versions: + type: array + items: + type: object + properties: + name: + type: string + title: name of the app module + version: + type: string + format: uint64 + title: consensus version of the app module + description: ModuleVersion specifies a module and its consensus version. 
+ description: >- + module_versions is a list of module names with their consensus + versions. + description: >- + QueryModuleVersionsResponse is the response type for the + Query/ModuleVersions + + RPC method. + default: + description: An unexpected error response + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := ptypes.MarshalAny(foo) + ... + foo := &pb.Foo{} + if err := ptypes.UnmarshalAny(any, foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". 
+ + + + JSON + + ==== + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: module_name + description: |- + module_name is a field to query a specific module + consensus version from state. Leaving this empty will + fetch the full list of module versions from state. + in: query + required: false + type: string + tags: + - Query '/lbm/upgrade/v1/upgraded_consensus_state/{last_height}': get: summary: |- @@ -49077,6 +49308,17 @@ definitions: and can't be handled, they will be ignored description: TxBody is the body of a transaction that all signers sign over. + lbm.upgrade.v1.ModuleVersion: + type: object + properties: + name: + type: string + title: name of the app module + version: + type: string + format: uint64 + title: consensus version of the app module + description: ModuleVersion specifies a module and its consensus version. lbm.upgrade.v1.Plan: type: object properties: @@ -49537,6 +49779,30 @@ definitions: RPC method. + lbm.upgrade.v1.QueryModuleVersionsResponse: + type: object + properties: + module_versions: + type: array + items: + type: object + properties: + name: + type: string + title: name of the app module + version: + type: string + format: uint64 + title: consensus version of the app module + description: ModuleVersion specifies a module and its consensus version. + description: >- + module_versions is a list of module names with their consensus + versions. + description: >- + QueryModuleVersionsResponse is the response type for the + Query/ModuleVersions + + RPC method. lbm.upgrade.v1.QueryUpgradedConsensusStateResponse: type: object properties: diff --git a/docs/core/proto-docs.md b/docs/core/proto-docs.md index 3a5bc94359..1ab8f2d959 100644 --- a/docs/core/proto-docs.md +++ b/docs/core/proto-docs.md @@ -673,6 +673,7 @@ - [lbm/upgrade/v1/upgrade.proto](#lbm/upgrade/v1/upgrade.proto) - [CancelSoftwareUpgradeProposal](#lbm.upgrade.v1.CancelSoftwareUpgradeProposal) + - [ModuleVersion](#lbm.upgrade.v1.ModuleVersion) - [Plan](#lbm.upgrade.v1.Plan) - [SoftwareUpgradeProposal](#lbm.upgrade.v1.SoftwareUpgradeProposal) @@ -681,6 +682,8 @@ - [QueryAppliedPlanResponse](#lbm.upgrade.v1.QueryAppliedPlanResponse) - [QueryCurrentPlanRequest](#lbm.upgrade.v1.QueryCurrentPlanRequest) - [QueryCurrentPlanResponse](#lbm.upgrade.v1.QueryCurrentPlanResponse) + - [QueryModuleVersionsRequest](#lbm.upgrade.v1.QueryModuleVersionsRequest) + - [QueryModuleVersionsResponse](#lbm.upgrade.v1.QueryModuleVersionsResponse) - [QueryUpgradedConsensusStateRequest](#lbm.upgrade.v1.QueryUpgradedConsensusStateRequest) - [QueryUpgradedConsensusStateResponse](#lbm.upgrade.v1.QueryUpgradedConsensusStateResponse) @@ -9894,6 +9897,22 @@ upgrade. + + +### ModuleVersion +ModuleVersion specifies a module and its consensus version. 
+ + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| `name` | [string](#string) | | name of the app module | +| `version` | [uint64](#uint64) | | consensus version of the app module | + + + + + + ### Plan @@ -10006,6 +10025,38 @@ method. + + +### QueryModuleVersionsRequest +QueryModuleVersionsRequest is the request type for the Query/ModuleVersions +RPC method. + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| `module_name` | [string](#string) | | module_name is a field to query a specific module consensus version from state. Leaving this empty will fetch the full list of module versions from state | + + + + + + + + +### QueryModuleVersionsResponse +QueryModuleVersionsResponse is the response type for the Query/ModuleVersions +RPC method. + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| `module_versions` | [ModuleVersion](#lbm.upgrade.v1.ModuleVersion) | repeated | module_versions is a list of module names with their consensus versions. | + + + + + + ### QueryUpgradedConsensusStateRequest @@ -10054,6 +10105,7 @@ Query defines the gRPC upgrade querier service. | `CurrentPlan` | [QueryCurrentPlanRequest](#lbm.upgrade.v1.QueryCurrentPlanRequest) | [QueryCurrentPlanResponse](#lbm.upgrade.v1.QueryCurrentPlanResponse) | CurrentPlan queries the current upgrade plan. | GET|/lbm/upgrade/v1/current_plan| | `AppliedPlan` | [QueryAppliedPlanRequest](#lbm.upgrade.v1.QueryAppliedPlanRequest) | [QueryAppliedPlanResponse](#lbm.upgrade.v1.QueryAppliedPlanResponse) | AppliedPlan queries a previously applied upgrade plan by its name. | GET|/lbm/upgrade/v1/applied_plan/{name}| | `UpgradedConsensusState` | [QueryUpgradedConsensusStateRequest](#lbm.upgrade.v1.QueryUpgradedConsensusStateRequest) | [QueryUpgradedConsensusStateResponse](#lbm.upgrade.v1.QueryUpgradedConsensusStateResponse) | UpgradedConsensusState queries the consensus state that will serve as a trusted kernel for the next version of this chain. It will only be stored at the last height of this chain. UpgradedConsensusState RPC not supported with legacy querier | GET|/lbm/upgrade/v1/upgraded_consensus_state/{last_height}| +| `ModuleVersions` | [QueryModuleVersionsRequest](#lbm.upgrade.v1.QueryModuleVersionsRequest) | [QueryModuleVersionsResponse](#lbm.upgrade.v1.QueryModuleVersionsResponse) | ModuleVersions queries the list of module versions from state. | GET|/lbm/upgrade/v1/module_versions| diff --git a/proto/lbm/upgrade/v1/query.proto b/proto/lbm/upgrade/v1/query.proto index 98574f92d0..e5ed946106 100644 --- a/proto/lbm/upgrade/v1/query.proto +++ b/proto/lbm/upgrade/v1/query.proto @@ -26,6 +26,11 @@ service Query { rpc UpgradedConsensusState(QueryUpgradedConsensusStateRequest) returns (QueryUpgradedConsensusStateResponse) { option (google.api.http).get = "/lbm/upgrade/v1/upgraded_consensus_state/{last_height}"; } + + // ModuleVersions queries the list of module versions from state. + rpc ModuleVersions(QueryModuleVersionsRequest) returns (QueryModuleVersionsResponse) { + option (google.api.http).get = "/lbm/upgrade/v1/module_versions"; + } } // QueryCurrentPlanRequest is the request type for the Query/CurrentPlan RPC @@ -66,3 +71,19 @@ message QueryUpgradedConsensusStateRequest { message QueryUpgradedConsensusStateResponse { google.protobuf.Any upgraded_consensus_state = 1; } + +// QueryModuleVersionsRequest is the request type for the Query/ModuleVersions +// RPC method. 
+message QueryModuleVersionsRequest { + // module_name is a field to query a specific module + // consensus version from state. Leaving this empty will + // fetch the full list of module versions from state + string module_name = 1; +} + +// QueryModuleVersionsResponse is the response type for the Query/ModuleVersions +// RPC method. +message QueryModuleVersionsResponse { + // module_versions is a list of module names with their consensus versions. + repeated ModuleVersion module_versions = 1; +} diff --git a/proto/lbm/upgrade/v1/upgrade.proto b/proto/lbm/upgrade/v1/upgrade.proto index 7da016a385..78ffcf8ce1 100644 --- a/proto/lbm/upgrade/v1/upgrade.proto +++ b/proto/lbm/upgrade/v1/upgrade.proto @@ -60,3 +60,15 @@ message CancelSoftwareUpgradeProposal { string title = 1; string description = 2; } + +// ModuleVersion specifies a module and its consensus version. +message ModuleVersion { + option (gogoproto.equal) = true; + option (gogoproto.goproto_stringer) = true; + + // name of the app module + string name = 1; + + // consensus version of the app module + uint64 version = 2; +} diff --git a/simapp/app.go b/simapp/app.go index 25df1fa23e..76303d1a98 100644 --- a/simapp/app.go +++ b/simapp/app.go @@ -368,7 +368,7 @@ func NewSimApp( app.mm.RegisterInvariants(&app.CrisisKeeper) app.mm.RegisterRoutes(app.Router(), app.QueryRouter(), encodingConfig.Amino) - app.configurator = module.NewConfigurator(app.MsgServiceRouter(), app.GRPCQueryRouter()) + app.configurator = module.NewConfigurator(app.appCodec, app.MsgServiceRouter(), app.GRPCQueryRouter()) app.mm.RegisterServices(app.configurator) // add test gRPC service for testing gRPC queries in isolation @@ -463,6 +463,7 @@ func (app *SimApp) InitChainer(ctx sdk.Context, req abci.RequestInitChain) abci. if err := ostjson.Unmarshal(req.AppStateBytes, &genesisState); err != nil { panic(err) } + app.UpgradeKeeper.SetModuleVersionMap(ctx, app.mm.GetVersionMap()) return app.mm.InitGenesis(ctx, app.appCodec, genesisState) } @@ -571,7 +572,7 @@ func (app *SimApp) RegisterTendermintService(clientCtx client.Context) { // Example: // cfg := module.NewConfigurator(...) 
// app.UpgradeKeeper.SetUpgradeHandler("store-migration", func(ctx sdk.Context, plan upgradetypes.Plan) { -// err := app.RunMigrations(ctx, module.MigrationMap{ +// err := app.RunMigrations(ctx, module.VersionMap{ // "bank": 1, // Migrate x/bank from v1 to current x/bank's ConsensusVersion // "staking": 8, // Migrate x/staking from v8 to current x/staking's ConsensusVersion // }) @@ -579,7 +580,7 @@ func (app *SimApp) RegisterTendermintService(clientCtx client.Context) { // panic(err) // } // }) -func (app *SimApp) RunMigrations(ctx sdk.Context, migrateFromVersions module.MigrationMap) error { +func (app *SimApp) RunMigrations(ctx sdk.Context, migrateFromVersions module.VersionMap) (module.VersionMap, error) { return app.mm.RunMigrations(ctx, app.configurator, migrateFromVersions) } diff --git a/simapp/app_test.go b/simapp/app_test.go index fb72ec7862..fa9f88f6de 100644 --- a/simapp/app_test.go +++ b/simapp/app_test.go @@ -5,12 +5,14 @@ import ( "os" "testing" + "github.com/golang/mock/gomock" "github.com/line/ostracon/libs/log" ocproto "github.com/line/ostracon/proto/ostracon/types" "github.com/line/tm-db/v2/memdb" "github.com/stretchr/testify/require" "github.com/line/lbm-sdk/baseapp" + "github.com/line/lbm-sdk/tests/mocks" sdk "github.com/line/lbm-sdk/types" "github.com/line/lbm-sdk/types/module" abci "github.com/line/ostracon/abci/types" @@ -81,7 +83,7 @@ func TestRunMigrations(t *testing.T) { bApp.SetCommitMultiStoreTracer(nil) bApp.SetInterfaceRegistry(encCfg.InterfaceRegistry) app.BaseApp = bApp - app.configurator = module.NewConfigurator(app.MsgServiceRouter(), app.GRPCQueryRouter()) + app.configurator = module.NewConfigurator(app.appCodec, app.MsgServiceRouter(), app.GRPCQueryRouter()) // We register all modules on the Configurator, except x/bank. x/bank will // serve as the test subject on which we run the migration tests. @@ -161,9 +163,9 @@ func TestRunMigrations(t *testing.T) { // Run migrations only for bank. That's why we put the initial // version for bank as 1, and for all other modules, we put as // their latest ConsensusVersion. - err = app.RunMigrations( + _, err = app.RunMigrations( app.NewContext(true, ocproto.Header{Height: app.LastBlockHeight()}), - module.MigrationMap{ + module.VersionMap{ "bank": 1, "auth": auth.AppModule{}.ConsensusVersion(), "staking": staking.AppModule{}.ConsensusVersion(), @@ -191,3 +193,73 @@ func TestRunMigrations(t *testing.T) { }) } } + +func TestInitGenesisOnMigration(t *testing.T) { + db := memdb.NewDB() + encCfg := MakeTestEncodingConfig() + logger := log.NewOCLogger(log.NewSyncWriter(os.Stdout)) + app := NewSimApp(logger, db, nil, true, map[int64]bool{}, DefaultNodeHome, 0, encCfg, EmptyAppOptions{}) + ctx := app.NewContext(true, ocproto.Header{Height: app.LastBlockHeight()}) + + // Create a mock module. This module will serve as the new module we're + // adding during a migration. + mockCtrl := gomock.NewController(t) + t.Cleanup(mockCtrl.Finish) + mockModule := mocks.NewMockAppModule(mockCtrl) + mockDefaultGenesis := json.RawMessage(`{"key": "value"}`) + mockModule.EXPECT().DefaultGenesis(gomock.Eq(app.appCodec)).Times(1).Return(mockDefaultGenesis) + mockModule.EXPECT().InitGenesis(gomock.Eq(ctx), gomock.Eq(app.appCodec), gomock.Eq(mockDefaultGenesis)).Times(1).Return(nil) + mockModule.EXPECT().ConsensusVersion().Times(1).Return(uint64(0)) + + app.mm.Modules["mock"] = mockModule + + // Run migrations only for "mock" module. We exclude it from + // the VersionMap to simulate upgrading with a new module. 
+ _, err := app.RunMigrations( + app.NewContext(true, ocproto.Header{Height: app.LastBlockHeight()}), + module.VersionMap{ + "bank": 1, + "auth": auth.AppModule{}.ConsensusVersion(), + "staking": staking.AppModule{}.ConsensusVersion(), + "mint": mint.AppModule{}.ConsensusVersion(), + "distribution": distribution.AppModule{}.ConsensusVersion(), + "slashing": slashing.AppModule{}.ConsensusVersion(), + "gov": gov.AppModule{}.ConsensusVersion(), + "params": params.AppModule{}.ConsensusVersion(), + "ibc": ibc.AppModule{}.ConsensusVersion(), + "upgrade": upgrade.AppModule{}.ConsensusVersion(), + "vesting": vesting.AppModule{}.ConsensusVersion(), + "transfer": transfer.AppModule{}.ConsensusVersion(), + "evidence": evidence.AppModule{}.ConsensusVersion(), + "crisis": crisis.AppModule{}.ConsensusVersion(), + "genutil": genutil.AppModule{}.ConsensusVersion(), + "capability": capability.AppModule{}.ConsensusVersion(), + }, + ) + + require.NoError(t, err) +} + +func TestUpgradeStateOnGenesis(t *testing.T) { + encCfg := MakeTestEncodingConfig() + db := memdb.NewDB() + app := NewSimApp(log.NewOCLogger(log.NewSyncWriter(os.Stdout)), db, nil, true, map[int64]bool{}, DefaultNodeHome, 0, encCfg, EmptyAppOptions{}) + genesisState := NewDefaultGenesisState(encCfg.Marshaler) + stateBytes, err := json.MarshalIndent(genesisState, "", " ") + require.NoError(t, err) + + // Initialize the chain + app.InitChain( + abci.RequestInitChain{ + Validators: []abci.ValidatorUpdate{}, + AppStateBytes: stateBytes, + }, + ) + + // make sure the upgrade keeper has version map in state + ctx := app.NewContext(false, ocproto.Header{}) + vm := app.UpgradeKeeper.GetModuleVersionMap(ctx) + for v, i := range app.mm.Modules { + require.Equal(t, vm[v], i.ConsensusVersion()) + } +} diff --git a/tests/mocks/types_module_module.go b/tests/mocks/types_module_module.go index d73283d8d4..ffdcee0c2d 100644 --- a/tests/mocks/types_module_module.go +++ b/tests/mocks/types_module_module.go @@ -323,8 +323,19 @@ func (m *MockAppModuleGenesis) ExportGenesis(arg0 types0.Context, arg1 codec.JSO return ret0 } -// ConsensusVersion mocks base method -func (m *MockAppModule) ConsensusVersion() uint64 { return 1 } +// ConsensusVersion mocks base method. +func (m *MockAppModule) ConsensusVersion() uint64 { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ConsensusVersion") + ret0, _ := ret[0].(uint64) + return ret0 +} + +// ConsensusVersion indicates an expected call of ConsensusVersion. 
+func (mr *MockAppModuleMockRecorder) ConsensusVersion() *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ConsensusVersion", reflect.TypeOf((*MockAppModule)(nil).ConsensusVersion))
+}
 
 // ExportGenesis indicates an expected call of ExportGenesis
 func (mr *MockAppModuleGenesisMockRecorder) ExportGenesis(arg0, arg1 interface{}) *gomock.Call {
diff --git a/types/module/configurator.go b/types/module/configurator.go
index 335675cca0..136ab8ec51 100644
--- a/types/module/configurator.go
+++ b/types/module/configurator.go
@@ -3,6 +3,7 @@ package module
 import (
 	"github.com/gogo/protobuf/grpc"
 
+	"github.com/line/lbm-sdk/codec"
 	sdk "github.com/line/lbm-sdk/types"
 	sdkerrors "github.com/line/lbm-sdk/types/errors"
 )
@@ -34,6 +35,7 @@ type Configurator interface {
 }
 
 type configurator struct {
+	cdc         codec.JSONMarshaler
 	msgServer   grpc.Server
 	queryServer grpc.Server
 
@@ -42,8 +44,9 @@ type configurator struct {
 }
 
 // NewConfigurator returns a new Configurator instance
-func NewConfigurator(msgServer grpc.Server, queryServer grpc.Server) Configurator {
+func NewConfigurator(cdc codec.JSONMarshaler, msgServer grpc.Server, queryServer grpc.Server) Configurator {
 	return configurator{
+		cdc:         cdc,
 		msgServer:   msgServer,
 		queryServer: queryServer,
 		migrations:  map[string]map[uint64]MigrationHandler{},
diff --git a/types/module/module.go b/types/module/module.go
index a296d030d5..498ec1fb76 100644
--- a/types/module/module.go
+++ b/types/module/module.go
@@ -341,25 +341,99 @@ func (m *Manager) ExportGenesis(ctx sdk.Context, cdc codec.JSONMarshaler) map[st
 
 // MigrationHandler is the migration function that each module registers.
 type MigrationHandler func(sdk.Context) error
 
-// MigrationMap is a map of moduleName -> version, where version denotes the
+// VersionMap is a map of moduleName -> version, where version denotes the
 // version from which we should perform the migration for each module.
-type MigrationMap map[string]uint64
+type VersionMap map[string]uint64
 
-// RunMigrations performs in-place store migrations for all modules.
-func (m Manager) RunMigrations(ctx sdk.Context, cfg Configurator, migrateFromVersions MigrationMap) error {
+// RunMigrations performs in-place store migrations for all modules. This
+// function MUST be called inside an x/upgrade UpgradeHandler.
+//
+// Recall that in an upgrade handler, the `fromVM` VersionMap is retrieved from
+// x/upgrade's store, and the function needs to return the target VersionMap
+// that will in turn be persisted to the x/upgrade's store. In general,
+// returning the result of RunMigrations should be enough:
+//
+// Example:
+//   cfg := module.NewConfigurator(...)
+//   app.UpgradeKeeper.SetUpgradeHandler("my-plan", func(ctx sdk.Context, plan upgradetypes.Plan, fromVM module.VersionMap) (module.VersionMap, error) {
+//       return app.mm.RunMigrations(ctx, cfg, fromVM)
+//   })
+//
+// Internally, RunMigrations will perform the following steps:
+// - create an `updatedVM` VersionMap of modules with their latest ConsensusVersion
+// - make a diff of `fromVM` and `updatedVM`, and for each module:
+//   - if the module's `fromVM` version is less than its `updatedVM` version,
+//     then run in-place store migrations for that module between those versions.
+//   - if the module does not exist in the `fromVM` (which means that it's a new module,
+//     because it was not in the previous x/upgrade's store), then run
+//     `InitGenesis` on that module.
+// - return the `updatedVM` to be persisted in the x/upgrade's store.
+//
+// As an app developer, if you wish to skip running InitGenesis for your new
+// module "foo", you need to manually pass a `fromVM` argument to this function
+// with foo's module version set to its latest ConsensusVersion. That way, the diff
+// between the function's `fromVM` and `updatedVM` will be empty, hence not
+// running anything for foo.
+//
+// Example:
+//   cfg := module.NewConfigurator(...)
+//   app.UpgradeKeeper.SetUpgradeHandler("my-plan", func(ctx sdk.Context, plan upgradetypes.Plan, fromVM module.VersionMap) (module.VersionMap, error) {
+//       // Assume "foo" is a new module.
+//       // `fromVM` is fetched from existing x/upgrade store. Since foo didn't exist
+//       // before this upgrade, `v, exists := fromVM["foo"]; exists == false`, and RunMigrations will by default
+//       // run InitGenesis on foo.
+//       // To skip running foo's InitGenesis, you need to set `fromVM`'s foo to its latest
+//       // consensus version:
+//       fromVM["foo"] = foo.AppModule{}.ConsensusVersion()
+//
+//       return app.mm.RunMigrations(ctx, cfg, fromVM)
+//   })
+//
+// Please also refer to docs/core/upgrade.md for more information.
+func (m Manager) RunMigrations(ctx sdk.Context, cfg Configurator, fromVM VersionMap) (VersionMap, error) {
 	c, ok := cfg.(configurator)
 	if !ok {
-		return sdkerrors.Wrapf(sdkerrors.ErrInvalidType, "expected %T, got %T", configurator{}, cfg)
+		return nil, sdkerrors.Wrapf(sdkerrors.ErrInvalidType, "expected %T, got %T", configurator{}, cfg)
 	}
 
+	updatedVM := make(VersionMap)
 	for moduleName, module := range m.Modules {
-		err := c.runModuleMigrations(ctx, moduleName, migrateFromVersions[moduleName], module.ConsensusVersion())
-		if err != nil {
-			return err
+		fromVersion, exists := fromVM[moduleName]
+		toVersion := module.ConsensusVersion()
+
+		// Only run migrations when the module exists in the fromVM.
+		// Run InitGenesis otherwise.
+		//
+		// The module won't exist in the fromVM in two cases:
+		// 1. A new module is added. In this case we run InitGenesis with an
+		// empty genesis state.
+		// 2. An existing chain is upgrading to v043 for the first time. In this case,
+		// all modules have yet to be added to x/upgrade's VersionMap store.
+		if exists {
+			err := c.runModuleMigrations(ctx, moduleName, fromVersion, toVersion)
+			if err != nil {
+				return nil, err
+			}
+		} else {
+			cfgtor, ok := cfg.(configurator)
+			if !ok {
+				// Currently, the only implementer of Configurator (the interface)
+				// is configurator (the struct).
+				return nil, sdkerrors.Wrapf(sdkerrors.ErrInvalidType, "expected %T, got %T", configurator{}, cfg)
+			}
+
+			moduleValUpdates := module.InitGenesis(ctx, cfgtor.cdc, module.DefaultGenesis(cfgtor.cdc))
+			// The module manager assumes only one module will update the
+			// validator set, and that it will not be by a new module.
+			if len(moduleValUpdates) > 0 {
+				return nil, sdkerrors.Wrapf(sdkerrors.ErrLogic, "validator InitGenesis updates already set by a previous module")
+			}
 		}
+
+		updatedVM[moduleName] = toVersion
 	}
 
-	return nil
+	return updatedVM, nil
 }
 
 // BeginBlock performs begin block functionality for all modules.
It creates a @@ -403,3 +477,15 @@ func (m *Manager) EndBlock(ctx sdk.Context, req abci.RequestEndBlock) abci.Respo Events: ctx.EventManager().ABCIEvents(), } } + +// GetVersionMap gets consensus version from all modules +func (m *Manager) GetVersionMap() VersionMap { + vermap := make(VersionMap) + for _, v := range m.Modules { + version := v.ConsensusVersion() + name := v.Name() + vermap[name] = version + } + + return vermap +} diff --git a/types/module/module_test.go b/types/module/module_test.go index 99940abc1f..63c77f709b 100644 --- a/types/module/module_test.go +++ b/types/module/module_test.go @@ -179,7 +179,9 @@ func TestManager_RegisterQueryServices(t *testing.T) { msgRouter := mocks.NewMockServer(mockCtrl) queryRouter := mocks.NewMockServer(mockCtrl) - cfg := module.NewConfigurator(msgRouter, queryRouter) + interfaceRegistry := types.NewInterfaceRegistry() + cdc := codec.NewProtoCodec(interfaceRegistry) + cfg := module.NewConfigurator(cdc, msgRouter, queryRouter) mockAppModule1.EXPECT().RegisterServices(cfg).Times(1) mockAppModule2.EXPECT().RegisterServices(cfg).Times(1) diff --git a/x/bank/module.go b/x/bank/module.go index 1400f7a6f8..a30690ba42 100644 --- a/x/bank/module.go +++ b/x/bank/module.go @@ -101,10 +101,10 @@ func (am AppModule) RegisterServices(cfg module.Configurator) { types.RegisterMsgServer(cfg.MsgServer(), keeper.NewMsgServerImpl(am.keeper)) types.RegisterQueryServer(cfg.QueryServer(), am.keeper) - // m := keeper.NewMigrator(am.keeper.(keeper.BaseKeeper)) - // if err := cfg.RegisterMigration(types.ModuleName, 1, m.Migrate1to2); err != nil { - // panic(fmt.Sprintf("failed to migrate x/bank from version 1 to 2: %v", err)) - // } + m := keeper.NewMigrator(am.keeper.(keeper.BaseKeeper)) + if err := cfg.RegisterMigration(types.ModuleName, 1, m.Migrate1to2); err != nil { + panic(fmt.Sprintf("failed to migrate x/bank from version 1 to 2: %v", err)) + } } // NewAppModule creates a new AppModule object diff --git a/x/genutil/client/cli/migrate.go b/x/genutil/client/cli/migrate.go index f0e28c1531..fa959abe18 100644 --- a/x/genutil/client/cli/migrate.go +++ b/x/genutil/client/cli/migrate.go @@ -21,20 +21,20 @@ const flagGenesisTime = "genesis-time" // Allow applications to extend and modify the migration process. // // Ref: https://github.com/cosmos/cosmos-sdk/issues/5041 -var migrationMap = types.MigrationMap{} +var VersionMap = types.VersionMap{} // GetMigrationCallback returns a MigrationCallback for a given version. func GetMigrationCallback(version string) types.MigrationCallback { - return migrationMap[version] + return VersionMap[version] } // GetMigrationVersions get all migration version in a sorted slice. func GetMigrationVersions() []string { - versions := make([]string, len(migrationMap)) + versions := make([]string, len(VersionMap)) var i int - for version := range migrationMap { + for version := range VersionMap { versions[i] = version i++ } diff --git a/x/genutil/types/types.go b/x/genutil/types/types.go index 12e7cbc4b5..bff243befd 100644 --- a/x/genutil/types/types.go +++ b/x/genutil/types/types.go @@ -19,8 +19,8 @@ type ( // TODO: MigrationCallback should also return an error upon failure. MigrationCallback func(AppMap, client.Context) AppMap - // MigrationMap defines a mapping from a version to a MigrationCallback. - MigrationMap map[string]MigrationCallback + // VersionMap defines a mapping from a version to a MigrationCallback. 
+ VersionMap map[string]MigrationCallback ) // ModuleName is genutil diff --git a/x/upgrade/abci_test.go b/x/upgrade/abci_test.go index 355b38e215..6eb6845c05 100644 --- a/x/upgrade/abci_test.go +++ b/x/upgrade/abci_test.go @@ -156,7 +156,9 @@ func VerifyDoIBCUpgrade(t *testing.T) { }) t.Log("Verify that the upgrade can be successfully applied with a handler") - s.keeper.SetUpgradeHandler("test", func(ctx sdk.Context, plan types.Plan) {}) + s.keeper.SetUpgradeHandler("test", func(ctx sdk.Context, plan types.Plan, vm module.VersionMap) (module.VersionMap, error) { + return vm, nil + }) require.NotPanics(t, func() { s.module.BeginBlock(newCtx, req) }) @@ -183,7 +185,9 @@ func VerifyDoUpgrade(t *testing.T) { }) t.Log("Verify that the upgrade can be successfully applied with a handler") - s.keeper.SetUpgradeHandler("test", func(ctx sdk.Context, plan types.Plan) {}) + s.keeper.SetUpgradeHandler("test", func(ctx sdk.Context, plan types.Plan, vm module.VersionMap) (module.VersionMap, error) { + return vm, nil + }) require.NotPanics(t, func() { s.module.BeginBlock(newCtx, req) }) @@ -199,7 +203,9 @@ func VerifyDoUpgradeWithCtx(t *testing.T, newCtx sdk.Context, proposalName strin }) t.Log("Verify that the upgrade can be successfully applied with a handler") - s.keeper.SetUpgradeHandler(proposalName, func(ctx sdk.Context, plan types.Plan) {}) + s.keeper.SetUpgradeHandler(proposalName, func(ctx sdk.Context, plan types.Plan, vm module.VersionMap) (module.VersionMap, error) { + return vm, nil + }) require.NotPanics(t, func() { s.module.BeginBlock(newCtx, req) }) @@ -211,7 +217,10 @@ func TestHaltIfTooNew(t *testing.T) { s := setupTest(10, map[int64]bool{}) t.Log("Verify that we don't panic with registered plan not in database at all") var called int - s.keeper.SetUpgradeHandler("future", func(ctx sdk.Context, plan types.Plan) { called++ }) + s.keeper.SetUpgradeHandler("future", func(ctx sdk.Context, plan types.Plan, vm module.VersionMap) (module.VersionMap, error) { + called++ + return vm, nil + }) newCtx := s.ctx.WithBlockHeight(s.ctx.BlockHeight() + 1).WithBlockTime(time.Now()) req := abci.RequestBeginBlock{Header: newCtx.BlockHeader()} diff --git a/x/upgrade/keeper/grpc_query.go b/x/upgrade/keeper/grpc_query.go index c4516cdea4..c2e75ddb13 100644 --- a/x/upgrade/keeper/grpc_query.go +++ b/x/upgrade/keeper/grpc_query.go @@ -4,6 +4,7 @@ import ( "context" sdk "github.com/line/lbm-sdk/types" + "github.com/line/lbm-sdk/types/errors" clienttypes "github.com/line/lbm-sdk/x/ibc/core/02-client/types" "github.com/line/lbm-sdk/x/upgrade/types" ) @@ -52,3 +53,25 @@ func (k Keeper) UpgradedConsensusState(c context.Context, req *types.QueryUpgrad UpgradedConsensusState: cs, }, nil } + +// ModuleVersions implements the Query/QueryModuleVersions gRPC method +func (k Keeper) ModuleVersions(c context.Context, req *types.QueryModuleVersionsRequest) (*types.QueryModuleVersionsResponse, error) { + ctx := sdk.UnwrapSDKContext(c) + + // check if a specific module was requested + if len(req.ModuleName) > 0 { + if version, ok := k.getModuleVersion(ctx, req.ModuleName); ok { + // return the requested module + res := []*types.ModuleVersion{{Name: req.ModuleName, Version: version}} + return &types.QueryModuleVersionsResponse{ModuleVersions: res}, nil + } + // module requested, but not found + return nil, errors.Wrapf(errors.ErrNotFound, "x/upgrade: QueryModuleVersions module %s not found", req.ModuleName) + } + + // if no module requested return all module versions from state + mv := k.GetModuleVersions(ctx) + return 
&types.QueryModuleVersionsResponse{ + ModuleVersions: mv, + }, nil +} diff --git a/x/upgrade/keeper/grpc_query_test.go b/x/upgrade/keeper/grpc_query_test.go index 1a6d4fb51c..4bf31a5cec 100644 --- a/x/upgrade/keeper/grpc_query_test.go +++ b/x/upgrade/keeper/grpc_query_test.go @@ -11,6 +11,7 @@ import ( "github.com/line/lbm-sdk/baseapp" "github.com/line/lbm-sdk/simapp" sdk "github.com/line/lbm-sdk/types" + "github.com/line/lbm-sdk/types/module" "github.com/line/lbm-sdk/x/upgrade/types" ) @@ -110,7 +111,9 @@ func (suite *UpgradeTestSuite) TestAppliedCurrentPlan() { suite.app.UpgradeKeeper.ScheduleUpgrade(suite.ctx, plan) suite.ctx = suite.ctx.WithBlockHeight(expHeight) - suite.app.UpgradeKeeper.SetUpgradeHandler(planName, func(ctx sdk.Context, plan types.Plan) {}) + suite.app.UpgradeKeeper.SetUpgradeHandler(planName, func(ctx sdk.Context, plan types.Plan, vm module.VersionMap) (module.VersionMap, error) { + return vm, nil + }) suite.app.UpgradeKeeper.ApplyUpgrade(suite.ctx, plan) req = &types.QueryAppliedPlanRequest{Name: planName} @@ -138,6 +141,68 @@ func (suite *UpgradeTestSuite) TestAppliedCurrentPlan() { } } +func (suite *UpgradeTestSuite) TestModuleVersions() { + testCases := []struct { + msg string + req types.QueryModuleVersionsRequest + single bool + expPass bool + }{ + { + msg: "test full query", + req: types.QueryModuleVersionsRequest{}, + single: false, + expPass: true, + }, + { + msg: "test single module", + req: types.QueryModuleVersionsRequest{ModuleName: "bank"}, + single: true, + expPass: true, + }, + { + msg: "test non-existent module", + req: types.QueryModuleVersionsRequest{ModuleName: "abcdefg"}, + single: true, + expPass: false, + }, + } + + vm := suite.app.UpgradeKeeper.GetModuleVersionMap(suite.ctx) + mv := suite.app.UpgradeKeeper.GetModuleVersions(suite.ctx) + + for _, tc := range testCases { + suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { + suite.SetupTest() // reset + + res, err := suite.queryClient.ModuleVersions(gocontext.Background(), &tc.req) + + if tc.expPass { + suite.Require().NoError(err) + suite.Require().NotNil(res) + + if tc.single { + // test that the single module response is valid + suite.Require().Len(res.ModuleVersions, 1) + // make sure we got the right values + suite.Require().Equal(vm[tc.req.ModuleName], res.ModuleVersions[0].Version) + suite.Require().Equal(tc.req.ModuleName, res.ModuleVersions[0].Name) + } else { + // check that the full response is valid + suite.Require().NotEmpty(res.ModuleVersions) + suite.Require().Equal(len(mv), len(res.ModuleVersions)) + for i, v := range res.ModuleVersions { + suite.Require().Equal(mv[i].Version, v.Version) + suite.Require().Equal(mv[i].Name, v.Name) + } + } + } else { + suite.Require().Error(err) + } + }) + } +} + func TestUpgradeTestSuite(t *testing.T) { suite.Run(t, new(UpgradeTestSuite)) } diff --git a/x/upgrade/keeper/keeper.go b/x/upgrade/keeper/keeper.go index f429753214..8734ac8723 100644 --- a/x/upgrade/keeper/keeper.go +++ b/x/upgrade/keeper/keeper.go @@ -16,6 +16,7 @@ import ( store "github.com/line/lbm-sdk/store/types" sdk "github.com/line/lbm-sdk/types" sdkerrors "github.com/line/lbm-sdk/types/errors" + "github.com/line/lbm-sdk/types/module" clienttypes "github.com/line/lbm-sdk/x/ibc/core/02-client/types" ibcexported "github.com/line/lbm-sdk/x/ibc/core/exported" "github.com/line/lbm-sdk/x/upgrade/types" @@ -50,6 +51,74 @@ func (k Keeper) SetUpgradeHandler(name string, upgradeHandler types.UpgradeHandl k.upgradeHandlers[name] = upgradeHandler } +// SetModuleVersionMap saves a given 
version map to state +func (k Keeper) SetModuleVersionMap(ctx sdk.Context, vm module.VersionMap) { + if len(vm) > 0 { + store := ctx.KVStore(k.storeKey) + versionStore := prefix.NewStore(store, []byte{types.VersionMapByte}) + for modName, ver := range vm { + nameBytes := []byte(modName) + verBytes := make([]byte, 8) + binary.BigEndian.PutUint64(verBytes, ver) + versionStore.Set(nameBytes, verBytes) + } + } +} + +// GetModuleVersionMap returns a map of key module name and value module consensus version +// as defined in ADR-041. +func (k Keeper) GetModuleVersionMap(ctx sdk.Context) module.VersionMap { + store := ctx.KVStore(k.storeKey) + it := sdk.KVStorePrefixIterator(store, []byte{types.VersionMapByte}) + + vm := make(module.VersionMap) + defer it.Close() + for ; it.Valid(); it.Next() { + moduleBytes := it.Key() + // first byte is prefix key, so we remove it here + name := string(moduleBytes[1:]) + moduleVersion := binary.BigEndian.Uint64(it.Value()) + vm[name] = moduleVersion + } + + return vm +} + +// GetModuleVersions gets a slice of module consensus versions +func (k Keeper) GetModuleVersions(ctx sdk.Context) []*types.ModuleVersion { + store := ctx.KVStore(k.storeKey) + it := sdk.KVStorePrefixIterator(store, []byte{types.VersionMapByte}) + defer it.Close() + + mv := make([]*types.ModuleVersion, 0) + for ; it.Valid(); it.Next() { + moduleBytes := it.Key() + name := string(moduleBytes[1:]) + moduleVersion := binary.BigEndian.Uint64(it.Value()) + mv = append(mv, &types.ModuleVersion{ + Name: name, + Version: moduleVersion, + }) + } + return mv +} + +// gets the version for a given module, and returns true if it exists, false otherwise +func (k Keeper) getModuleVersion(ctx sdk.Context, name string) (uint64, bool) { + store := ctx.KVStore(k.storeKey) + it := sdk.KVStorePrefixIterator(store, []byte{types.VersionMapByte}) + defer it.Close() + + for ; it.Valid(); it.Next() { + moduleName := string(it.Key()[1:]) + if moduleName == name { + version := binary.BigEndian.Uint64(it.Value()) + return version, true + } + } + return 0, false +} + // ScheduleUpgrade schedules an upgrade based on the specified plan. // If there is another Plan already scheduled, it will overwrite it // (implicitly cancelling the current plan) @@ -219,7 +288,12 @@ func (k Keeper) ApplyUpgrade(ctx sdk.Context, plan types.Plan) { panic("ApplyUpgrade should never be called without first checking HasHandler") } - handler(ctx, plan) + updatedVM, err := handler(ctx, plan, k.GetModuleVersionMap(ctx)) + if err != nil { + panic(err) + } + + k.SetModuleVersionMap(ctx, updatedVM) // Must clear IBC state after upgrade is applied as it is stored separately from the upgrade plan. // This will prevent resubmission of upgrade msg after upgrade is already completed. 
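The keeper code above stores one entry per module under the `VersionMapByte` (0x2) prefix: the key is the module name and the value is the consensus version encoded as a big-endian uint64, which is what `GetModuleVersionMap`, `GetModuleVersions`, and `getModuleVersion` decode when iterating the prefix. A minimal, self-contained sketch of that encoding round-trip, using a plain Go map in place of the real prefixed KVStore (the store plumbing here is illustrative only, not the keeper's actual code):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// versionMapByte mirrors types.VersionMapByte (0x2) introduced in this patch.
// The flat map below stands in for the keeper's prefix store and only
// illustrates the key/value layout.
const versionMapByte = 0x2

// setModuleVersion writes "<prefix><module name>" -> big-endian uint64 version,
// matching the encoding used by SetModuleVersionMap.
func setModuleVersion(store map[string][]byte, name string, version uint64) {
	key := append([]byte{versionMapByte}, name...)
	value := make([]byte, 8)
	binary.BigEndian.PutUint64(value, version)
	store[string(key)] = value
}

// getModuleVersion reads the version back, reporting whether the module exists.
func getModuleVersion(store map[string][]byte, name string) (uint64, bool) {
	value, ok := store[string(append([]byte{versionMapByte}, name...))]
	if !ok {
		return 0, false
	}
	return binary.BigEndian.Uint64(value), true
}

func main() {
	store := map[string][]byte{}
	setModuleVersion(store, "bank", 2)

	if v, ok := getModuleVersion(store, "bank"); ok {
		fmt.Printf("bank consensus version: %d\n", v) // bank consensus version: 2
	}
}
```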
diff --git a/x/upgrade/keeper/keeper_test.go b/x/upgrade/keeper/keeper_test.go index 4a4d5ef38b..ae5bbf6f64 100644 --- a/x/upgrade/keeper/keeper_test.go +++ b/x/upgrade/keeper/keeper_test.go @@ -11,6 +11,7 @@ import ( "github.com/line/lbm-sdk/simapp" store "github.com/line/lbm-sdk/store/types" sdk "github.com/line/lbm-sdk/types" + "github.com/line/lbm-sdk/types/module" clienttypes "github.com/line/lbm-sdk/x/ibc/core/02-client/types" commitmenttypes "github.com/line/lbm-sdk/x/ibc/core/23-commitment/types" ibcexported "github.com/line/lbm-sdk/x/ibc/core/exported" @@ -197,7 +198,9 @@ func (s *KeeperTestSuite) TestScheduleUpgrade() { Height: 123450000, }, setup: func() { - s.app.UpgradeKeeper.SetUpgradeHandler("all-good", func(_ sdk.Context, _ types.Plan) {}) + s.app.UpgradeKeeper.SetUpgradeHandler("all-good", func(ctx sdk.Context, plan types.Plan, vm module.VersionMap) (module.VersionMap, error) { + return vm, nil + }) s.app.UpgradeKeeper.ApplyUpgrade(s.ctx, types.Plan{ Name: "all-good", Info: "some text here", @@ -296,6 +299,28 @@ func (s *KeeperTestSuite) TestSetUpgradedClient() { } +// Tests that the underlying state of x/upgrade is set correctly after +// an upgrade. +func (s *KeeperTestSuite) TestMigrations() { + initialVM := module.VersionMap{"bank": uint64(1)} + s.app.UpgradeKeeper.SetModuleVersionMap(s.ctx, initialVM) + vmBefore := s.app.UpgradeKeeper.GetModuleVersionMap(s.ctx) + s.app.UpgradeKeeper.SetUpgradeHandler("dummy", func(_ sdk.Context, _ types.Plan, vm module.VersionMap) (module.VersionMap, error) { + // simulate upgrading the bank module + vm["bank"] = vm["bank"] + 1 + return vm, nil + }) + dummyPlan := types.Plan{ + Name: "dummy", + Info: "some text here", + Height: 123450000, + } + + s.app.UpgradeKeeper.ApplyUpgrade(s.ctx, dummyPlan) + vm := s.app.UpgradeKeeper.GetModuleVersionMap(s.ctx) + s.Require().Equal(vmBefore["bank"]+1, vm["bank"]) +} + func TestKeeperTestSuite(t *testing.T) { suite.Run(t, new(KeeperTestSuite)) } diff --git a/x/upgrade/spec/01_concepts.md b/x/upgrade/spec/01_concepts.md index 54147f8b5e..a565b95ad5 100644 --- a/x/upgrade/spec/01_concepts.md +++ b/x/upgrade/spec/01_concepts.md @@ -48,7 +48,7 @@ and not defined on a per-module basis. Registering a `Handler` is done via `Keeper#SetUpgradeHandler` in the application. ```go -type UpgradeHandler func(Context, Plan) +type UpgradeHandler func(Context, Plan, VersionMap) (VersionMap, error) ``` During each `EndBlock` execution, the `x/upgrade` module checks if there exists a diff --git a/x/upgrade/types/handler.go b/x/upgrade/types/handler.go index 3eddfc751a..f9deb23fa3 100644 --- a/x/upgrade/types/handler.go +++ b/x/upgrade/types/handler.go @@ -2,7 +2,25 @@ package types import ( sdk "github.com/line/lbm-sdk/types" + "github.com/line/lbm-sdk/types/module" ) -// UpgradeHandler specifies the type of function that is called when an upgrade is applied -type UpgradeHandler func(ctx sdk.Context, plan Plan) +// UpgradeHandler specifies the type of function that is called when an upgrade +// is applied. +// +// `fromVM` is a VersionMap of moduleName to fromVersion (unit64), where +// fromVersion denotes the version from which we should migrate the module, the +// target version being the module's latest version in the return VersionMap, +// let's call it `toVM`. +// +// `fromVM` is retrieved from x/upgrade's store, whereas `toVM` is chosen +// arbitrarily by the app developer (and persisted to x/upgrade's store right +// after the upgrade handler runs). 
In general, `toVM` should map all modules +// to their latest ConsensusVersion so that x/upgrade can track each module's +// latest ConsensusVersion; `fromVM` can be left as-is, but can also be +// modified inside the upgrade handler, e.g. to skip running InitGenesis or +// migrations for certain modules when calling the `module.Manager#RunMigrations` +// function. +// +// Please also refer to docs/core/upgrade.md for more information. +type UpgradeHandler func(ctx sdk.Context, plan Plan, fromVM module.VersionMap) (module.VersionMap, error) diff --git a/x/upgrade/types/keys.go b/x/upgrade/types/keys.go index 410f63597c..2505bd5ce4 100644 --- a/x/upgrade/types/keys.go +++ b/x/upgrade/types/keys.go @@ -22,6 +22,9 @@ const ( // DoneByte is a prefix for to look up completed upgrade plan by name DoneByte = 0x1 + // VersionMapByte is a prefix to look up module names (key) and versions (value) + VersionMapByte = 0x2 + // KeyUpgradedIBCState is the key under which upgraded ibc state is stored in the upgrade store KeyUpgradedIBCState = "upgradedIBCState" diff --git a/x/upgrade/types/query.pb.go b/x/upgrade/types/query.pb.go index 350ec4f644..a62d39a943 100644 --- a/x/upgrade/types/query.pb.go +++ b/x/upgrade/types/query.pb.go @@ -302,6 +302,102 @@ func (m *QueryUpgradedConsensusStateResponse) GetUpgradedConsensusState() *types return nil } +// QueryModuleVersionsRequest is the request type for the Query/ModuleVersions +// RPC method. +type QueryModuleVersionsRequest struct { + // module_name is a field to query a specific module + // consensus version from state. Leaving this empty will + // fetch the full list of module versions from state + ModuleName string `protobuf:"bytes,1,opt,name=module_name,json=moduleName,proto3" json:"module_name,omitempty"` +} + +func (m *QueryModuleVersionsRequest) Reset() { *m = QueryModuleVersionsRequest{} } +func (m *QueryModuleVersionsRequest) String() string { return proto.CompactTextString(m) } +func (*QueryModuleVersionsRequest) ProtoMessage() {} +func (*QueryModuleVersionsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_be137e92226ff1e7, []int{6} +} +func (m *QueryModuleVersionsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryModuleVersionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryModuleVersionsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryModuleVersionsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryModuleVersionsRequest.Merge(m, src) +} +func (m *QueryModuleVersionsRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryModuleVersionsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryModuleVersionsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryModuleVersionsRequest proto.InternalMessageInfo + +func (m *QueryModuleVersionsRequest) GetModuleName() string { + if m != nil { + return m.ModuleName + } + return "" +} + +// QueryModuleVersionsResponse is the response type for the Query/ModuleVersions +// RPC method. +type QueryModuleVersionsResponse struct { + // module_versions is a list of module names with their consensus versions. 
+ ModuleVersions []*ModuleVersion `protobuf:"bytes,1,rep,name=module_versions,json=moduleVersions,proto3" json:"module_versions,omitempty"` +} + +func (m *QueryModuleVersionsResponse) Reset() { *m = QueryModuleVersionsResponse{} } +func (m *QueryModuleVersionsResponse) String() string { return proto.CompactTextString(m) } +func (*QueryModuleVersionsResponse) ProtoMessage() {} +func (*QueryModuleVersionsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_be137e92226ff1e7, []int{7} +} +func (m *QueryModuleVersionsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryModuleVersionsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryModuleVersionsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryModuleVersionsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryModuleVersionsResponse.Merge(m, src) +} +func (m *QueryModuleVersionsResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryModuleVersionsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryModuleVersionsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryModuleVersionsResponse proto.InternalMessageInfo + +func (m *QueryModuleVersionsResponse) GetModuleVersions() []*ModuleVersion { + if m != nil { + return m.ModuleVersions + } + return nil +} + func init() { proto.RegisterType((*QueryCurrentPlanRequest)(nil), "lbm.upgrade.v1.QueryCurrentPlanRequest") proto.RegisterType((*QueryCurrentPlanResponse)(nil), "lbm.upgrade.v1.QueryCurrentPlanResponse") @@ -309,42 +405,50 @@ func init() { proto.RegisterType((*QueryAppliedPlanResponse)(nil), "lbm.upgrade.v1.QueryAppliedPlanResponse") proto.RegisterType((*QueryUpgradedConsensusStateRequest)(nil), "lbm.upgrade.v1.QueryUpgradedConsensusStateRequest") proto.RegisterType((*QueryUpgradedConsensusStateResponse)(nil), "lbm.upgrade.v1.QueryUpgradedConsensusStateResponse") + proto.RegisterType((*QueryModuleVersionsRequest)(nil), "lbm.upgrade.v1.QueryModuleVersionsRequest") + proto.RegisterType((*QueryModuleVersionsResponse)(nil), "lbm.upgrade.v1.QueryModuleVersionsResponse") } func init() { proto.RegisterFile("lbm/upgrade/v1/query.proto", fileDescriptor_be137e92226ff1e7) } var fileDescriptor_be137e92226ff1e7 = []byte{ - // 479 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0x4d, 0x6b, 0x13, 0x41, - 0x00, 0xcd, 0xda, 0x5a, 0x70, 0x02, 0x1e, 0x06, 0x89, 0xe9, 0x52, 0x56, 0x99, 0x2a, 0x0d, 0x48, - 0x67, 0x68, 0x0a, 0xe2, 0x49, 0x88, 0x55, 0xf0, 0x24, 0x1a, 0xf1, 0xe2, 0x25, 0xcc, 0x66, 0xc7, - 0xcd, 0xe2, 0x64, 0x66, 0xba, 0x33, 0x53, 0x0c, 0xa5, 0x17, 0x0f, 0xde, 0x04, 0xc1, 0x5f, 0xe4, - 0xcd, 0x63, 0xc1, 0x8b, 0x47, 0x49, 0xfa, 0x43, 0x64, 0x67, 0x67, 0x4b, 0x3e, 0x36, 0x11, 0x6f, - 0xc9, 0xbe, 0x37, 0xef, 0xbd, 0x9d, 0xf7, 0x16, 0x84, 0x3c, 0x1e, 0x13, 0xab, 0xd2, 0x9c, 0x26, - 0x8c, 0x9c, 0x1d, 0x91, 0x53, 0xcb, 0xf2, 0x09, 0x56, 0xb9, 0x34, 0x12, 0xde, 0xe6, 0xf1, 0x18, - 0x7b, 0x0c, 0x9f, 0x1d, 0x85, 0xbb, 0xa9, 0x94, 0x29, 0x67, 0xc4, 0xa1, 0xb1, 0xfd, 0x40, 0xa8, - 0xf0, 0xd4, 0x70, 0xcf, 0x43, 0x54, 0x65, 0x84, 0x0a, 0x21, 0x0d, 0x35, 0x99, 0x14, 0xba, 0x42, - 0x97, 0x4c, 0x2a, 0x4d, 0x87, 0xa2, 0x5d, 0x70, 0xf7, 0x4d, 0xe1, 0x7a, 0x62, 0xf3, 0x9c, 0x09, - 0xf3, 0x9a, 0x53, 0xd1, 0x67, 0xa7, 0x96, 0x69, 0x83, 0x9e, 0x83, 0xf6, 0x2a, 0xa4, 0x95, 0x14, - 0x9a, 
0xc1, 0x0e, 0xd8, 0x56, 0x9c, 0x8a, 0x76, 0x70, 0x3f, 0xe8, 0x34, 0xbb, 0x77, 0xf0, 0x62, - 0x58, 0xec, 0xb8, 0x8e, 0x81, 0x0e, 0xbd, 0x41, 0x4f, 0x29, 0x9e, 0xb1, 0x64, 0xce, 0x00, 0x42, - 0xb0, 0x2d, 0xe8, 0x98, 0x39, 0x91, 0x5b, 0x7d, 0xf7, 0x1b, 0x75, 0xbd, 0xe9, 0x02, 0xdd, 0x9b, - 0xb6, 0xc0, 0xce, 0x88, 0x65, 0xe9, 0xc8, 0xb8, 0x13, 0x5b, 0x7d, 0xff, 0x0f, 0xbd, 0x00, 0xc8, - 0x9d, 0x79, 0x57, 0x06, 0x48, 0x4e, 0x0a, 0xb6, 0xd0, 0x56, 0xbf, 0x35, 0xd4, 0xb0, 0xca, 0xed, - 0x1e, 0x68, 0x72, 0xaa, 0xcd, 0x60, 0x41, 0x02, 0x14, 0x8f, 0x5e, 0x96, 0x32, 0x16, 0xec, 0x6f, - 0x94, 0xf1, 0x29, 0x5e, 0x81, 0xb6, 0x7f, 0xd3, 0x64, 0x30, 0xac, 0x28, 0x03, 0x5d, 0x70, 0xae, - 0xaf, 0xa3, 0x2c, 0x04, 0x57, 0x5d, 0xe1, 0x9e, 0x98, 0xf4, 0x5b, 0xb6, 0x56, 0xb7, 0x7b, 0xb5, - 0x05, 0x6e, 0x3a, 0x5f, 0xf8, 0x25, 0x00, 0xcd, 0xb9, 0xcb, 0x86, 0x07, 0xcb, 0xd7, 0xba, 0xa6, - 0xa9, 0xb0, 0xf3, 0x6f, 0x62, 0x19, 0x1e, 0x3d, 0xf8, 0xfc, 0xeb, 0xea, 0xfb, 0x8d, 0x08, 0xee, - 0x91, 0xa5, 0x55, 0x0c, 0x4b, 0xf2, 0xa0, 0xe8, 0x0c, 0x7e, 0x0d, 0x40, 0x73, 0xae, 0x80, 0x35, - 0x41, 0x56, 0x1b, 0x5d, 0x13, 0xa4, 0xa6, 0x4b, 0xf4, 0xc8, 0x05, 0x79, 0x08, 0xf7, 0x97, 0x83, - 0xd0, 0x92, 0xec, 0x82, 0x90, 0xf3, 0x62, 0x13, 0x17, 0xf0, 0x47, 0x00, 0x5a, 0xf5, 0xad, 0xc0, - 0x6e, 0xad, 0xe3, 0xc6, 0x25, 0x84, 0xc7, 0xff, 0x75, 0xc6, 0x07, 0x7e, 0xea, 0x02, 0x3f, 0x81, - 0x8f, 0x49, 0xfd, 0xf7, 0xb4, 0x32, 0x06, 0x72, 0x3e, 0x37, 0xb7, 0x8b, 0x67, 0xbd, 0x9f, 0xd3, - 0x28, 0xb8, 0x9c, 0x46, 0xc1, 0x9f, 0x69, 0x14, 0x7c, 0x9b, 0x45, 0x8d, 0xcb, 0x59, 0xd4, 0xf8, - 0x3d, 0x8b, 0x1a, 0xef, 0x0f, 0xd2, 0xcc, 0x8c, 0x6c, 0x8c, 0x87, 0x72, 0x4c, 0x78, 0x26, 0x58, - 0x61, 0x70, 0xa8, 0x93, 0x8f, 0xe4, 0xd3, 0xb5, 0x8d, 0x99, 0x28, 0xa6, 0xe3, 0x1d, 0xb7, 0xa7, - 0xe3, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xa8, 0x51, 0xe2, 0x6a, 0x37, 0x04, 0x00, 0x00, + // 568 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0x41, 0x6b, 0x13, 0x41, + 0x18, 0xed, 0xda, 0x58, 0xf0, 0x0b, 0x44, 0x18, 0x24, 0xa6, 0x6b, 0xdd, 0xd6, 0xad, 0x92, 0x60, + 0xe9, 0x0e, 0x4d, 0x41, 0xbc, 0x28, 0xc4, 0xaa, 0x78, 0xb1, 0x68, 0x44, 0x0f, 0x5e, 0xc2, 0x24, + 0x3b, 0x26, 0x8b, 0xbb, 0xb3, 0xdb, 0x9d, 0x99, 0x60, 0x28, 0xbd, 0x78, 0xf0, 0x26, 0x88, 0xfe, + 0x22, 0x6f, 0x1e, 0x0b, 0x5e, 0x3c, 0x4a, 0xe2, 0x9f, 0xf0, 0x26, 0x3b, 0x3b, 0x29, 0xbb, 0x9b, + 0x4d, 0xd5, 0x5b, 0x32, 0xef, 0x7d, 0xef, 0xbd, 0x9d, 0xef, 0x0d, 0x98, 0x7e, 0x3f, 0xc0, 0x32, + 0x1a, 0xc6, 0xc4, 0xa5, 0x78, 0xbc, 0x87, 0x8f, 0x24, 0x8d, 0x27, 0x4e, 0x14, 0x87, 0x22, 0x44, + 0x35, 0xbf, 0x1f, 0x38, 0x1a, 0x73, 0xc6, 0x7b, 0xe6, 0xfa, 0x30, 0x0c, 0x87, 0x3e, 0xc5, 0x0a, + 0xed, 0xcb, 0x37, 0x98, 0x30, 0x4d, 0x35, 0x37, 0x34, 0x44, 0x22, 0x0f, 0x13, 0xc6, 0x42, 0x41, + 0x84, 0x17, 0x32, 0x3e, 0x47, 0x0b, 0x26, 0x73, 0x4d, 0x85, 0xda, 0xeb, 0x70, 0xf5, 0x79, 0xe2, + 0x7a, 0x20, 0xe3, 0x98, 0x32, 0xf1, 0xcc, 0x27, 0xac, 0x4b, 0x8f, 0x24, 0xe5, 0xc2, 0x7e, 0x08, + 0x8d, 0x45, 0x88, 0x47, 0x21, 0xe3, 0x14, 0xb5, 0xa0, 0x12, 0xf9, 0x84, 0x35, 0x8c, 0x2d, 0xa3, + 0x55, 0x6d, 0x5f, 0x71, 0xf2, 0x61, 0x1d, 0xc5, 0x55, 0x0c, 0x7b, 0x57, 0x1b, 0x74, 0xa2, 0xc8, + 0xf7, 0xa8, 0x9b, 0x31, 0x40, 0x08, 0x2a, 0x8c, 0x04, 0x54, 0x89, 0x5c, 0xea, 0xaa, 0xdf, 0x76, + 0x5b, 0x9b, 0xe6, 0xe8, 0xda, 0xb4, 0x0e, 0x6b, 0x23, 0xea, 0x0d, 0x47, 0x42, 0x4d, 0xac, 0x76, + 0xf5, 0x3f, 0xfb, 0x11, 0xd8, 0x6a, 0xe6, 0x65, 0x1a, 0xc0, 0x3d, 0x48, 0xd8, 0x8c, 0x4b, 0xfe, + 0x42, 0x10, 0x41, 0xe7, 0x6e, 0x9b, 0x50, 0xf5, 0x09, 0x17, 0xbd, 0x9c, 0x04, 0x24, 
0x47, 0x4f, + 0x52, 0x19, 0x09, 0xdb, 0xe7, 0xca, 0xe8, 0x14, 0x87, 0xd0, 0xd0, 0x5f, 0xea, 0xf6, 0x06, 0x73, + 0x4a, 0x8f, 0x27, 0x9c, 0xb3, 0xeb, 0x48, 0x17, 0xe2, 0xcc, 0x77, 0xe5, 0x74, 0xd8, 0xa4, 0x5b, + 0x97, 0xa5, 0xba, 0xf6, 0x3d, 0x30, 0x95, 0xed, 0xd3, 0xd0, 0x95, 0x3e, 0x7d, 0x45, 0x63, 0x9e, + 0x2c, 0x2f, 0x93, 0x3a, 0x50, 0x40, 0x2f, 0x73, 0x55, 0x90, 0x1e, 0x1d, 0x26, 0x17, 0x46, 0xe1, + 0x5a, 0xe9, 0xb8, 0x4e, 0xfb, 0x18, 0x2e, 0xeb, 0xf9, 0xb1, 0x86, 0x1a, 0xc6, 0xd6, 0x6a, 0xab, + 0xda, 0xbe, 0x5e, 0xdc, 0x59, 0x4e, 0xa0, 0x5b, 0x0b, 0x72, 0x7a, 0xed, 0xdf, 0x15, 0xb8, 0xa8, + 0x7c, 0xd0, 0x07, 0x03, 0xaa, 0x99, 0x4a, 0xa0, 0x66, 0x51, 0x68, 0x49, 0x9f, 0xcc, 0xd6, 0xdf, + 0x89, 0x69, 0x68, 0xfb, 0xe6, 0xfb, 0xef, 0xbf, 0xbe, 0x5c, 0xb0, 0xd0, 0x06, 0x2e, 0x74, 0x77, + 0x90, 0x92, 0x7b, 0x49, 0xb3, 0xd0, 0x47, 0x03, 0xaa, 0x99, 0x9a, 0x2c, 0x09, 0xb2, 0xd8, 0xbb, + 0x25, 0x41, 0x4a, 0x1a, 0x67, 0xef, 0xa8, 0x20, 0xb7, 0xd0, 0x76, 0x31, 0x08, 0x49, 0xc9, 0x2a, + 0x08, 0x3e, 0x4e, 0x56, 0x73, 0x82, 0xbe, 0x1a, 0x50, 0x2f, 0xef, 0x0e, 0x6a, 0x97, 0x3a, 0x9e, + 0xdb, 0x57, 0x73, 0xff, 0xbf, 0x66, 0x74, 0xe0, 0xfb, 0x2a, 0xf0, 0x5d, 0x74, 0x07, 0x97, 0xbf, + 0xfa, 0x85, 0xca, 0xe2, 0xe3, 0xcc, 0xa3, 0x38, 0x41, 0x9f, 0x0d, 0xa8, 0xe5, 0x9b, 0x84, 0x6e, + 0x97, 0xe6, 0x28, 0x6d, 0xab, 0xb9, 0xf3, 0x4f, 0x5c, 0x9d, 0xb5, 0xa9, 0xb2, 0xde, 0x40, 0x9b, + 0xc5, 0xac, 0x85, 0xc2, 0x3e, 0xe8, 0x7c, 0x9b, 0x5a, 0xc6, 0xe9, 0xd4, 0x32, 0x7e, 0x4e, 0x2d, + 0xe3, 0xd3, 0xcc, 0x5a, 0x39, 0x9d, 0x59, 0x2b, 0x3f, 0x66, 0xd6, 0xca, 0xeb, 0xe6, 0xd0, 0x13, + 0x23, 0xd9, 0x77, 0x06, 0x61, 0x80, 0x7d, 0x8f, 0xd1, 0x44, 0x69, 0x97, 0xbb, 0x6f, 0xf1, 0xbb, + 0x33, 0x3d, 0x31, 0x89, 0x28, 0xef, 0xaf, 0xa9, 0xa7, 0xb8, 0xff, 0x27, 0x00, 0x00, 0xff, 0xff, + 0xfd, 0xe2, 0xab, 0x5d, 0x72, 0x05, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -368,6 +472,8 @@ type QueryClient interface { // stored at the last height of this chain. // UpgradedConsensusState RPC not supported with legacy querier UpgradedConsensusState(ctx context.Context, in *QueryUpgradedConsensusStateRequest, opts ...grpc.CallOption) (*QueryUpgradedConsensusStateResponse, error) + // ModuleVersions queries the list of module versions from state. + ModuleVersions(ctx context.Context, in *QueryModuleVersionsRequest, opts ...grpc.CallOption) (*QueryModuleVersionsResponse, error) } type queryClient struct { @@ -405,6 +511,15 @@ func (c *queryClient) UpgradedConsensusState(ctx context.Context, in *QueryUpgra return out, nil } +func (c *queryClient) ModuleVersions(ctx context.Context, in *QueryModuleVersionsRequest, opts ...grpc.CallOption) (*QueryModuleVersionsResponse, error) { + out := new(QueryModuleVersionsResponse) + err := c.cc.Invoke(ctx, "/lbm.upgrade.v1.Query/ModuleVersions", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // QueryServer is the server API for Query service. type QueryServer interface { // CurrentPlan queries the current upgrade plan. @@ -416,6 +531,8 @@ type QueryServer interface { // stored at the last height of this chain. // UpgradedConsensusState RPC not supported with legacy querier UpgradedConsensusState(context.Context, *QueryUpgradedConsensusStateRequest) (*QueryUpgradedConsensusStateResponse, error) + // ModuleVersions queries the list of module versions from state. 
+ ModuleVersions(context.Context, *QueryModuleVersionsRequest) (*QueryModuleVersionsResponse, error) } // UnimplementedQueryServer can be embedded to have forward compatible implementations. @@ -431,6 +548,9 @@ func (*UnimplementedQueryServer) AppliedPlan(ctx context.Context, req *QueryAppl func (*UnimplementedQueryServer) UpgradedConsensusState(ctx context.Context, req *QueryUpgradedConsensusStateRequest) (*QueryUpgradedConsensusStateResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method UpgradedConsensusState not implemented") } +func (*UnimplementedQueryServer) ModuleVersions(ctx context.Context, req *QueryModuleVersionsRequest) (*QueryModuleVersionsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ModuleVersions not implemented") +} func RegisterQueryServer(s grpc1.Server, srv QueryServer) { s.RegisterService(&_Query_serviceDesc, srv) @@ -490,6 +610,24 @@ func _Query_UpgradedConsensusState_Handler(srv interface{}, ctx context.Context, return interceptor(ctx, in, info, handler) } +func _Query_ModuleVersions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryModuleVersionsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).ModuleVersions(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/lbm.upgrade.v1.Query/ModuleVersions", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).ModuleVersions(ctx, req.(*QueryModuleVersionsRequest)) + } + return interceptor(ctx, in, info, handler) +} + var _Query_serviceDesc = grpc.ServiceDesc{ ServiceName: "lbm.upgrade.v1.Query", HandlerType: (*QueryServer)(nil), @@ -506,6 +644,10 @@ var _Query_serviceDesc = grpc.ServiceDesc{ MethodName: "UpgradedConsensusState", Handler: _Query_UpgradedConsensusState_Handler, }, + { + MethodName: "ModuleVersions", + Handler: _Query_ModuleVersions_Handler, + }, }, Streams: []grpc.StreamDesc{}, Metadata: "lbm/upgrade/v1/query.proto", @@ -690,6 +832,73 @@ func (m *QueryUpgradedConsensusStateResponse) MarshalToSizedBuffer(dAtA []byte) return len(dAtA) - i, nil } +func (m *QueryModuleVersionsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryModuleVersionsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryModuleVersionsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ModuleName) > 0 { + i -= len(m.ModuleName) + copy(dAtA[i:], m.ModuleName) + i = encodeVarintQuery(dAtA, i, uint64(len(m.ModuleName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryModuleVersionsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryModuleVersionsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryModuleVersionsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ModuleVersions) > 0 { + for iNdEx := 
len(m.ModuleVersions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ModuleVersions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + func encodeVarintQuery(dAtA []byte, offset int, v uint64) int { offset -= sovQuery(v) base := offset @@ -773,6 +982,34 @@ func (m *QueryUpgradedConsensusStateResponse) Size() (n int) { return n } +func (m *QueryModuleVersionsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ModuleName) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryModuleVersionsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ModuleVersions) > 0 { + for _, e := range m.ModuleVersions { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + return n +} + func sovQuery(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } @@ -1221,6 +1458,172 @@ func (m *QueryUpgradedConsensusStateResponse) Unmarshal(dAtA []byte) error { } return nil } +func (m *QueryModuleVersionsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryModuleVersionsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryModuleVersionsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ModuleName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ModuleName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryModuleVersionsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryModuleVersionsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryModuleVersionsResponse: illegal tag %d (wire type %d)", 
fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ModuleVersions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ModuleVersions = append(m.ModuleVersions, &ModuleVersion{}) + if err := m.ModuleVersions[len(m.ModuleVersions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func skipQuery(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/x/upgrade/types/query.pb.gw.go b/x/upgrade/types/query.pb.gw.go index e8985834b6..64e2bdc13b 100644 --- a/x/upgrade/types/query.pb.gw.go +++ b/x/upgrade/types/query.pb.gw.go @@ -157,6 +157,42 @@ func local_request_Query_UpgradedConsensusState_0(ctx context.Context, marshaler } +var ( + filter_Query_ModuleVersions_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_Query_ModuleVersions_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryModuleVersionsRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_ModuleVersions_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.ModuleVersions(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_ModuleVersions_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryModuleVersionsRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_ModuleVersions_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.ModuleVersions(ctx, &protoReq) + return msg, metadata, err + +} + // RegisterQueryHandlerServer registers the http handlers for service Query to "mux". // UnaryRPC :call QueryServer directly. // StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. 
@@ -223,6 +259,26 @@ func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, serv }) + mux.Handle("GET", pattern_Query_ModuleVersions_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_ModuleVersions_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_ModuleVersions_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + return nil } @@ -324,6 +380,26 @@ func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie }) + mux.Handle("GET", pattern_Query_ModuleVersions_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_ModuleVersions_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_ModuleVersions_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + return nil } @@ -333,6 +409,8 @@ var ( pattern_Query_AppliedPlan_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"lbm", "upgrade", "v1", "applied_plan", "name"}, "", runtime.AssumeColonVerbOpt(true))) pattern_Query_UpgradedConsensusState_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"lbm", "upgrade", "v1", "upgraded_consensus_state", "last_height"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Query_ModuleVersions_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"lbm", "upgrade", "v1", "module_versions"}, "", runtime.AssumeColonVerbOpt(true))) ) var ( @@ -341,4 +419,6 @@ var ( forward_Query_AppliedPlan_0 = runtime.ForwardResponseMessage forward_Query_UpgradedConsensusState_0 = runtime.ForwardResponseMessage + + forward_Query_ModuleVersions_0 = runtime.ForwardResponseMessage ) diff --git a/x/upgrade/types/upgrade.pb.go b/x/upgrade/types/upgrade.pb.go index 52aa48150f..3507570a2c 100644 --- a/x/upgrade/types/upgrade.pb.go +++ b/x/upgrade/types/upgrade.pb.go @@ -166,43 +166,87 @@ func (m *CancelSoftwareUpgradeProposal) XXX_DiscardUnknown() { var xxx_messageInfo_CancelSoftwareUpgradeProposal proto.InternalMessageInfo +// ModuleVersion specifies a module and its consensus version. 
+type ModuleVersion struct { + // name of the app module + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // consensus version of the app module + Version uint64 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"` +} + +func (m *ModuleVersion) Reset() { *m = ModuleVersion{} } +func (m *ModuleVersion) String() string { return proto.CompactTextString(m) } +func (*ModuleVersion) ProtoMessage() {} +func (*ModuleVersion) Descriptor() ([]byte, []int) { + return fileDescriptor_3f5591fe51496e2b, []int{3} +} +func (m *ModuleVersion) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ModuleVersion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ModuleVersion.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ModuleVersion) XXX_Merge(src proto.Message) { + xxx_messageInfo_ModuleVersion.Merge(m, src) +} +func (m *ModuleVersion) XXX_Size() int { + return m.Size() +} +func (m *ModuleVersion) XXX_DiscardUnknown() { + xxx_messageInfo_ModuleVersion.DiscardUnknown(m) +} + +var xxx_messageInfo_ModuleVersion proto.InternalMessageInfo + func init() { proto.RegisterType((*Plan)(nil), "lbm.upgrade.v1.Plan") proto.RegisterType((*SoftwareUpgradeProposal)(nil), "lbm.upgrade.v1.SoftwareUpgradeProposal") proto.RegisterType((*CancelSoftwareUpgradeProposal)(nil), "lbm.upgrade.v1.CancelSoftwareUpgradeProposal") + proto.RegisterType((*ModuleVersion)(nil), "lbm.upgrade.v1.ModuleVersion") } func init() { proto.RegisterFile("lbm/upgrade/v1/upgrade.proto", fileDescriptor_3f5591fe51496e2b) } var fileDescriptor_3f5591fe51496e2b = []byte{ - // 420 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x52, 0x3f, 0x6f, 0xd4, 0x30, - 0x1c, 0x8d, 0x69, 0x5a, 0x51, 0x57, 0x62, 0x30, 0x07, 0x84, 0x53, 0x71, 0xa2, 0x5b, 0xb8, 0x05, - 0x5b, 0x85, 0x05, 0x75, 0xe3, 0x3a, 0xb1, 0x55, 0x29, 0x2c, 0x48, 0xa8, 0x72, 0x12, 0x5f, 0xce, - 0xe0, 0x3f, 0x51, 0xe2, 0x2b, 0xe4, 0x13, 0xb0, 0xf6, 0x23, 0xf0, 0x71, 0x6e, 0xec, 0xd8, 0xa9, - 0xd0, 0xbb, 0x85, 0xb9, 0x9f, 0x00, 0xd9, 0x49, 0x10, 0xff, 0xc6, 0x6e, 0xcf, 0x2f, 0xef, 0xf7, - 0xde, 0xcf, 0x2f, 0x86, 0xfb, 0x32, 0x53, 0x74, 0x59, 0x95, 0x35, 0x2b, 0x38, 0x3d, 0x3b, 0x18, - 0x20, 0xa9, 0x6a, 0x63, 0x0d, 0xba, 0x27, 0x33, 0x45, 0x06, 0xea, 0xec, 0x60, 0xfc, 0xb8, 0x34, - 0xa6, 0x94, 0x9c, 0xfa, 0xaf, 0xd9, 0x72, 0x4e, 0x99, 0x6e, 0x3b, 0xe9, 0x78, 0x54, 0x9a, 0xd2, - 0x78, 0x48, 0x1d, 0xea, 0xd9, 0xf8, 0xef, 0x01, 0x2b, 0x14, 0x6f, 0x2c, 0x53, 0x55, 0x27, 0x98, - 0xdc, 0x00, 0x18, 0x1e, 0x4b, 0xa6, 0x11, 0x82, 0xa1, 0x66, 0x8a, 0x47, 0x20, 0x01, 0xd3, 0xdd, - 0xd4, 0x63, 0xf4, 0x12, 0x86, 0x4e, 0x1f, 0xdd, 0x49, 0xc0, 0x74, 0xef, 0xf9, 0x98, 0x74, 0x66, - 0x64, 0x30, 0x23, 0x6f, 0x06, 0xb3, 0xd9, 0xdd, 0xd5, 0x55, 0x1c, 0x9c, 0x7f, 0x8b, 0x41, 0xea, - 0x27, 0xd0, 0x43, 0xb8, 0xb3, 0xe0, 0xa2, 0x5c, 0xd8, 0x68, 0x2b, 0x01, 0xd3, 0xad, 0xb4, 0x3f, - 0xb9, 0x14, 0xa1, 0xe7, 0x26, 0x0a, 0xbb, 0x14, 0x87, 0xd1, 0x07, 0xf8, 0xa0, 0xbf, 0x62, 0x71, - 0x9a, 0x4b, 0xc1, 0xb5, 0x3d, 0x6d, 0x2c, 0xb3, 0x3c, 0xda, 0xf6, 0xb1, 0xa3, 0x7f, 0x62, 0x5f, - 0xe9, 0x76, 0x96, 0xdc, 0x5c, 0xc5, 0xfb, 0x2d, 0x53, 0xf2, 0x70, 0xf2, 0xdf, 0xe1, 0x49, 0x7a, - 0x7f, 0xe0, 0x8f, 0x3c, 0x7d, 0xe2, 0xd8, 0xc3, 0xf0, 0xc7, 0xd7, 0x18, 0x4c, 0xbe, 0x00, 0xf8, - 0xe8, 0xc4, 0xcc, 0xed, 0x27, 0x56, 0xf3, 0xb7, 0x9d, 0xea, 
0xb8, 0x36, 0x95, 0x69, 0x98, 0x44, - 0x23, 0xb8, 0x6d, 0x85, 0x95, 0x43, 0x11, 0xdd, 0x01, 0x25, 0x70, 0xaf, 0xe0, 0x4d, 0x5e, 0x8b, - 0xca, 0x0a, 0xa3, 0x7d, 0x21, 0xbb, 0xe9, 0xef, 0x14, 0x22, 0x30, 0xac, 0x24, 0xd3, 0xfe, 0xbe, - 0x6e, 0xe9, 0x3f, 0xff, 0x1c, 0x71, 0x1d, 0xcf, 0x42, 0xd7, 0x52, 0xea, 0x75, 0xfd, 0x26, 0xef, - 0xe1, 0x93, 0x23, 0xa6, 0x73, 0x2e, 0x6f, 0x79, 0x9d, 0xce, 0x7e, 0xf6, 0x7a, 0x75, 0x8d, 0x83, - 0xcb, 0x6b, 0x1c, 0xac, 0xd6, 0x18, 0x5c, 0xac, 0x31, 0xf8, 0xbe, 0xc6, 0xe0, 0x7c, 0x83, 0x83, - 0x8b, 0x0d, 0x0e, 0x2e, 0x37, 0x38, 0x78, 0xf7, 0xb4, 0x14, 0x76, 0xb1, 0xcc, 0x48, 0x6e, 0x14, - 0x95, 0x42, 0x73, 0x2a, 0x33, 0xf5, 0xac, 0x29, 0x3e, 0xd2, 0xcf, 0xbf, 0x5e, 0xa5, 0x6d, 0x2b, - 0xde, 0x64, 0x3b, 0xbe, 0xfe, 0x17, 0x3f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xda, 0xfd, 0x38, 0xfa, - 0xb1, 0x02, 0x00, 0x00, + // 455 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x52, 0x3d, 0x6f, 0xd3, 0x40, + 0x18, 0xf6, 0x51, 0xb7, 0xb4, 0x57, 0xc1, 0x70, 0x04, 0x30, 0x51, 0xb1, 0xad, 0x2c, 0x64, 0xe1, + 0x4e, 0x85, 0x05, 0x65, 0x23, 0x1d, 0x10, 0x03, 0x52, 0xe5, 0x02, 0x03, 0x12, 0xaa, 0xce, 0xf1, + 0xc5, 0x39, 0xb8, 0x0f, 0xcb, 0xbe, 0x04, 0xfc, 0x0b, 0x58, 0x3b, 0x32, 0xf6, 0xe7, 0x64, 0xec, + 0xd8, 0xa9, 0xd0, 0x64, 0x61, 0xee, 0x2f, 0x40, 0x77, 0xb6, 0x11, 0x1f, 0x1d, 0xd9, 0x9e, 0xf7, + 0xb9, 0xe7, 0x7d, 0xde, 0xaf, 0x83, 0x7b, 0x22, 0x95, 0x64, 0x5e, 0xe4, 0x25, 0xcd, 0x18, 0x59, + 0xec, 0x77, 0x10, 0x17, 0xa5, 0x36, 0x1a, 0xdd, 0x16, 0xa9, 0xc4, 0x1d, 0xb5, 0xd8, 0xef, 0x3f, + 0xc8, 0xb5, 0xce, 0x05, 0x23, 0xee, 0x35, 0x9d, 0x4f, 0x09, 0x55, 0x75, 0x23, 0xed, 0xf7, 0x72, + 0x9d, 0x6b, 0x07, 0x89, 0x45, 0x2d, 0x1b, 0xfd, 0x9d, 0x60, 0xb8, 0x64, 0x95, 0xa1, 0xb2, 0x68, + 0x04, 0x83, 0x2b, 0x00, 0xfd, 0x43, 0x41, 0x15, 0x42, 0xd0, 0x57, 0x54, 0xb2, 0x00, 0xc4, 0x60, + 0xb8, 0x93, 0x38, 0x8c, 0x9e, 0x41, 0xdf, 0xea, 0x83, 0x1b, 0x31, 0x18, 0xee, 0x3e, 0xe9, 0xe3, + 0xc6, 0x0c, 0x77, 0x66, 0xf8, 0x75, 0x67, 0x36, 0xde, 0x5e, 0x5e, 0x44, 0xde, 0xc9, 0xb7, 0x08, + 0x24, 0x2e, 0x03, 0xdd, 0x83, 0x5b, 0x33, 0xc6, 0xf3, 0x99, 0x09, 0x36, 0x62, 0x30, 0xdc, 0x48, + 0xda, 0xc8, 0x56, 0xe1, 0x6a, 0xaa, 0x03, 0xbf, 0xa9, 0x62, 0x31, 0xfa, 0x00, 0xef, 0xb6, 0x23, + 0x66, 0xc7, 0x13, 0xc1, 0x99, 0x32, 0xc7, 0x95, 0xa1, 0x86, 0x05, 0x9b, 0xae, 0x6c, 0xef, 0x9f, + 0xb2, 0xcf, 0x55, 0x3d, 0x8e, 0xaf, 0x2e, 0xa2, 0xbd, 0x9a, 0x4a, 0x31, 0x1a, 0x5c, 0x9b, 0x3c, + 0x48, 0xee, 0x74, 0xfc, 0x81, 0xa3, 0x8f, 0x2c, 0x3b, 0xf2, 0x7f, 0x9c, 0x46, 0x60, 0xf0, 0x05, + 0xc0, 0xfb, 0x47, 0x7a, 0x6a, 0x3e, 0xd1, 0x92, 0xbd, 0x69, 0x54, 0x87, 0xa5, 0x2e, 0x74, 0x45, + 0x05, 0xea, 0xc1, 0x4d, 0xc3, 0x8d, 0xe8, 0x16, 0xd1, 0x04, 0x28, 0x86, 0xbb, 0x19, 0xab, 0x26, + 0x25, 0x2f, 0x0c, 0xd7, 0xca, 0x2d, 0x64, 0x27, 0xf9, 0x9d, 0x42, 0x18, 0xfa, 0x85, 0xa0, 0xca, + 0xcd, 0x6b, 0x9b, 0xfe, 0xf3, 0x72, 0xd8, 0xee, 0x78, 0xec, 0xdb, 0x2d, 0x25, 0x4e, 0xd7, 0x76, + 0xf2, 0x1e, 0x3e, 0x3c, 0xa0, 0x6a, 0xc2, 0xc4, 0x7f, 0x6e, 0xa7, 0xb5, 0x7f, 0x01, 0x6f, 0xbd, + 0xd2, 0xd9, 0x5c, 0xb0, 0xb7, 0xac, 0xac, 0x6c, 0x97, 0xd7, 0x5d, 0x39, 0x80, 0x37, 0x17, 0xcd, + 0xb3, 0x33, 0xf2, 0x93, 0x2e, 0x1c, 0x6d, 0x7f, 0x3d, 0x8d, 0x80, 0x35, 0x1a, 0xbf, 0x5c, 0x5e, + 0x86, 0xde, 0xf9, 0x65, 0xe8, 0x2d, 0x57, 0x21, 0x38, 0x5b, 0x85, 0xe0, 0xfb, 0x2a, 0x04, 0x27, + 0xeb, 0xd0, 0x3b, 0x5b, 0x87, 0xde, 0xf9, 0x3a, 0xf4, 0xde, 0x3d, 0xca, 0xb9, 0x99, 0xcd, 0x53, + 0x3c, 0xd1, 0x92, 0x08, 0xae, 0x18, 0x11, 0xa9, 0x7c, 0x5c, 0x65, 0x1f, 0xc9, 0xe7, 0x5f, 0xdf, + 0xdb, 0xd4, 
0x05, 0xab, 0xd2, 0x2d, 0x77, 0xc7, 0xa7, 0x3f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x63, + 0x94, 0x3c, 0xad, 0xfa, 0x02, 0x00, 0x00, } func (this *Plan) Equal(that interface{}) bool { @@ -298,6 +342,33 @@ func (this *CancelSoftwareUpgradeProposal) Equal(that interface{}) bool { } return true } +func (this *ModuleVersion) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ModuleVersion) + if !ok { + that2, ok := that.(ModuleVersion) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if this.Version != that1.Version { + return false + } + return true +} func (m *Plan) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -444,6 +515,41 @@ func (m *CancelSoftwareUpgradeProposal) MarshalToSizedBuffer(dAtA []byte) (int, return len(dAtA) - i, nil } +func (m *ModuleVersion) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ModuleVersion) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ModuleVersion) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Version != 0 { + i = encodeVarintUpgrade(dAtA, i, uint64(m.Version)) + i-- + dAtA[i] = 0x10 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintUpgrade(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func encodeVarintUpgrade(dAtA []byte, offset int, v uint64) int { offset -= sovUpgrade(v) base := offset @@ -517,6 +623,22 @@ func (m *CancelSoftwareUpgradeProposal) Size() (n int) { return n } +func (m *ModuleVersion) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovUpgrade(uint64(l)) + } + if m.Version != 0 { + n += 1 + sovUpgrade(uint64(m.Version)) + } + return n +} + func sovUpgrade(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } @@ -986,6 +1108,107 @@ func (m *CancelSoftwareUpgradeProposal) Unmarshal(dAtA []byte) error { } return nil } +func (m *ModuleVersion) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowUpgrade + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ModuleVersion: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ModuleVersion: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowUpgrade + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthUpgrade + } + postIndex := iNdEx + intStringLen + if 
postIndex < 0 {
+				return ErrInvalidLengthUpgrade
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+			}
+			m.Version = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowUpgrade
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Version |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipUpgrade(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthUpgrade
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
 func skipUpgrade(dAtA []byte) (n int, err error) {
 	l := len(dAtA)
 	iNdEx := 0

From f0861434aa505b05540bc52bcff84dff80b6fcdb Mon Sep 17 00:00:00 2001
From: leesj9476
Date: Tue, 9 Nov 2021 06:48:42 +0900
Subject: [PATCH 08/12] change CHANGELOG pull number

---
 CHANGELOG.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 5e8b33bdb6..433eb26217 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,7 +5,7 @@
 ### Features
 * (feat) [\#352] (https://github.com/line/lbm-sdk/pull/352) iavl, db & disk stats logging
 * (x/gov) [\#368](https://github.com/line/lbm-sdk/pull/368) Governance Split Votes, use `MsgWeightedVote` to send a split vote. Sending a regular `MsgVote` will convert the underlying vote option into a weighted vote with weight 1.
-* (x/upgrade) [\#373] (https://github.com/line/lbm-sdk/pull/373) To smoothen the update to the latest stable release, the SDK includes vesion map for managing migrations between SDK versions.
+* (x/upgrade) [\#377](https://github.com/line/lbm-sdk/pull/377) To smoothen the update to the latest stable release, the SDK includes a version map for managing migrations between SDK versions.

 ### Improvements
 * (slashing) [\#347](https://github.com/line/lbm-sdk/pull/347) Introduce VoterSetCounter

From 742d960e2109b898697f5fcba16a3f4acc983646 Mon Sep 17 00:00:00 2001
From: leesj9476
Date: Tue, 9 Nov 2021 08:27:44 +0900
Subject: [PATCH 09/12] fix ST1022 lint

---
 x/genutil/client/cli/migrate.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/x/genutil/client/cli/migrate.go b/x/genutil/client/cli/migrate.go
index fa959abe18..589df98e7d 100644
--- a/x/genutil/client/cli/migrate.go
+++ b/x/genutil/client/cli/migrate.go
@@ -18,7 +18,7 @@ import (

 const flagGenesisTime = "genesis-time"

-// Allow applications to extend and modify the migration process.
+// VersionMap allows applications to extend and modify the migration process.
 //
 // Ref: https://github.com/cosmos/cosmos-sdk/issues/5041
 var VersionMap = types.VersionMap{}

From 3a6b38a7eb63ce726364b4b29618860b5c6f0464 Mon Sep 17 00:00:00 2001
From: leesj9476
Date: Tue, 9 Nov 2021 18:12:09 +0900
Subject: [PATCH 10/12] remove non-deterministic upgrade order

---
 store/rootmulti/store.go   | 17 ++++++++++++++++-
 types/module/module.go     | 14 +++++++++++++-
 x/upgrade/keeper/keeper.go | 14 +++++++++++++-
 3 files changed, 42 insertions(+), 3 deletions(-)

diff --git a/store/rootmulti/store.go b/store/rootmulti/store.go
index f042fdc8ca..dafe78b031 100644
--- a/store/rootmulti/store.go
+++ b/store/rootmulti/store.go
@@ -192,7 +192,22 @@ func (rs *Store) loadVersion(ver int64, upgrades *types.StoreUpgrades) error {

 	// load each Store (note this doesn't panic on unmounted keys now)
 	var newStores = make(map[types.StoreKey]types.CommitKVStore)
-	for key, storeParams := range rs.storesParams {
+	storesKeys := make([]types.StoreKey, 0, len(rs.storesParams))
+
+	for key := range rs.storesParams {
+		storesKeys = append(storesKeys, key)
+	}
+	if upgrades != nil {
+		// deterministic iteration order for upgrades
+		// (as the underlying store may change and
+		// upgrades make store changes where the execution order may matter)
+		sort.Slice(storesKeys, func(i, j int) bool {
+			return storesKeys[i].Name() < storesKeys[j].Name()
+		})
+	}
+
+	for _, key := range storesKeys {
+		storeParams := rs.storesParams[key]
 		commitID := rs.getCommitID(infos, key.Name())

 		// If it has been added, set the initial version
diff --git a/types/module/module.go b/types/module/module.go
index 498ec1fb76..e1811505c4 100644
--- a/types/module/module.go
+++ b/types/module/module.go
@@ -30,6 +30,7 @@ package module

 import (
 	"encoding/json"
+	"sort"

 	"github.com/gorilla/mux"
 	"github.com/grpc-ecosystem/grpc-gateway/runtime"
@@ -397,7 +398,18 @@ func (m Manager) RunMigrations(ctx sdk.Context, cfg Configurator, fromVM Version
 	}

 	updatedVM := make(VersionMap)
-	for moduleName, module := range m.Modules {
+	// for deterministic iteration order
+	// (as some migrations depend on other modules
+	// and the order of executing migrations matters)
+	// TODO: make the order user-configurable?
+	sortedModNames := make([]string, 0, len(m.Modules))
+	for key := range m.Modules {
+		sortedModNames = append(sortedModNames, key)
+	}
+	sort.Strings(sortedModNames)
+
+	for _, moduleName := range sortedModNames {
+		module := m.Modules[moduleName]
 		fromVersion, exists := fromVM[moduleName]
 		toVersion := module.ConsensusVersion()

diff --git a/x/upgrade/keeper/keeper.go b/x/upgrade/keeper/keeper.go
index 8734ac8723..a40d9a07b7 100644
--- a/x/upgrade/keeper/keeper.go
+++ b/x/upgrade/keeper/keeper.go
@@ -7,6 +7,7 @@ import (
 	"os"
 	"path"
 	"path/filepath"
+	"sort"

 	"github.com/line/ostracon/libs/log"
 	ostos "github.com/line/ostracon/libs/os"
@@ -56,7 +57,18 @@ func (k Keeper) SetModuleVersionMap(ctx sdk.Context, vm module.VersionMap) {
 	if len(vm) > 0 {
 		store := ctx.KVStore(k.storeKey)
 		versionStore := prefix.NewStore(store, []byte{types.VersionMapByte})
-		for modName, ver := range vm {
+		// Even though the underlying (cachekv) store is sorted, we still
+		// prefer a deterministic iteration order of the map, to avoid undesired
+		// surprises if we ever change stores.
+ sortedModNames := make([]string, 0, len(vm)) + + for key := range vm { + sortedModNames = append(sortedModNames, key) + } + sort.Strings(sortedModNames) + + for _, modName := range sortedModNames { + ver := vm[modName] nameBytes := []byte(modName) verBytes := make([]byte, 8) binary.BigEndian.PutUint64(verBytes, ver) From be9fe4ad473f8ec9a8af682ec2eef4ad5064cbce Mon Sep 17 00:00:00 2001 From: leesj9476 Date: Tue, 9 Nov 2021 18:12:29 +0900 Subject: [PATCH 11/12] apply ostraconv1.0.1 --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 28662096ea..8d72a46d8c 100644 --- a/go.mod +++ b/go.mod @@ -28,7 +28,7 @@ require ( github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 github.com/grpc-ecosystem/grpc-gateway v1.16.0 github.com/line/iavl/v2 v2.0.0-init.1.0.20211019080724-001e2272a25d - github.com/line/ostracon v1.0.1 + github.com/line/ostracon v1.0.2 github.com/line/tm-db/v2 v2.0.0-init.1.0.20210824011847-fcfa67dd3c70 github.com/line/wasmvm v0.14.0-0.8.0 github.com/magiconair/properties v1.8.5 diff --git a/go.sum b/go.sum index 70f46b0606..49c8140a2e 100644 --- a/go.sum +++ b/go.sum @@ -477,8 +477,8 @@ github.com/line/gorocksdb v0.0.0-20210406043732-d4bea34b6d55/go.mod h1:DHRJroSL7 github.com/line/iavl/v2 v2.0.0-init.1.0.20211019080724-001e2272a25d h1:mXlamMU/uWGrW+7BJCUroZYo8mZ2q2zYS88dmsS0Ous= github.com/line/iavl/v2 v2.0.0-init.1.0.20211019080724-001e2272a25d/go.mod h1:0Xz+0i1nlB9lrjUDEwpDRhcmjfEAkOjd20dRb40FBso= github.com/line/ostracon v0.34.9-0.20210429084710-ef4fe0a40c7d/go.mod h1:ttnbq+yQJMQ9a2MT5SEisOoa/+pOgh2KenTiK/rVdiw= -github.com/line/ostracon v1.0.1 h1:RDoYP3jqz4zCSOTu21D0rQ3Jcl6CcK4qsKTOZss1zQ0= -github.com/line/ostracon v1.0.1/go.mod h1:elTiUFLvBz6Yaze+ZZLlbUnhqKWLJ7cMy/P9rSabafQ= +github.com/line/ostracon v1.0.2 h1:sbZtNrLYFKDP74tXYgMVkrzKJKk2Sqn1Oczl1+f7sgI= +github.com/line/ostracon v1.0.2/go.mod h1:elTiUFLvBz6Yaze+ZZLlbUnhqKWLJ7cMy/P9rSabafQ= github.com/line/tm-db/v2 v2.0.0-init.1.0.20210413083915-5bb60e117524/go.mod h1:wmkyPabXjtVZ1dvRofmurjaceghywtCSYGqFuFS+TbI= github.com/line/tm-db/v2 v2.0.0-init.1.0.20210824011847-fcfa67dd3c70 h1:Izv/u19P8salnSZGAgNHFugNlzWLgREiL+AmWK8C/lE= github.com/line/tm-db/v2 v2.0.0-init.1.0.20210824011847-fcfa67dd3c70/go.mod h1:wmkyPabXjtVZ1dvRofmurjaceghywtCSYGqFuFS+TbI= From cd442e0fa124fbe1f7b4e76e5aaf923b6177155d Mon Sep 17 00:00:00 2001 From: leesj9476 Date: Wed, 10 Nov 2021 23:45:57 +0900 Subject: [PATCH 12/12] remove unintended commands about keyring --- client/keys/migrate.go | 147 ---------------------------- client/keys/migrate_test.go | 40 -------- client/keys/root.go | 1 - client/keys/root_test.go | 2 +- client/keys/utils.go | 14 --- crypto/keyring/keyring.go | 2 - crypto/keyring/legacy.go | 189 ------------------------------------ 7 files changed, 1 insertion(+), 394 deletions(-) delete mode 100644 client/keys/migrate.go delete mode 100644 client/keys/migrate_test.go delete mode 100644 crypto/keyring/legacy.go diff --git a/client/keys/migrate.go b/client/keys/migrate.go deleted file mode 100644 index 6cd7aade2e..0000000000 --- a/client/keys/migrate.go +++ /dev/null @@ -1,147 +0,0 @@ -package keys - -import ( - "bufio" - "fmt" - "io/ioutil" - "os" - - "github.com/pkg/errors" - "github.com/spf13/cobra" - - "github.com/line/lbm-sdk/client/flags" - "github.com/line/lbm-sdk/client/input" - "github.com/line/lbm-sdk/crypto/keyring" - sdk "github.com/line/lbm-sdk/types" -) - -// migratePassphrase is used as a no-op migration key passphrase as a passphrase -// is not needed 
for importing into the Keyring keystore. -const migratePassphrase = "NOOP_PASSPHRASE" - -// MigrateCommand migrates key information from legacy keybase to OS secret store. -func MigrateCommand() *cobra.Command { - cmd := &cobra.Command{ - Use: "migrate ", - Short: "Migrate keys from the legacy (db-based) Keybase", - Long: `Migrate key information from the legacy (db-based) Keybase to the new keyring-based Keyring. -The legacy Keybase used to persist keys in a LevelDB database stored in a 'keys' sub-directory of -the old client application's home directory, e.g. $HOME/.gaiacli/keys/. -For each key material entry, the command will prompt if the key should be skipped or not. If the key -is not to be skipped, the passphrase must be entered. The key will only be migrated if the passphrase -is correct. Otherwise, the command will exit and migration must be repeated. - -It is recommended to run in 'dry-run' mode first to verify all key migration material. -`, - Args: cobra.ExactArgs(1), - RunE: runMigrateCmd, - } - - cmd.Flags().Bool(flags.FlagDryRun, false, "Run migration without actually persisting any changes to the new Keybase") - return cmd -} - -func runMigrateCmd(cmd *cobra.Command, args []string) error { - rootDir, _ := cmd.Flags().GetString(flags.FlagHome) - - // instantiate legacy keybase - var legacyKb keyring.LegacyKeybase - legacyKb, err := NewLegacyKeyBaseFromDir(args[0]) - if err != nil { - return err - } - - defer func() { _ = legacyKb.Close() }() - - // fetch list of keys from legacy keybase - oldKeys, err := legacyKb.List() - if err != nil { - return err - } - - buf := bufio.NewReader(cmd.InOrStdin()) - keyringServiceName := sdk.KeyringServiceName() - - var ( - tmpDir string - migrator keyring.Importer - ) - - if dryRun, _ := cmd.Flags().GetBool(flags.FlagDryRun); dryRun { - tmpDir, err = ioutil.TempDir("", "migrator-migrate-dryrun") - if err != nil { - return errors.Wrap(err, "failed to create temporary directory for dryrun migration") - } - - defer func() { _ = os.RemoveAll(tmpDir) }() - - migrator, err = keyring.New(keyringServiceName, keyring.BackendTest, tmpDir, buf) - } else { - backend, _ := cmd.Flags().GetString(flags.FlagKeyringBackend) - migrator, err = keyring.New(keyringServiceName, backend, rootDir, buf) - } - - if err != nil { - return errors.Wrap(err, fmt.Sprintf( - "failed to initialize keybase for service %s at directory %s", - keyringServiceName, rootDir, - )) - } - - if len(oldKeys) == 0 { - cmd.PrintErrln("Migration Aborted: no keys to migrate") - return nil - } - - for _, oldInfo := range oldKeys { - keyName := oldInfo.GetName() - keyType := oldInfo.GetType() - - cmd.PrintErrf("Migrating key: '%s (%s)' ...\n", keyName, keyType) - - // allow user to skip migrating specific keys - ok, err := input.GetConfirmation("Skip key migration?", buf, cmd.ErrOrStderr()) - if err != nil { - return err - } - if ok { - continue - } - - // TypeLocal needs an additional step to ask password. - // The other keyring types are handled by ImportInfo. 
- if keyType != keyring.TypeLocal { - infoImporter, ok := migrator.(keyring.LegacyInfoImporter) - if !ok { - return fmt.Errorf("the Keyring implementation does not support import operations of Info types") - } - - if err = infoImporter.ImportInfo(oldInfo); err != nil { - return err - } - - continue - } - - password, err := input.GetPassword("Enter passphrase to decrypt key:", buf) - if err != nil { - return err - } - - // NOTE: A passphrase is not actually needed here as when the key information - // is imported into the Keyring-based Keybase it only needs the password - // (see: writeLocalKey). - armoredPriv, err := legacyKb.ExportPrivKey(keyName, password, migratePassphrase) - if err != nil { - return err - } - - if err := migrator.ImportPrivKey(keyName, armoredPriv, migratePassphrase); err != nil { - return err - } - - } - cmd.PrintErrln("Migration complete.") - - return err -} diff --git a/client/keys/migrate_test.go b/client/keys/migrate_test.go deleted file mode 100644 index 8b835af380..0000000000 --- a/client/keys/migrate_test.go +++ /dev/null @@ -1,40 +0,0 @@ -package keys - -import ( - "context" - "fmt" - "testing" - - "github.com/line/lbm-sdk/client" - - "github.com/stretchr/testify/assert" - // "github.com/stretchr/testify/require" - - "github.com/line/lbm-sdk/client/flags" - "github.com/line/lbm-sdk/crypto/keyring" - "github.com/line/lbm-sdk/testutil" -) - -func Test_runMigrateCmd(t *testing.T) { - kbHome := t.TempDir() - clientCtx := client.Context{}.WithKeyringDir(kbHome) - ctx := context.WithValue(context.Background(), client.ClientContextKey, &clientCtx) - - // require.NoError(t, copy.Copy("testdata", kbHome)) - - cmd := MigrateCommand() - cmd.Flags().AddFlagSet(Commands("home").PersistentFlags()) - //mockIn := testutil.ApplyMockIODiscardOutErr(cmd) - mockIn, mockOut := testutil.ApplyMockIO(cmd) - - cmd.SetArgs([]string{ - kbHome, - //fmt.Sprintf("--%s=%s", flags.FlagHome, kbHome), - fmt.Sprintf("--%s=true", flags.FlagDryRun), - fmt.Sprintf("--%s=%s", flags.FlagKeyringBackend, keyring.BackendTest), - }) - - mockIn.Reset("\n12345678\n\n\n\n\n") - t.Log(mockOut.String()) - assert.NoError(t, cmd.ExecuteContext(ctx)) -} diff --git a/client/keys/root.go b/client/keys/root.go index e48b7baec0..c0a5da3da8 100644 --- a/client/keys/root.go +++ b/client/keys/root.go @@ -47,7 +47,6 @@ The pass backend requires GnuPG: https://gnupg.org/ flags.LineBreak, DeleteKeyCommand(), ParseKeyStringCommand(), - MigrateCommand(), ) cmd.PersistentFlags().String(flags.FlagHome, defaultNodeHome, "The application home directory") diff --git a/client/keys/root_test.go b/client/keys/root_test.go index f66ae9265d..b6c2f5f88f 100644 --- a/client/keys/root_test.go +++ b/client/keys/root_test.go @@ -11,5 +11,5 @@ func TestCommands(t *testing.T) { assert.NotNil(t, rootCommands) // Commands are registered - assert.Equal(t, 10, len(rootCommands.Commands())) + assert.Equal(t, 9, len(rootCommands.Commands())) } diff --git a/client/keys/utils.go b/client/keys/utils.go index 80a28ed133..d13e212338 100644 --- a/client/keys/utils.go +++ b/client/keys/utils.go @@ -3,7 +3,6 @@ package keys import ( "fmt" "io" - "path/filepath" yaml "gopkg.in/yaml.v2" @@ -14,23 +13,10 @@ import ( const ( OutputFormatText = "text" OutputFormatJSON = "json" - - // defaultKeyDBName is the client's subdirectory where keys are stored. - defaultKeyDBName = "keys" ) type bechKeyOutFn func(keyInfo cryptokeyring.Info) (cryptokeyring.KeyOutput, error) -// NewLegacyKeyBaseFromDir initializes a legacy keybase at the rootDir directory. 
Keybase -// options can be applied when generating this new Keybase. -func NewLegacyKeyBaseFromDir(rootDir string, opts ...cryptokeyring.KeybaseOption) (cryptokeyring.LegacyKeybase, error) { - return getLegacyKeyBaseFromDir(rootDir, opts...) -} - -func getLegacyKeyBaseFromDir(rootDir string, opts ...cryptokeyring.KeybaseOption) (cryptokeyring.LegacyKeybase, error) { - return cryptokeyring.NewLegacy(defaultKeyDBName, filepath.Join(rootDir, "keys"), opts...) -} - func printKeyInfo(w io.Writer, keyInfo cryptokeyring.Info, bechKeyOut bechKeyOutFn, output string) { ko, err := bechKeyOut(keyInfo) if err != nil { diff --git a/crypto/keyring/keyring.go b/crypto/keyring/keyring.go index 183d35f86e..4dffe640d1 100644 --- a/crypto/keyring/keyring.go +++ b/crypto/keyring/keyring.go @@ -199,8 +199,6 @@ type keystore struct { func infoKey(name string) []byte { return []byte(fmt.Sprintf("%s.%s", name, infoSuffix)) } -func infoKeyBz(name string) []byte { return infoKey(name) } - func newKeystore(kr keyring.Keyring, opts ...Option) keystore { // Default options for keybase options := Options{ diff --git a/crypto/keyring/legacy.go b/crypto/keyring/legacy.go deleted file mode 100644 index 297f813bfd..0000000000 --- a/crypto/keyring/legacy.go +++ /dev/null @@ -1,189 +0,0 @@ -package keyring - -import ( - "fmt" - "strings" - - ostos "github.com/line/ostracon/libs/os" - dbm "github.com/line/tm-db/v2" - "github.com/pkg/errors" - - "github.com/line/lbm-sdk/crypto" - "github.com/line/lbm-sdk/crypto/types" - sdk "github.com/line/lbm-sdk/types" - sdkerrors "github.com/line/lbm-sdk/types/errors" -) - -// LegacyKeybase is implemented by the legacy keybase implementation. -type LegacyKeybase interface { - List() ([]Info, error) - Export(name string) (armor string, err error) - ExportPrivKey(name, decryptPassphrase, encryptPassphrase string) (armor string, err error) - ExportPubKey(name string) (armor string, err error) - Close() error -} - -// NewLegacy creates a new instance of a legacy keybase. -func NewLegacy(name, dir string, opts ...KeybaseOption) (LegacyKeybase, error) { - if err := ostos.EnsureDir(dir, 0700); err != nil { - return nil, fmt.Errorf("failed to create Keybase directory: %s", err) - } - - db, err := sdk.NewLevelDB(name, dir) - if err != nil { - return nil, err - } - - return newDBKeybase(db), nil -} - -var _ LegacyKeybase = dbKeybase{} - -// dbKeybase combines encryption and storage implementation to provide a -// full-featured key manager. -// -// Deprecated: dbKeybase will be removed in favor of keyringKeybase. -type dbKeybase struct { - db dbm.DB -} - -// newDBKeybase creates a new dbKeybase instance using the provided DB for -// reading and writing keys. -func newDBKeybase(db dbm.DB) dbKeybase { - return dbKeybase{ - db: db, - } -} - -// List returns the keys from storage in alphabetical order. -func (kb dbKeybase) List() ([]Info, error) { - var res []Info - - iter, err := kb.db.Iterator(nil, nil) - if err != nil { - return nil, err - } - - defer iter.Close() - - for ; iter.Valid(); iter.Next() { - key := string(iter.Key()) - - // need to include only keys in storage that have an info suffix - if strings.HasSuffix(key, infoSuffix) { - info, err := unmarshalInfo(iter.Value()) - if err != nil { - return nil, err - } - - res = append(res, info) - } - } - - return res, nil -} - -// Get returns the public information about one key. 
-func (kb dbKeybase) Get(name string) (Info, error) { - bs, err := kb.db.Get(infoKeyBz(name)) - if err != nil { - return nil, err - } - - if len(bs) == 0 { - return nil, sdkerrors.Wrap(sdkerrors.ErrKeyNotFound, name) - } - - return unmarshalInfo(bs) -} - -// ExportPrivateKeyObject returns a PrivKey object given the key name and -// passphrase. An error is returned if the key does not exist or if the Info for -// the key is invalid. -func (kb dbKeybase) ExportPrivateKeyObject(name string, passphrase string) (types.PrivKey, error) { - info, err := kb.Get(name) - if err != nil { - return nil, err - } - - var priv types.PrivKey - - switch i := info.(type) { - case localInfo: - linfo := i - if linfo.PrivKeyArmor == "" { - err = fmt.Errorf("private key not available") - return nil, err - } - - priv, _, err = crypto.UnarmorDecryptPrivKey(linfo.PrivKeyArmor, passphrase) - if err != nil { - return nil, err - } - - case ledgerInfo, offlineInfo, multiInfo: - return nil, errors.New("only works on local private keys") - } - - return priv, nil -} - -func (kb dbKeybase) Export(name string) (armor string, err error) { - bz, err := kb.db.Get(infoKeyBz(name)) - if err != nil { - return "", err - } - - if bz == nil { - return "", fmt.Errorf("no key to export with name %s", name) - } - - return crypto.ArmorInfoBytes(bz), nil -} - -// ExportPubKey returns public keys in ASCII armored format. It retrieves a Info -// object by its name and return the public key in a portable format. -func (kb dbKeybase) ExportPubKey(name string) (armor string, err error) { - bz, err := kb.db.Get(infoKeyBz(name)) - if err != nil { - return "", err - } - - if bz == nil { - return "", fmt.Errorf("no key to export with name %s", name) - } - - info, err := unmarshalInfo(bz) - if err != nil { - return - } - - return crypto.ArmorPubKeyBytes(info.GetPubKey().Bytes(), string(info.GetAlgo())), nil -} - -// ExportPrivKey returns a private key in ASCII armored format. -// It returns an error if the key does not exist or a wrong encryption passphrase -// is supplied. -func (kb dbKeybase) ExportPrivKey(name string, decryptPassphrase string, - encryptPassphrase string) (armor string, err error) { - priv, err := kb.ExportPrivateKeyObject(name, decryptPassphrase) - if err != nil { - return "", err - } - - info, err := kb.Get(name) - if err != nil { - return "", err - } - - return crypto.EncryptArmorPrivKey(priv, encryptPassphrase, string(info.GetAlgo())), nil -} - -// Close the underlying storage. -func (kb dbKeybase) Close() error { return kb.db.Close() } - -// KeybaseOption overrides options for the db. -type KeybaseOption func(*kbOptions) - -type kbOptions struct { -}
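For reference, the ModuleVersions query wired up in the earlier patches of this series can be exercised from any gRPC client. The sketch below is illustrative only: NewQueryClient is assumed to be the standard gogo-generated constructor (it is not shown in the diffs), the endpoint address is a placeholder, and the behaviour that an empty ModuleName returns every registered module follows the upstream Cosmos SDK convention rather than anything confirmed in these patches.

package main

import (
	"context"
	"fmt"
	"log"

	"google.golang.org/grpc"

	upgradetypes "github.com/line/lbm-sdk/x/upgrade/types"
)

func main() {
	// Dial a node's gRPC endpoint; the address is a placeholder for illustration.
	conn, err := grpc.Dial("localhost:9090", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// QueryClient, QueryModuleVersionsRequest and ModuleVersion come from the
	// generated x/upgrade/types code added above.
	client := upgradetypes.NewQueryClient(conn)

	// An empty ModuleName is assumed to return every registered module;
	// set it to restrict the response to a single module.
	res, err := client.ModuleVersions(context.Background(), &upgradetypes.QueryModuleVersionsRequest{})
	if err != nil {
		log.Fatal(err)
	}

	for _, mv := range res.ModuleVersions {
		fmt.Printf("%s: consensus version %d\n", mv.Name, mv.Version)
	}
}

The same data should also be reachable over the gRPC-Gateway route registered above, i.e. GET /lbm/upgrade/v1/module_versions.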