diff --git a/baseapp/abci.go b/baseapp/abci.go index 748232cf7e87..fb51a0dc6275 100644 --- a/baseapp/abci.go +++ b/baseapp/abci.go @@ -334,7 +334,7 @@ func (app *BaseApp) Commit() (res abci.ResponseCommit) { app.halt() } - app.snapshotManager.SnapshotIfApplicable(header.Height) + go app.snapshotManager.SnapshotIfApplicable(header.Height) app.logger.Info("committed ABCI", "height", commitID.Version, "commit_hash", fmt.Sprintf("%X", commitID.Hash), "retain_height", retainHeight) return abci.ResponseCommit{ diff --git a/baseapp/abci_test.go b/baseapp/abci_test.go index add79d384005..25e477217ac0 100644 --- a/baseapp/abci_test.go +++ b/baseapp/abci_test.go @@ -4,14 +4,16 @@ import ( "fmt" "testing" - "github.com/cosmos/cosmos-sdk/snapshots" - snaphotsTestUtil "github.com/cosmos/cosmos-sdk/testutil/snapshots" - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/stretchr/testify/require" abci "github.com/tendermint/tendermint/abci/types" tmprototypes "github.com/tendermint/tendermint/proto/tendermint/types" dbm "github.com/tendermint/tm-db" + + pruningtypes "github.com/cosmos/cosmos-sdk/pruning/types" + "github.com/cosmos/cosmos-sdk/snapshots" + snapshottypes "github.com/cosmos/cosmos-sdk/snapshots/types" + snaphotstestutil "github.com/cosmos/cosmos-sdk/testutil/snapshots" + sdk "github.com/cosmos/cosmos-sdk/types" ) func TestGetBlockRentionHeight(t *testing.T) { @@ -19,7 +21,7 @@ func TestGetBlockRentionHeight(t *testing.T) { db := dbm.NewMemDB() name := t.Name() - snapshotStore, err := snapshots.NewStore(dbm.NewMemDB(), snaphotsTestUtil.GetTempDir(t)) + snapshotStore, err := snapshots.NewStore(dbm.NewMemDB(), snaphotstestutil.GetTempDir(t)) require.NoError(t, err) testCases := map[string]struct { @@ -43,9 +45,9 @@ func TestGetBlockRentionHeight(t *testing.T) { "pruning iavl snapshot only": { bapp: NewBaseApp( name, logger, db, nil, - SetPruning(sdk.NewPruningOptions(sdk.Nothing)), + SetPruning(pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)), SetMinRetainBlocks(1), - SetSnapshot(snapshotStore, sdk.NewSnapshotOptions(10000, 1)), + SetSnapshot(snapshotStore, snapshottypes.NewSnapshotOptions(10000, 1)), ), maxAgeBlocks: 0, commitHeight: 499000, @@ -54,7 +56,7 @@ func TestGetBlockRentionHeight(t *testing.T) { "pruning state sync snapshot only": { bapp: NewBaseApp( name, logger, db, nil, - SetSnapshot(snapshotStore, sdk.NewSnapshotOptions(50000, 3)), + SetSnapshot(snapshotStore, snapshottypes.NewSnapshotOptions(50000, 3)), SetMinRetainBlocks(1), ), maxAgeBlocks: 0, @@ -73,9 +75,9 @@ func TestGetBlockRentionHeight(t *testing.T) { "pruning all conditions": { bapp: NewBaseApp( name, logger, db, nil, - SetPruning(sdk.NewCustomPruningOptions(0, 0)), + SetPruning(pruningtypes.NewCustomPruningOptions(0, 0)), SetMinRetainBlocks(400000), - SetSnapshot(snapshotStore, sdk.NewSnapshotOptions(50000, 3)), + SetSnapshot(snapshotStore, snapshottypes.NewSnapshotOptions(50000, 3)), ), maxAgeBlocks: 362880, commitHeight: 499000, @@ -84,9 +86,9 @@ func TestGetBlockRentionHeight(t *testing.T) { "no pruning due to no persisted state": { bapp: NewBaseApp( name, logger, db, nil, - SetPruning(sdk.NewCustomPruningOptions(0, 0)), + SetPruning(pruningtypes.NewCustomPruningOptions(0, 0)), SetMinRetainBlocks(400000), - SetSnapshot(snapshotStore, sdk.NewSnapshotOptions(50000, 3)), + SetSnapshot(snapshotStore, snapshottypes.NewSnapshotOptions(50000, 3)), ), maxAgeBlocks: 362880, commitHeight: 10000, @@ -95,9 +97,9 @@ func TestGetBlockRentionHeight(t *testing.T) { "disable pruning": { bapp: NewBaseApp( name, 
logger, db, nil, - SetPruning(sdk.NewCustomPruningOptions(0, 0)), + SetPruning(pruningtypes.NewCustomPruningOptions(0, 0)), SetMinRetainBlocks(0), - SetSnapshot(snapshotStore, sdk.NewSnapshotOptions(50000, 3)), + SetSnapshot(snapshotStore, snapshottypes.NewSnapshotOptions(50000, 3)), ), maxAgeBlocks: 362880, commitHeight: 499000, diff --git a/baseapp/baseapp.go b/baseapp/baseapp.go index 101f4130b803..cf995cf7ae78 100644 --- a/baseapp/baseapp.go +++ b/baseapp/baseapp.go @@ -1,7 +1,6 @@ package baseapp import ( - "errors" "fmt" "reflect" "strings" @@ -302,13 +301,10 @@ func (app *BaseApp) init() error { app.Seal() rms, ok := app.cms.(*rootmulti.Store) - if !ok && app.snapshotManager != nil { - return errors.New("state sync snapshots require a rootmulti store") + if !ok { + return fmt.Errorf("invalid commit multi-store; expected %T, got: %T", &rootmulti.Store{}, app.cms) } - if err := rms.GetPruning().Validate(); err != nil { - return err - } - return nil + return rms.GetPruning().Validate() } func (app *BaseApp) setMinGasPrices(gasPrices sdk.DecCoins) { diff --git a/baseapp/baseapp_test.go b/baseapp/baseapp_test.go index 811a6868f080..8693e62dc574 100644 --- a/baseapp/baseapp_test.go +++ b/baseapp/baseapp_test.go @@ -22,12 +22,12 @@ import ( dbm "github.com/tendermint/tm-db" "github.com/cosmos/cosmos-sdk/codec" + pruningtypes "github.com/cosmos/cosmos-sdk/pruning/types" "github.com/cosmos/cosmos-sdk/snapshots" snapshottypes "github.com/cosmos/cosmos-sdk/snapshots/types" "github.com/cosmos/cosmos-sdk/store/rootmulti" - storeTypes "github.com/cosmos/cosmos-sdk/store/types" - pruningTypes "github.com/cosmos/cosmos-sdk/pruning/types" - snaphotsTestUtil "github.com/cosmos/cosmos-sdk/testutil/snapshots" + storetypes "github.com/cosmos/cosmos-sdk/store/types" + snaphotstestutil "github.com/cosmos/cosmos-sdk/testutil/snapshots" "github.com/cosmos/cosmos-sdk/testutil/testdata" sdk "github.com/cosmos/cosmos-sdk/types" sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" @@ -44,11 +44,11 @@ type paramStore struct { } type setupConfig struct { - blocks uint64 - blockTxs int - snapshotInterval uint64 + blocks uint64 + blockTxs int + snapshotInterval uint64 snapshotKeepEvery uint32 - pruningOpts *sdk.PruningOptions + pruningOpts pruningtypes.PruningOptions } func (ps *paramStore) Set(_ sdk.Context, key []byte, value interface{}) { @@ -150,7 +150,7 @@ func setupBaseAppWithSnapshots(t *testing.T, config *setupConfig) (*BaseApp, fun os.RemoveAll(snapshotDir) } - app, err := setupBaseApp(t, routerOpt, SetSnapshot(snapshotStore, sdk.NewSnapshotOptions(config.snapshotInterval, uint32(config.snapshotKeepEvery))), SetPruning(config.pruningOpts)) + app, err := setupBaseApp(t, routerOpt, SetSnapshot(snapshotStore, snapshottypes.NewSnapshotOptions(config.snapshotInterval, uint32(config.snapshotKeepEvery))), SetPruning(config.pruningOpts)) if err != nil { return nil, nil, err } @@ -214,7 +214,7 @@ func TestMountStores(t *testing.T) { // Test that LoadLatestVersion actually does. 
func TestLoadVersion(t *testing.T) { logger := defaultLogger() - pruningOpt := SetPruning(sdk.NewPruningOptions(sdk.Nothing)) + pruningOpt := SetPruning(pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) db := dbm.NewMemDB() name := t.Name() app := NewBaseApp(name, logger, db, nil, pruningOpt) @@ -267,15 +267,15 @@ func useDefaultLoader(app *BaseApp) { func initStore(t *testing.T, db dbm.DB, storeKey string, k, v []byte) { rs := rootmulti.NewStore(db, log.NewNopLogger()) - rs.SetPruning(sdk.NewPruningOptions(sdk.Nothing)) + rs.SetPruning(pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) key := sdk.NewKVStoreKey(storeKey) - rs.MountStoreWithDB(key, storeTypes.StoreTypeIAVL, nil) + rs.MountStoreWithDB(key, storetypes.StoreTypeIAVL, nil) err := rs.LoadLatestVersion() require.Nil(t, err) require.Equal(t, int64(0), rs.LastCommitID().Version) // write some data in substore - kv, _ := rs.GetStore(key).(storeTypes.KVStore) + kv, _ := rs.GetStore(key).(storetypes.KVStore) require.NotNil(t, kv) kv.Set(k, v) commitID := rs.Commit() @@ -284,15 +284,15 @@ func initStore(t *testing.T, db dbm.DB, storeKey string, k, v []byte) { func checkStore(t *testing.T, db dbm.DB, ver int64, storeKey string, k, v []byte) { rs := rootmulti.NewStore(db, log.NewNopLogger()) - rs.SetPruning(sdk.NewPruningOptions(sdk.Default)) + rs.SetPruning(pruningtypes.NewPruningOptions(pruningtypes.PruningDefault)) key := sdk.NewKVStoreKey(storeKey) - rs.MountStoreWithDB(key, storeTypes.StoreTypeIAVL, nil) + rs.MountStoreWithDB(key, storetypes.StoreTypeIAVL, nil) err := rs.LoadLatestVersion() require.Nil(t, err) require.Equal(t, ver, rs.LastCommitID().Version) // query data in substore - kv, _ := rs.GetStore(key).(storeTypes.KVStore) + kv, _ := rs.GetStore(key).(storetypes.KVStore) require.NotNil(t, kv) require.Equal(t, v, kv.Get(k)) } @@ -327,7 +327,7 @@ func TestSetLoader(t *testing.T) { initStore(t, db, tc.origStoreKey, k, v) // load the app with the existing db - opts := []func(*BaseApp){SetPruning(sdk.NewPruningOptions(sdk.Nothing))} + opts := []func(*BaseApp){SetPruning(pruningtypes.NewPruningOptions(pruningtypes.PruningNothing))} if tc.setLoader != nil { opts = append(opts, tc.setLoader) } @@ -350,7 +350,7 @@ func TestSetLoader(t *testing.T) { func TestVersionSetterGetter(t *testing.T) { logger := defaultLogger() - pruningOpt := SetPruning(sdk.NewPruningOptions(sdk.Default)) + pruningOpt := SetPruning(pruningtypes.NewPruningOptions(pruningtypes.PruningDefault)) db := dbm.NewMemDB() name := t.Name() app := NewBaseApp(name, logger, db, nil, pruningOpt) @@ -370,7 +370,7 @@ func TestVersionSetterGetter(t *testing.T) { func TestLoadVersionInvalid(t *testing.T) { logger := log.NewNopLogger() - pruningOpt := SetPruning(sdk.NewPruningOptions(sdk.Nothing)) + pruningOpt := SetPruning(pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) db := dbm.NewMemDB() name := t.Name() app := NewBaseApp(name, logger, db, nil, pruningOpt) @@ -402,14 +402,14 @@ func TestLoadVersionInvalid(t *testing.T) { func TestLoadVersionPruning(t *testing.T) { logger := log.NewNopLogger() - pruningOptions := sdk.NewCustomPruningOptions(10, 15) + pruningOptions := pruningtypes.NewCustomPruningOptions(10, 15) pruningOpt := SetPruning(pruningOptions) db := dbm.NewMemDB() name := t.Name() - snapshotStore, err := snapshots.NewStore(dbm.NewMemDB(), snaphotsTestUtil.GetTempDir(t)) + snapshotStore, err := snapshots.NewStore(dbm.NewMemDB(), snaphotstestutil.GetTempDir(t)) require.NoError(t, err) - snapshotOpt := SetSnapshot(snapshotStore, 
sdk.NewSnapshotOptions(3, 1)) + snapshotOpt := SetSnapshot(snapshotStore, snapshottypes.NewSnapshotOptions(3, 1)) app := NewBaseApp(name, logger, db, nil, pruningOpt, snapshotOpt) @@ -1813,11 +1813,11 @@ func TestGetMaximumBlockGas(t *testing.T) { func TestListSnapshots(t *testing.T) { setupConfig := &setupConfig{ - blocks: 5, - blockTxs: 4, - snapshotInterval: 2, + blocks: 5, + blockTxs: 4, + snapshotInterval: 2, snapshotKeepEvery: 2, - pruningOpts: sdk.NewPruningOptions(sdk.Nothing), + pruningOpts: pruningtypes.NewPruningOptions(pruningtypes.PruningNothing), } app, teardown, err := setupBaseAppWithSnapshots(t, setupConfig) @@ -1839,17 +1839,17 @@ func TestListSnapshots(t *testing.T) { func TestSnapshotWithPruning(t *testing.T) { testcases := map[string]struct { - config *setupConfig + config *setupConfig expectedSnapshots []*abci.Snapshot - expectedErr error - } { + expectedErr error + }{ "prune nothing with snapshot": { config: &setupConfig{ - blocks: 20, - blockTxs: 2, - snapshotInterval: 5, + blocks: 20, + blockTxs: 2, + snapshotInterval: 5, snapshotKeepEvery: 1, - pruningOpts: sdk.NewPruningOptions(sdk.Nothing), + pruningOpts: pruningtypes.NewPruningOptions(pruningtypes.PruningNothing), }, expectedSnapshots: []*abci.Snapshot{ {Height: 20, Format: 1, Chunks: 5}, @@ -1857,11 +1857,11 @@ func TestSnapshotWithPruning(t *testing.T) { }, "prune everything with snapshot": { config: &setupConfig{ - blocks: 20, - blockTxs: 2, - snapshotInterval: 5, + blocks: 20, + blockTxs: 2, + snapshotInterval: 5, snapshotKeepEvery: 1, - pruningOpts: sdk.NewPruningOptions(sdk.Everything), + pruningOpts: pruningtypes.NewPruningOptions(pruningtypes.PruningEverything), }, expectedSnapshots: []*abci.Snapshot{ {Height: 20, Format: 1, Chunks: 5}, @@ -1869,11 +1869,11 @@ func TestSnapshotWithPruning(t *testing.T) { }, "default pruning with snapshot": { config: &setupConfig{ - blocks: 20, - blockTxs: 2, - snapshotInterval: 5, + blocks: 20, + blockTxs: 2, + snapshotInterval: 5, snapshotKeepEvery: 1, - pruningOpts: sdk.NewPruningOptions(sdk.Default), + pruningOpts: pruningtypes.NewPruningOptions(pruningtypes.PruningDefault), }, expectedSnapshots: []*abci.Snapshot{ {Height: 20, Format: 1, Chunks: 5}, @@ -1881,11 +1881,11 @@ func TestSnapshotWithPruning(t *testing.T) { }, "custom": { config: &setupConfig{ - blocks: 25, - blockTxs: 2, - snapshotInterval: 5, + blocks: 25, + blockTxs: 2, + snapshotInterval: 5, snapshotKeepEvery: 2, - pruningOpts: sdk.NewCustomPruningOptions(12, 12), + pruningOpts: pruningtypes.NewCustomPruningOptions(12, 12), }, expectedSnapshots: []*abci.Snapshot{ {Height: 25, Format: 1, Chunks: 6}, @@ -1894,20 +1894,20 @@ func TestSnapshotWithPruning(t *testing.T) { }, "no snapshots": { config: &setupConfig{ - blocks: 10, - blockTxs: 2, + blocks: 10, + blockTxs: 2, snapshotInterval: 0, // 0 implies disable snapshots - pruningOpts: sdk.NewPruningOptions(sdk.Nothing), + pruningOpts: pruningtypes.NewPruningOptions(pruningtypes.PruningNothing), }, expectedSnapshots: []*abci.Snapshot{}, }, "keep all snapshots": { config: &setupConfig{ - blocks: 10, - blockTxs: 2, - snapshotInterval: 3, + blocks: 10, + blockTxs: 2, + snapshotInterval: 3, snapshotKeepEvery: 0, // 0 implies keep all snapshots - pruningOpts: sdk.NewPruningOptions(sdk.Nothing), + pruningOpts: pruningtypes.NewPruningOptions(pruningtypes.PruningNothing), }, expectedSnapshots: []*abci.Snapshot{ {Height: 9, Format: 1, Chunks: 2}, @@ -1942,17 +1942,17 @@ func TestSnapshotWithPruning(t *testing.T) { // Validate that heights were pruned correctly by 
querying the state at the last height that should be present relative to latest // and the first height that should be pruned. - // + // // Exceptions: // * Prune nothing: should be able to query all heights (we only test first and latest) // * Prune default: should be able to query all heights (we only test first and latest) // * The reason for default behaving this way is that we only commit 20 heights but default has 100_000 keep-recent var lastExistingHeight int64 - if tc.config.pruningOpts.GetPruningStrategy() == sdk.Nothing || tc.config.pruningOpts.GetPruningStrategy() == sdk.Default { + if tc.config.pruningOpts.GetPruningStrategy() == pruningtypes.PruningNothing || tc.config.pruningOpts.GetPruningStrategy() == pruningtypes.PruningDefault { lastExistingHeight = 1 } else { // Integer division rounds down so by multiplying back we get the last height at which we pruned - lastExistingHeight = int64((tc.config.blocks / tc.config.pruningOpts.Interval) * tc.config.pruningOpts.Interval - tc.config.pruningOpts.KeepRecent) + lastExistingHeight = int64((tc.config.blocks/tc.config.pruningOpts.Interval)*tc.config.pruningOpts.Interval - tc.config.pruningOpts.KeepRecent) } // Query 1 @@ -1962,10 +1962,10 @@ func TestSnapshotWithPruning(t *testing.T) { // Query 2 res = app.Query(abci.RequestQuery{Path: fmt.Sprintf("/store/%s/key", capKey2.Name()), Data: []byte("0"), Height: lastExistingHeight - 1}) - require.NotNil(t, res, "height: %d", lastExistingHeight - 1) - if tc.config.pruningOpts.GetPruningStrategy() == sdk.Nothing || tc.config.pruningOpts.GetPruningStrategy() == sdk.Default { + require.NotNil(t, res, "height: %d", lastExistingHeight-1) + if tc.config.pruningOpts.GetPruningStrategy() == pruningtypes.PruningNothing || tc.config.pruningOpts.GetPruningStrategy() == pruningtypes.PruningDefault { // With prune nothing or default, we query height 0 which translates to the latest height. - require.NotNil(t, res.Value, "height: %d", lastExistingHeight - 1) + require.NotNil(t, res.Value, "height: %d", lastExistingHeight-1) } }) } @@ -1973,11 +1973,11 @@ func TestSnapshotWithPruning(t *testing.T) { func TestLoadSnapshotChunk(t *testing.T) { setupConfig := &setupConfig{ - blocks: 2, - blockTxs: 5, - snapshotInterval: 2, + blocks: 2, + blockTxs: 5, + snapshotInterval: 2, snapshotKeepEvery: 2, - pruningOpts: sdk.NewPruningOptions(sdk.Nothing), + pruningOpts: pruningtypes.NewPruningOptions(pruningtypes.PruningNothing), } app, teardown, err := setupBaseAppWithSnapshots(t, setupConfig) require.NoError(t, err) @@ -2017,11 +2017,11 @@ func TestLoadSnapshotChunk(t *testing.T) { func TestOfferSnapshot_Errors(t *testing.T) { // Set up app before test cases, since it's fairly expensive. 
setupConfig := &setupConfig{ - blocks: 0, - blockTxs: 0, - snapshotInterval: 2, + blocks: 0, + blockTxs: 0, + snapshotInterval: 2, snapshotKeepEvery: 2, - pruningOpts: sdk.NewPruningOptions(sdk.Nothing), + pruningOpts: pruningtypes.NewPruningOptions(pruningtypes.PruningNothing), } app, teardown, err := setupBaseAppWithSnapshots(t, setupConfig) require.NoError(t, err) @@ -2080,22 +2080,22 @@ func TestOfferSnapshot_Errors(t *testing.T) { func TestApplySnapshotChunk(t *testing.T) { setupConfig1 := &setupConfig{ - blocks: 4, - blockTxs: 10, - snapshotInterval: 2, + blocks: 4, + blockTxs: 10, + snapshotInterval: 2, snapshotKeepEvery: 2, - pruningOpts: sdk.NewPruningOptions(sdk.Nothing), + pruningOpts: pruningtypes.NewPruningOptions(pruningtypes.PruningNothing), } source, teardown, err := setupBaseAppWithSnapshots(t, setupConfig1) require.NoError(t, err) defer teardown() setupConfig2 := &setupConfig{ - blocks: 0, - blockTxs: 0, - snapshotInterval: 2, + blocks: 0, + blockTxs: 0, + snapshotInterval: 2, snapshotKeepEvery: 2, - pruningOpts: sdk.NewPruningOptions(sdk.Nothing), + pruningOpts: pruningtypes.NewPruningOptions(pruningtypes.PruningNothing), } target, teardown, err := setupBaseAppWithSnapshots(t, setupConfig2) require.NoError(t, err) @@ -2246,135 +2246,135 @@ func TestBaseApp_Init(t *testing.T) { name := t.Name() logger := defaultLogger() - snapshotStore, err := snapshots.NewStore(dbm.NewMemDB(), snaphotsTestUtil.GetTempDir(t)) + snapshotStore, err := snapshots.NewStore(dbm.NewMemDB(), snaphotstestutil.GetTempDir(t)) require.NoError(t, err) testCases := map[string]struct { - bapp *BaseApp - expectedPruning *sdk.PruningOptions - expectedSnapshot *snapshottypes.SnapshotOptions - expectedErr error + bapp *BaseApp + expectedPruning pruningtypes.PruningOptions + expectedSnapshot snapshottypes.SnapshotOptions + expectedErr error }{ "snapshot but no pruning": { NewBaseApp(name, logger, db, nil, - SetSnapshot(snapshotStore, sdk.NewSnapshotOptions(1500, 2)), + SetSnapshot(snapshotStore, snapshottypes.NewSnapshotOptions(1500, 2)), ), - sdk.NewPruningOptions(sdk.Nothing), - sdk.NewSnapshotOptions(1500, 2), + pruningtypes.NewPruningOptions(pruningtypes.PruningNothing), + snapshottypes.NewSnapshotOptions(1500, 2), // if no pruning is set, the default is PruneNothing nil, }, "pruning everything only": { NewBaseApp(name, logger, db, nil, - SetPruning(sdk.NewPruningOptions(sdk.Everything)), + SetPruning(pruningtypes.NewPruningOptions(pruningtypes.PruningEverything)), ), - sdk.NewPruningOptions(sdk.Everything), - nil, + pruningtypes.NewPruningOptions(pruningtypes.PruningEverything), + snapshottypes.NewSnapshotOptions(snapshottypes.SnapshotIntervalOff, 0), nil, }, "pruning nothing only": { NewBaseApp(name, logger, db, nil, - SetPruning(sdk.NewPruningOptions(sdk.Nothing)), + SetPruning(pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)), ), - sdk.NewPruningOptions(sdk.Nothing), - nil, + pruningtypes.NewPruningOptions(pruningtypes.PruningNothing), + snapshottypes.NewSnapshotOptions(snapshottypes.SnapshotIntervalOff, 0), nil, }, "pruning default only": { NewBaseApp(name, logger, db, nil, - SetPruning(sdk.NewPruningOptions(sdk.Default)), + SetPruning(pruningtypes.NewPruningOptions(pruningtypes.PruningDefault)), ), - sdk.NewPruningOptions(sdk.Default), - nil, + pruningtypes.NewPruningOptions(pruningtypes.PruningDefault), + snapshottypes.NewSnapshotOptions(snapshottypes.SnapshotIntervalOff, 0), nil, }, "pruning custom only": { NewBaseApp(name, logger, db, nil, - SetPruning(sdk.NewCustomPruningOptions(10, 
10)), + SetPruning(pruningtypes.NewCustomPruningOptions(10, 10)), ), - sdk.NewCustomPruningOptions(10, 10), - nil, + pruningtypes.NewCustomPruningOptions(10, 10), + snapshottypes.NewSnapshotOptions(snapshottypes.SnapshotIntervalOff, 0), nil, }, "pruning everything and snapshots": { NewBaseApp(name, logger, db, nil, - SetPruning(sdk.NewPruningOptions(sdk.Everything)), - SetSnapshot(snapshotStore, sdk.NewSnapshotOptions(1500, 2)), + SetPruning(pruningtypes.NewPruningOptions(pruningtypes.PruningEverything)), + SetSnapshot(snapshotStore, snapshottypes.NewSnapshotOptions(1500, 2)), ), - sdk.NewPruningOptions(sdk.Everything), - sdk.NewSnapshotOptions(1500, 2), + pruningtypes.NewPruningOptions(pruningtypes.PruningEverything), + snapshottypes.NewSnapshotOptions(1500, 2), nil, }, "pruning nothing and snapshots": { NewBaseApp(name, logger, db, nil, - SetPruning(sdk.NewPruningOptions(sdk.Nothing)), - SetSnapshot(snapshotStore, sdk.NewSnapshotOptions(1500, 2)), + SetPruning(pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)), + SetSnapshot(snapshotStore, snapshottypes.NewSnapshotOptions(1500, 2)), ), - sdk.NewPruningOptions(sdk.Nothing), - sdk.NewSnapshotOptions(1500, 2), + pruningtypes.NewPruningOptions(pruningtypes.PruningNothing), + snapshottypes.NewSnapshotOptions(1500, 2), nil, }, "pruning default and snapshots": { NewBaseApp(name, logger, db, nil, - SetPruning(sdk.NewPruningOptions(sdk.Default)), - SetSnapshot(snapshotStore, sdk.NewSnapshotOptions(1500, 2)), + SetPruning(pruningtypes.NewPruningOptions(pruningtypes.PruningDefault)), + SetSnapshot(snapshotStore, snapshottypes.NewSnapshotOptions(1500, 2)), ), - sdk.NewPruningOptions(sdk.Default), - sdk.NewSnapshotOptions(1500, 2), + pruningtypes.NewPruningOptions(pruningtypes.PruningDefault), + snapshottypes.NewSnapshotOptions(1500, 2), nil, }, "pruning custom and snapshots": { NewBaseApp(name, logger, db, nil, - SetPruning(sdk.NewCustomPruningOptions(10, 10)), - SetSnapshot(snapshotStore, sdk.NewSnapshotOptions(1500, 2)), + SetPruning(pruningtypes.NewCustomPruningOptions(10, 10)), + SetSnapshot(snapshotStore, snapshottypes.NewSnapshotOptions(1500, 2)), ), - sdk.NewCustomPruningOptions(10, 10), - sdk.NewSnapshotOptions(1500, 2), + pruningtypes.NewCustomPruningOptions(10, 10), + snapshottypes.NewSnapshotOptions(1500, 2), nil, }, "error custom pruning 0 interval": { NewBaseApp(name, logger, db, nil, - SetPruning(sdk.NewCustomPruningOptions(10, 0)), - SetSnapshot(snapshotStore, sdk.NewSnapshotOptions(1500, 2)), + SetPruning(pruningtypes.NewCustomPruningOptions(10, 0)), + SetSnapshot(snapshotStore, snapshottypes.NewSnapshotOptions(1500, 2)), ), - sdk.NewCustomPruningOptions(10, 0), - sdk.NewSnapshotOptions(1500, 2), - pruningTypes.ErrPruningIntervalZero, + pruningtypes.NewCustomPruningOptions(10, 0), + snapshottypes.NewSnapshotOptions(1500, 2), + pruningtypes.ErrPruningIntervalZero, }, "error custom pruning too small interval": { NewBaseApp(name, logger, db, nil, - SetPruning(sdk.NewCustomPruningOptions(10, 9)), - SetSnapshot(snapshotStore, sdk.NewSnapshotOptions(1500, 2)), + SetPruning(pruningtypes.NewCustomPruningOptions(10, 9)), + SetSnapshot(snapshotStore, snapshottypes.NewSnapshotOptions(1500, 2)), ), - sdk.NewCustomPruningOptions(10, 9), - sdk.NewSnapshotOptions(1500, 2), - pruningTypes.ErrPruningIntervalTooSmall, + pruningtypes.NewCustomPruningOptions(10, 9), + snapshottypes.NewSnapshotOptions(1500, 2), + pruningtypes.ErrPruningIntervalTooSmall, }, "error custom pruning too small keep recent": { NewBaseApp(name, logger, db, nil, - 
SetPruning(sdk.NewCustomPruningOptions(9, 10)), - SetSnapshot(snapshotStore, sdk.NewSnapshotOptions(1500, 2)), + SetPruning(pruningtypes.NewCustomPruningOptions(9, 10)), + SetSnapshot(snapshotStore, snapshottypes.NewSnapshotOptions(1500, 2)), ), - sdk.NewCustomPruningOptions(9, 10), - sdk.NewSnapshotOptions(1500, 2), - pruningTypes.ErrPruningKeepRecentTooSmall, + pruningtypes.NewCustomPruningOptions(9, 10), + snapshottypes.NewSnapshotOptions(1500, 2), + pruningtypes.ErrPruningKeepRecentTooSmall, }, "snapshot zero interval - manager not set": { NewBaseApp(name, logger, db, nil, - SetPruning(sdk.NewCustomPruningOptions(10, 10)), - SetSnapshot(snapshotStore, sdk.NewSnapshotOptions(0, 2)), + SetPruning(pruningtypes.NewCustomPruningOptions(10, 10)), + SetSnapshot(snapshotStore, snapshottypes.NewSnapshotOptions(0, 2)), ), - sdk.NewCustomPruningOptions(10, 10), - nil, // the snapshot manager is not set when interval is 0 + pruningtypes.NewCustomPruningOptions(10, 10), + snapshottypes.NewSnapshotOptions(snapshottypes.SnapshotIntervalOff, 0), nil, }, "snapshot zero keep recent - allowed": { NewBaseApp(name, logger, db, nil, - SetPruning(sdk.NewCustomPruningOptions(10, 10)), - SetSnapshot(snapshotStore, sdk.NewSnapshotOptions(1500, 0)), + SetPruning(pruningtypes.NewCustomPruningOptions(10, 10)), + SetSnapshot(snapshotStore, snapshottypes.NewSnapshotOptions(1500, 0)), ), - sdk.NewCustomPruningOptions(10, 10), - sdk.NewSnapshotOptions(1500, 0), // 0 snapshot-keep-recent means keep all + pruningtypes.NewCustomPruningOptions(10, 10), + snapshottypes.NewSnapshotOptions(1500, 0), // 0 snapshot-keep-recent means keep all nil, }, } @@ -2390,7 +2390,7 @@ func TestBaseApp_Init(t *testing.T) { actualPruning := tc.bapp.cms.GetPruning() require.Equal(t, tc.expectedPruning, actualPruning) - if tc.expectedSnapshot == nil { + if tc.expectedSnapshot.Interval == snapshottypes.SnapshotIntervalOff { require.Nil(t, tc.bapp.snapshotManager) continue } diff --git a/baseapp/options.go b/baseapp/options.go index 3e48499e5a5e..0a294fdbadcc 100644 --- a/baseapp/options.go +++ b/baseapp/options.go @@ -7,7 +7,9 @@ import ( dbm "github.com/tendermint/tm-db" "github.com/cosmos/cosmos-sdk/codec/types" + pruningtypes "github.com/cosmos/cosmos-sdk/pruning/types" "github.com/cosmos/cosmos-sdk/snapshots" + snapshottypes "github.com/cosmos/cosmos-sdk/snapshots/types" "github.com/cosmos/cosmos-sdk/store" sdk "github.com/cosmos/cosmos-sdk/types" ) @@ -16,7 +18,7 @@ import ( // for options that need access to non-exported fields of the BaseApp // SetPruning sets a pruning option on the multistore associated with the app -func SetPruning(opts *sdk.PruningOptions) func(*BaseApp) { +func SetPruning(opts pruningtypes.PruningOptions) func(*BaseApp) { return func(bapp *BaseApp) { bapp.cms.SetPruning(opts) } } @@ -69,7 +71,7 @@ func SetInterBlockCache(cache sdk.MultiStorePersistentCache) func(*BaseApp) { } // SetSnapshot sets the snapshot store. -func SetSnapshot(snapshotStore *snapshots.Store, opts *sdk.SnapshotOptions) func(*BaseApp) { +func SetSnapshot(snapshotStore *snapshots.Store, opts snapshottypes.SnapshotOptions) func(*BaseApp) { return func(app *BaseApp) { app.SetSnapshot(snapshotStore, opts) } } @@ -199,11 +201,11 @@ func (app *BaseApp) SetRouter(router sdk.Router) { } // SetSnapshot sets the snapshot store and options. 
-func (app *BaseApp) SetSnapshot(snapshotStore *snapshots.Store, opts *sdk.SnapshotOptions) { +func (app *BaseApp) SetSnapshot(snapshotStore *snapshots.Store, opts snapshottypes.SnapshotOptions) { if app.sealed { panic("SetSnapshot() on sealed BaseApp") } - if snapshotStore == nil || opts.Interval == 0 { + if snapshotStore == nil || opts.Interval == snapshottypes.SnapshotIntervalOff { app.snapshotManager = nil return } diff --git a/pruning/README.md b/pruning/README.md index af6291a5c400..80a685427740 100644 --- a/pruning/README.md +++ b/pruning/README.md @@ -2,28 +2,29 @@ ## Overview -Pruning is the mechanism for deleting old heights from the disk. Depending on the use case, +Pruning is the mechanism for deleting old application heights from the disk. Depending on the use case, nodes may require different pruning strategies. For example, archive nodes must keep all the states and prune nothing. On the other hand, a regular validator node may want to only keep 100 latest heights for performance reasons. ## Strategies -The strategies are configured in `app.toml`: -pruning = "< strategy >" # where the options are: -- `default`: only the last 100,000 states(approximately 1 week worth of state) are kept; pruning at 100 block intervals -- `nothing`: all historic states will be saved, nothing will be deleted (i.e. archiving node) -- `everything`: 10 latest states will be kept; pruning at 10 block intervals. -- `custom`: allow pruning options to be manually specified through 'pruning-keep-recent', and 'pruning-interval' +The strategies are configured in `app.toml`, with the format `pruning = "<strategy>"` where the options are: -If no strategy is given to `Baseapp`, `nothing` is selected. However, we perform validation on the cli layer to require these to be always set in the config file. +- `default`: only the last 100,000 states (approximately 1 week's worth of state) are kept; pruning at 100 block intervals +- `nothing`: all historic states will be saved, nothing will be deleted (i.e. archiving node) +- `everything`: 10 latest states will be kept; pruning at 10 block intervals. +- `custom`: allow pruning options to be manually specified through 'pruning-keep-recent' and 'pruning-interval' + +If no strategy is given to the BaseApp, `nothing` is selected. However, we perform validation on the CLI layer to require these to always be set in the config file. ## Custom Pruning These are applied if and only if the pruning strategy is custom: -- `pruning-keep-recent`: N means to keep all of the last N states -- `pruning-interval`: N means to delete old states from disk every Nth block. -## Relationship to Snapshots +- `pruning-keep-recent`: N means to keep all of the last N states +- `pruning-interval`: N means to delete old states from disk every Nth block. + +## Relationship to State Sync Snapshots Snapshot settings are optional. However, if set, they have an effect on how pruning is done by persisting the heights that are multiples of `state-sync.snapshot-interval` until after the snapshot is complete. See the "Relationship to Pruning" section in `snapshots/README.md` for more details.
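For context, below is a minimal sketch of how the pruning and snapshot options touched by this patch could be wired into an application. It only uses constructors and setters that appear in this diff (pruningtypes.NewCustomPruningOptions, snapshottypes.NewSnapshotOptions, snapshots.NewStore, SetPruning, SetSnapshot); the app name, in-memory databases, and snapshot directory are placeholder assumptions, not part of the change.

```go
// Illustrative wiring only. The app name, in-memory DBs, and the
// snapshot directory below are assumptions for the example.
package main

import (
	"path/filepath"

	"github.com/tendermint/tendermint/libs/log"
	dbm "github.com/tendermint/tm-db"

	"github.com/cosmos/cosmos-sdk/baseapp"
	pruningtypes "github.com/cosmos/cosmos-sdk/pruning/types"
	"github.com/cosmos/cosmos-sdk/snapshots"
	snapshottypes "github.com/cosmos/cosmos-sdk/snapshots/types"
)

func newApp(homeDir string) (*baseapp.BaseApp, error) {
	logger := log.NewNopLogger()
	appDB := dbm.NewMemDB() // stand-in for the application DB

	// Snapshot store backed by its own DB and directory, as in the tests above.
	snapshotStore, err := snapshots.NewStore(dbm.NewMemDB(), filepath.Join(homeDir, "snapshots"))
	if err != nil {
		return nil, err
	}

	// "custom" pruning: keep the last 100 heights, prune every 10th block.
	pruningOpts := pruningtypes.NewCustomPruningOptions(100, 10)

	// Take a state sync snapshot every 1000th height, keep the 2 most recent.
	snapshotOpts := snapshottypes.NewSnapshotOptions(1000, 2)

	app := baseapp.NewBaseApp(
		"example", logger, appDB, nil,
		baseapp.SetPruning(pruningOpts),
		baseapp.SetSnapshot(snapshotStore, snapshotOpts),
	)
	return app, nil
}
```

Passing an interval of 0 to NewSnapshotOptions leaves the snapshot manager unset, mirroring the "snapshot zero interval - manager not set" test case earlier in this patch.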
diff --git a/pruning/export_test.go b/pruning/export_test.go new file mode 100644 index 000000000000..8c38778bf93d --- /dev/null +++ b/pruning/export_test.go @@ -0,0 +1,11 @@ +package pruning + +var ( + PruneHeightsKey = pruneHeightsKey + PruneSnapshotHeightsKey = pruneSnapshotHeightsKey + + Int64SliceToBytes = int64SliceToBytes + ListToBytes = listToBytes + LoadPruningHeights = loadPruningHeights + LoadPruningSnapshotHeights = loadPruningSnapshotHeights +) diff --git a/pruning/manager.go b/pruning/manager.go index b527ac3e3623..00dbd54769f4 100644 --- a/pruning/manager.go +++ b/pruning/manager.go @@ -12,67 +12,107 @@ import ( dbm "github.com/tendermint/tm-db" ) +// Manager is an abstraction to handle the logic needed for +// determining when to prune old heights of the store +// based on the strategy described by the pruning options. type Manager struct { - logger log.Logger - db dbm.DB - opts *types.PruningOptions - snapshotInterval uint64 - pruneHeights []int64 - pruneSnapshotHeights *list.List - mx sync.Mutex + db dbm.DB + logger log.Logger + opts types.PruningOptions + snapshotInterval uint64 + // Although pruneHeights happen in the same goroutine as the normal execution, + // we sync access to them to avoid soundness issues in the future if the concurrency pattern changes. + pruneHeightsMx sync.Mutex + pruneHeights []int64 + // Snapshots are taken in a separate goroutine from the regular execution + // and can be delivered asynchronously via HandleHeightSnapshot. + // Therefore, we sync access to pruneSnapshotHeights with this mutex. + pruneSnapshotHeightsMx sync.Mutex + // These are the heights that are multiples of snapshotInterval and kept for state sync snapshots. + // The heights are added to this list to be pruned when a snapshot is complete. + pruneSnapshotHeights *list.List } -const ( - pruneHeightsKey = "s/pruneheights" - pruneSnapshotHeightsKey = "s/pruneSnheights" +// NegativeHeightsError is returned when a negative height is provided to the manager. +type NegativeHeightsError struct { + Height int64 +} + +var _ error = &NegativeHeightsError{} + +func (e *NegativeHeightsError) Error() string { + return fmt.Sprintf("failed to get pruned heights: %d", e.Height) +} - uint64Size = 8 +var ( + pruneHeightsKey = []byte("s/pruneheights") + pruneSnapshotHeightsKey = []byte("s/prunesnapshotheights") ) -func NewManager(logger log.Logger, db dbm.DB) *Manager { +// NewManager returns a new Manager with the given db and logger. +// The returned manager uses a pruning strategy of "nothing" which +// keeps all heights. Users of the Manager may change the strategy +// by calling SetOptions. +func NewManager(db dbm.DB, logger log.Logger) *Manager { return &Manager{ - logger: logger, - db: db, - opts: types.NewPruningOptions(types.PruningNothing), - pruneHeights: []int64{}, - // These are the heights that are multiples of snapshotInterval and kept for state sync snapshots. - // The heights are added to this list to be pruned when a snapshot is complete. + db: db, + logger: logger, + opts: types.NewPruningOptions(types.PruningNothing), + pruneHeights: []int64{}, pruneSnapshotHeights: list.New(), - mx: sync.Mutex{}, } } // SetOptions sets the pruning strategy on the manager. -func (m *Manager) SetOptions(opts *types.PruningOptions) { +func (m *Manager) SetOptions(opts types.PruningOptions) { m.opts = opts } // GetOptions fetches the pruning strategy from the manager.
-func (m *Manager) GetOptions() *types.PruningOptions { +func (m *Manager) GetOptions() types.PruningOptions { return m.opts } -// GetPruningHeights returns all heights to be pruned during the next call to Prune(). -func (m *Manager) GetPruningHeights() []int64 { - return m.pruneHeights -} +// GetFlushAndResetPruningHeights returns all heights to be pruned during the next call to Prune(). +// It also flushes and resets the pruning heights. +func (m *Manager) GetFlushAndResetPruningHeights() ([]int64, error) { + if m.opts.GetPruningStrategy() == types.PruningNothing { + return []int64{}, nil + } + m.pruneHeightsMx.Lock() + defer m.pruneHeightsMx.Unlock() -// ResetPruningHeights resets the heights to be pruned. -func (m *Manager) ResetPruningHeights() { - m.pruneHeights = make([]int64, 0) + // flush the updates to disk so that they are not lost if a crash happens. + if err := m.db.SetSync(pruneHeightsKey, int64SliceToBytes(m.pruneHeights)); err != nil { + return nil, err + } + + // Return a copy to prevent data races. + pruningHeights := make([]int64, len(m.pruneHeights)) + copy(pruningHeights, m.pruneHeights) + m.pruneHeights = m.pruneHeights[:0] + + return pruningHeights, nil } -// HandleHeight determines if pruneHeight height needs to be kept for pruning at the right interval prescribed by -// the pruning strategy. Returns true if the given height was kept to be pruned at the next call to Prune(), false otherwise +// HandleHeight determines if previousHeight needs to be kept for pruning at the right interval prescribed by +// the pruning strategy. Returns previousHeight if it was kept to be pruned at the next call to Prune(), and 0 otherwise. +// previousHeight must be greater than 0 for the handling to take effect since valid heights start at 1 and 0 represents +// the latest height. The latest height cannot be pruned. As a result, if previousHeight is less than or equal to 0, 0 is returned. func (m *Manager) HandleHeight(previousHeight int64) int64 { - if m.opts.GetPruningStrategy() == types.PruningNothing { + if m.opts.GetPruningStrategy() == types.PruningNothing || previousHeight <= 0 { return 0 } defer func() { - // handle persisted snapshot heights - m.mx.Lock() - defer m.mx.Unlock() + m.pruneHeightsMx.Lock() + defer m.pruneHeightsMx.Unlock() + + m.pruneSnapshotHeightsMx.Lock() + defer m.pruneSnapshotHeightsMx.Unlock() + + // move persisted snapshot heights to pruneHeights which + // represent the heights to be pruned at the next pruning interval. var next *list.Element for e := m.pruneSnapshotHeights.Front(); e != nil; e = next { snHeight := e.Value.(int64) @@ -86,6 +126,11 @@ func (m *Manager) HandleHeight(previousHeight int64) int64 { next = e.Next() } } + + // flush the updates to disk so that they are not lost if a crash happens. + if err := m.db.SetSync(pruneHeightsKey, int64SliceToBytes(m.pruneHeights)); err != nil { + panic(err) + } }() if int64(m.opts.KeepRecent) < previousHeight { @@ -96,6 +141,9 @@ func (m *Manager) HandleHeight(previousHeight int64) int64 { // - snapshotInterval % (height - KeepRecent) != 0 as that means the height is not // a 'snapshot' height.
if m.snapshotInterval == 0 || pruneHeight%int64(m.snapshotInterval) != 0 { + m.pruneHeightsMx.Lock() + defer m.pruneHeightsMx.Unlock() + m.pruneHeights = append(m.pruneHeights, pruneHeight) return pruneHeight } @@ -103,14 +151,25 @@ func (m *Manager) HandleHeight(previousHeight int64) int64 { return 0 } +// HandleHeightSnapshot persists the snapshot height to be pruned at the next appropriate +// height defined by the pruning strategy. It flushes the update to disk and panics if the flush fails. +// The input height must be greater than 0, and the pruning strategy must be any strategy other than pruning nothing. +// If one of these conditions is not met, this function does nothing. func (m *Manager) HandleHeightSnapshot(height int64) { - if m.opts.GetPruningStrategy() == types.PruningNothing { + if m.opts.GetPruningStrategy() == types.PruningNothing || height <= 0 { return } - m.mx.Lock() - defer m.mx.Unlock() - m.logger.Debug("HandleHeightSnapshot", "height", height) // TODO: change log level to Debug + + m.pruneSnapshotHeightsMx.Lock() + defer m.pruneSnapshotHeightsMx.Unlock() + + m.logger.Debug("HandleHeightSnapshot", "height", height) m.pruneSnapshotHeights.PushBack(height) + + // flush the updates to disk so that they are not lost if a crash happens. + if err := m.db.SetSync(pruneSnapshotHeightsKey, listToBytes(m.pruneSnapshotHeights)); err != nil { + panic(err) + } } // SetSnapshotInterval sets the interval at which the snapshots are taken. @@ -120,111 +179,104 @@ func (m *Manager) SetSnapshotInterval(snapshotInterval uint64) { // ShouldPruneAtHeight return true if the given height should be pruned, false otherwise func (m *Manager) ShouldPruneAtHeight(height int64) bool { - return m.opts.GetPruningStrategy() != types.PruningNothing && m.opts.Interval > 0 && height%int64(m.opts.Interval) == 0 -} - -// FlushPruningHeights flushes the pruning heights to the database for crash recovery. -func (m *Manager) FlushPruningHeights() { - if m.opts.GetPruningStrategy() == types.PruningNothing { - return - } - batch := m.db.NewBatch() - defer batch.Close() - m.flushPruningHeights(batch) - m.flushPruningSnapshotHeights(batch) - - if err := batch.WriteSync(); err != nil { - panic(fmt.Errorf("error on batch write %w", err)) - } + return m.opts.Interval > 0 && m.opts.GetPruningStrategy() != types.PruningNothing && height%int64(m.opts.Interval) == 0 } // LoadPruningHeights loads the pruning heights from the database as a crash recovery.
-func (m *Manager) LoadPruningHeights(db dbm.DB) error { +func (m *Manager) LoadPruningHeights() error { if m.opts.GetPruningStrategy() == types.PruningNothing { return nil } - if err := m.loadPruningHeights(db); err != nil { + loadedPruneHeights, err := loadPruningHeights(m.db) + if err != nil { return err } - if err := m.loadPruningSnapshotHeights(db); err != nil { + + if len(loadedPruneHeights) > 0 { + m.pruneHeightsMx.Lock() + defer m.pruneHeightsMx.Unlock() + m.pruneHeights = loadedPruneHeights + } + + loadedPruneSnapshotHeights, err := loadPruningSnapshotHeights(m.db) + if err != nil { return err } + + if loadedPruneSnapshotHeights.Len() > 0 { + m.pruneSnapshotHeightsMx.Lock() + defer m.pruneSnapshotHeightsMx.Unlock() + m.pruneSnapshotHeights = loadedPruneSnapshotHeights + } + return nil } -func (m *Manager) loadPruningHeights(db dbm.DB) error { - bz, err := db.Get([]byte(pruneHeightsKey)) +func loadPruningHeights(db dbm.DB) ([]int64, error) { + bz, err := db.Get(pruneHeightsKey) if err != nil { - return fmt.Errorf("failed to get pruned heights: %w", err) + return nil, fmt.Errorf("failed to get pruned heights: %w", err) } if len(bz) == 0 { - return nil + return []int64{}, nil } prunedHeights := make([]int64, len(bz)/8) i, offset := 0, 0 for offset < len(bz) { - prunedHeights[i] = int64(binary.BigEndian.Uint64(bz[offset : offset+8])) + h := int64(binary.BigEndian.Uint64(bz[offset : offset+8])) + if h < 0 { + return []int64{}, &NegativeHeightsError{Height: h} + } + + prunedHeights[i] = h i++ offset += 8 } - if len(prunedHeights) > 0 { - m.pruneHeights = prunedHeights - } - - return nil + return prunedHeights, nil } -func (m *Manager) loadPruningSnapshotHeights(db dbm.DB) error { - bz, err := db.Get([]byte(pruneSnapshotHeightsKey)) +func loadPruningSnapshotHeights(db dbm.DB) (*list.List, error) { + bz, err := db.Get(pruneSnapshotHeightsKey) if err != nil { - return fmt.Errorf("failed to get post-snapshot pruned heights: %w", err) + return nil, fmt.Errorf("failed to get post-snapshot pruned heights: %w", err) } + pruneSnapshotHeights := list.New() if len(bz) == 0 { - return nil + return pruneSnapshotHeights, nil } - pruneSnapshotHeights := list.New() i, offset := 0, 0 for offset < len(bz) { - pruneSnapshotHeights.PushBack(int64(binary.BigEndian.Uint64(bz[offset : offset+8]))) + h := int64(binary.BigEndian.Uint64(bz[offset : offset+8])) + if h < 0 { + return nil, &NegativeHeightsError{Height: h} + } + pruneSnapshotHeights.PushBack(h) i++ offset += 8 } - if pruneSnapshotHeights.Len() > 0 { - m.mx.Lock() - defer m.mx.Unlock() - m.pruneSnapshotHeights = pruneSnapshotHeights - } - - return nil + return pruneSnapshotHeights, nil } -func (m *Manager) flushPruningHeights(batch dbm.Batch) { - bz := make([]byte, 0, len(m.pruneHeights) * uint64Size) - for _, ph := range m.pruneHeights { - buf := make([]byte, uint64Size) +func int64SliceToBytes(slice []int64) []byte { + bz := make([]byte, 0, len(slice)*8) + for _, ph := range slice { + buf := make([]byte, 8) binary.BigEndian.PutUint64(buf, uint64(ph)) bz = append(bz, buf...) 
} - - if err := batch.Set([]byte(pruneHeightsKey), bz); err != nil { - panic(err) - } + return bz } -func (m *Manager) flushPruningSnapshotHeights(batch dbm.Batch) { - m.mx.Lock() - defer m.mx.Unlock() - bz := make([]byte, 0, m.pruneSnapshotHeights.Len() * uint64Size) - for e := m.pruneSnapshotHeights.Front(); e != nil; e = e.Next() { - buf := make([]byte, uint64Size) +func listToBytes(list *list.List) []byte { + bz := make([]byte, 0, list.Len()*8) + for e := list.Front(); e != nil; e = e.Next() { + buf := make([]byte, 8) binary.BigEndian.PutUint64(buf, uint64(e.Value.(int64))) bz = append(bz, buf...) } - if err := batch.Set([]byte(pruneSnapshotHeightsKey), bz); err != nil { - panic(err) - } + return bz } diff --git a/pruning/manager_test.go b/pruning/manager_test.go index edb9624576cb..972ac791852e 100644 --- a/pruning/manager_test.go +++ b/pruning/manager_test.go @@ -2,88 +2,93 @@ package pruning_test import ( "container/list" + "errors" "fmt" - "sync" "testing" - "time" "github.com/cosmos/cosmos-sdk/pruning" + "github.com/cosmos/cosmos-sdk/pruning/mock" "github.com/cosmos/cosmos-sdk/pruning/types" + "github.com/golang/mock/gomock" "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/libs/log" db "github.com/tendermint/tm-db" ) -func Test_NewManager(t *testing.T) { - manager := pruning.NewManager(log.NewNopLogger(), db.NewMemDB()) +const dbErr = "db error" + +func TestNewManager(t *testing.T) { + manager := pruning.NewManager(db.NewMemDB(), log.NewNopLogger()) require.NotNil(t, manager) - require.NotNil(t, manager.GetPruningHeights()) + heights, err := manager.GetFlushAndResetPruningHeights() + require.NoError(t, err) + require.NotNil(t, heights) require.Equal(t, types.PruningNothing, manager.GetOptions().GetPruningStrategy()) } -func Test_Strategies(t *testing.T) { +func TestStrategies(t *testing.T) { testcases := map[string]struct { - strategy *types.PruningOptions + strategy types.PruningOptions snapshotInterval uint64 strategyToAssert types.PruningStrategy - isValid bool + isValid bool }{ "prune nothing - no snapshot": { - strategy: types.NewPruningOptions(types.PruningNothing), + strategy: types.NewPruningOptions(types.PruningNothing), strategyToAssert: types.PruningNothing, }, "prune nothing - snapshot": { - strategy: types.NewPruningOptions(types.PruningNothing), + strategy: types.NewPruningOptions(types.PruningNothing), strategyToAssert: types.PruningNothing, snapshotInterval: 100, }, "prune default - no snapshot": { - strategy: types.NewPruningOptions(types.PruningDefault), + strategy: types.NewPruningOptions(types.PruningDefault), strategyToAssert: types.PruningDefault, }, "prune default - snapshot": { - strategy: types.NewPruningOptions(types.PruningDefault), + strategy: types.NewPruningOptions(types.PruningDefault), strategyToAssert: types.PruningDefault, snapshotInterval: 100, }, "prune everything - no snapshot": { - strategy: types.NewPruningOptions(types.PruningEverything), + strategy: types.NewPruningOptions(types.PruningEverything), strategyToAssert: types.PruningEverything, }, "prune everything - snapshot": { - strategy: types.NewPruningOptions(types.PruningEverything), + strategy: types.NewPruningOptions(types.PruningEverything), strategyToAssert: types.PruningEverything, snapshotInterval: 100, }, "custom 100-10-15": { - strategy: types.NewCustomPruningOptions(100, 15), + strategy: types.NewCustomPruningOptions(100, 15), snapshotInterval: 10, strategyToAssert: types.PruningCustom, }, "custom 10-10-15": { - strategy: types.NewCustomPruningOptions(10, 
15), + strategy: types.NewCustomPruningOptions(10, 15), snapshotInterval: 10, strategyToAssert: types.PruningCustom, }, "custom 100-0-15": { - strategy: types.NewCustomPruningOptions(100, 15), + strategy: types.NewCustomPruningOptions(100, 15), snapshotInterval: 0, strategyToAssert: types.PruningCustom, }, } - manager := pruning.NewManager(log.NewNopLogger(), db.NewMemDB()) + manager := pruning.NewManager(db.NewMemDB(), log.NewNopLogger()) require.NotNil(t, manager) for name, tc := range testcases { t.Run(name, func(t *testing.T) { - curStrategy := tc.strategy + curStrategy := tc.strategy manager.SetSnapshotInterval(tc.snapshotInterval) - + pruneStrategy := curStrategy.GetPruningStrategy() require.Equal(t, tc.strategyToAssert, pruneStrategy) @@ -112,7 +117,8 @@ func Test_Strategies(t *testing.T) { handleHeightActual := manager.HandleHeight(curHeight) shouldPruneAtHeightActual := manager.ShouldPruneAtHeight(curHeight) - curPruningHeihts := manager.GetPruningHeights() + curPruningHeihts, err := manager.GetFlushAndResetPruningHeights() + require.Nil(t, err) curHeightStr := fmt.Sprintf("height: %d", curHeight) @@ -121,7 +127,9 @@ func Test_Strategies(t *testing.T) { require.Equal(t, int64(0), handleHeightActual, curHeightStr) require.False(t, shouldPruneAtHeightActual, curHeightStr) - require.Equal(t, 0, len(manager.GetPruningHeights())) + heights, err := manager.GetFlushAndResetPruningHeights() + require.NoError(t, err) + require.Equal(t, 0, len(heights)) default: if curHeight > int64(curKeepRecent) && (tc.snapshotInterval != 0 && (curHeight-int64(curKeepRecent))%int64(tc.snapshotInterval) != 0 || tc.snapshotInterval == 0) { expectedHeight := curHeight - int64(curKeepRecent) @@ -131,34 +139,271 @@ func Test_Strategies(t *testing.T) { } else { require.Equal(t, int64(0), handleHeightActual, curHeightStr) - require.Equal(t, 0, len(manager.GetPruningHeights())) + heights, err := manager.GetFlushAndResetPruningHeights() + require.NoError(t, err) + require.Equal(t, 0, len(heights)) } require.Equal(t, curHeight%int64(curInterval) == 0, shouldPruneAtHeightActual, curHeightStr) } - manager.ResetPruningHeights() - require.Equal(t, 0, len(manager.GetPruningHeights())) + heights, err := manager.GetFlushAndResetPruningHeights() + require.NoError(t, err) + require.Equal(t, 0, len(heights)) } }) } } -func Test_FlushLoad(t *testing.T) { - const ( - totalHeights = 1000 - snapshotInterval = uint64(10) +func TestHandleHeight_Inputs(t *testing.T) { + var keepRecent int64 = int64(types.NewPruningOptions(types.PruningEverything).KeepRecent) + + testcases := map[string]struct { + height int64 + expectedResult int64 + strategy types.PruningStrategy + expectedHeights []int64 + }{ + "previousHeight is negative - prune everything - invalid previousHeight": { + -1, + 0, + types.PruningEverything, + []int64{}, + }, + "previousHeight is zero - prune everything - invalid previousHeight": { + 0, + 0, + types.PruningEverything, + []int64{}, + }, + "previousHeight is positive but within keep recent- prune everything - not kept": { + keepRecent, + 0, + types.PruningEverything, + []int64{}, + }, + "previousHeight is positive and greater than keep recent - kept": { + keepRecent + 1, + keepRecent + 1 - keepRecent, + types.PruningEverything, + []int64{keepRecent + 1 - keepRecent}, + }, + "pruning nothing, previousHeight is positive and greater than keep recent - not kept": { + keepRecent + 1, + 0, + types.PruningNothing, + []int64{}, + }, + } + + for name, tc := range testcases { + t.Run(name, func(t *testing.T) { + manager := 
pruning.NewManager(db.NewMemDB(), log.NewNopLogger()) + require.NotNil(t, manager) + manager.SetOptions(types.NewPruningOptions(tc.strategy)) + + handleHeightActual := manager.HandleHeight(tc.height) + require.Equal(t, tc.expectedResult, handleHeightActual) + + actualHeights, err := manager.GetFlushAndResetPruningHeights() + require.NoError(t, err) + require.Equal(t, len(tc.expectedHeights), len(actualHeights)) + require.Equal(t, tc.expectedHeights, actualHeights) + }) + } +} + +func TestHandleHeight_FlushLoadFromDisk(t *testing.T) { + testcases := map[string]struct { + previousHeight int64 + keepRecent uint64 + snapshotInterval uint64 + movedSnapshotHeights []int64 + expectedHandleHeightResult int64 + expectedLoadPruningHeightsResult error + expectedLoadedHeights []int64 + }{ + "simple flush occurs": { + previousHeight: 11, + keepRecent: 10, + snapshotInterval: 0, + movedSnapshotHeights: []int64{}, + expectedHandleHeightResult: 11 - 10, + expectedLoadPruningHeightsResult: nil, + expectedLoadedHeights: []int64{11 - 10}, + }, + "previous height <= keep recent - no update and no flush": { + previousHeight: 9, + keepRecent: 10, + snapshotInterval: 0, + movedSnapshotHeights: []int64{}, + expectedHandleHeightResult: 0, + expectedLoadPruningHeightsResult: nil, + expectedLoadedHeights: []int64{}, + }, + "previous height alligns with snapshot interval - no update and no flush": { + previousHeight: 12, + keepRecent: 10, + snapshotInterval: 2, + movedSnapshotHeights: []int64{}, + expectedHandleHeightResult: 0, + expectedLoadPruningHeightsResult: nil, + expectedLoadedHeights: []int64{}, + }, + "previous height does not align with snapshot interval - flush": { + previousHeight: 12, + keepRecent: 10, + snapshotInterval: 3, + movedSnapshotHeights: []int64{}, + expectedHandleHeightResult: 2, + expectedLoadPruningHeightsResult: nil, + expectedLoadedHeights: []int64{2}, + }, + "moved snapshot heights - flushed": { + previousHeight: 32, + keepRecent: 10, + snapshotInterval: 5, + movedSnapshotHeights: []int64{15, 20, 25}, + expectedHandleHeightResult: 22, + expectedLoadPruningHeightsResult: nil, + expectedLoadedHeights: []int64{15, 20, 22}, + }, + "previous height alligns with snapshot interval - no update but flush snapshot heights": { + previousHeight: 30, + keepRecent: 10, + snapshotInterval: 5, + movedSnapshotHeights: []int64{15, 20, 25}, + expectedHandleHeightResult: 0, + expectedLoadPruningHeightsResult: nil, + expectedLoadedHeights: []int64{15}, + }, + } + + for name, tc := range testcases { + t.Run(name, func(t *testing.T) { + // Setup + db := db.NewMemDB() + manager := pruning.NewManager(db, log.NewNopLogger()) + require.NotNil(t, manager) + + manager.SetSnapshotInterval(tc.snapshotInterval) + manager.SetOptions(types.NewCustomPruningOptions(uint64(tc.keepRecent), uint64(10))) + + for _, snapshotHeight := range tc.movedSnapshotHeights { + manager.HandleHeightSnapshot(snapshotHeight) + } + + // Test HandleHeight and flush + handleHeightActual := manager.HandleHeight(tc.previousHeight) + require.Equal(t, tc.expectedHandleHeightResult, handleHeightActual) + + loadedPruneHeights, err := pruning.LoadPruningHeights(db) + require.NoError(t, err) + require.Equal(t, len(loadedPruneHeights), len(loadedPruneHeights)) + + // Test load back + err = manager.LoadPruningHeights() + require.NoError(t, err) + + heights, err := manager.GetFlushAndResetPruningHeights() + require.NoError(t, err) + require.Equal(t, len(tc.expectedLoadedHeights), len(heights)) + require.ElementsMatch(t, tc.expectedLoadedHeights, heights) 
+ }) + } +} + +func TestHandleHeight_DbErr_Panic(t *testing.T) { + + ctrl := gomock.NewController(t) + + // Setup + dbMock := mock.NewMockDB(ctrl) + + dbMock.EXPECT().SetSync(gomock.Any(), gomock.Any()).Return(errors.New(dbErr)).Times(1) + + manager := pruning.NewManager(dbMock, log.NewNopLogger()) + manager.SetOptions(types.NewPruningOptions(types.PruningEverything)) + require.NotNil(t, manager) + + defer func() { + if r := recover(); r == nil { + t.Fail() + } + }() + + manager.HandleHeight(10) +} + +func TestHandleHeightSnapshot_FlushLoadFromDisk(t *testing.T) { + loadedHeightsMirror := []int64{} + + // Setup + db := db.NewMemDB() + manager := pruning.NewManager(db, log.NewNopLogger()) + require.NotNil(t, manager) + + manager.SetOptions(types.NewPruningOptions(types.PruningEverything)) + for snapshotHeight := int64(-1); snapshotHeight < 100; snapshotHeight++ { + // Test flush + manager.HandleHeightSnapshot(snapshotHeight) + + // Post test + if snapshotHeight > 0 { + loadedHeightsMirror = append(loadedHeightsMirror, snapshotHeight) + } + + loadedSnapshotHeights, err := pruning.LoadPruningSnapshotHeights(db) + require.NoError(t, err) + require.Equal(t, len(loadedHeightsMirror), loadedSnapshotHeights.Len()) + + // Test load back + err = manager.LoadPruningHeights() + require.NoError(t, err) + + loadedSnapshotHeights, err = pruning.LoadPruningSnapshotHeights(db) + require.NoError(t, err) + require.Equal(t, len(loadedHeightsMirror), loadedSnapshotHeights.Len()) + } +} + +func TestHandleHeightSnapshot_DbErr_Panic(t *testing.T) { + + ctrl := gomock.NewController(t) + + // Setup + dbMock := mock.NewMockDB(ctrl) + + dbMock.EXPECT().SetSync(gomock.Any(), gomock.Any()).Return(errors.New(dbErr)).Times(1) + + manager := pruning.NewManager(dbMock, log.NewNopLogger()) + manager.SetOptions(types.NewPruningOptions(types.PruningEverything)) + require.NotNil(t, manager) + + defer func() { + if r := recover(); r == nil { + t.Fail() + } + }() + + manager.HandleHeightSnapshot(10) +} + +func TestFlushLoad(t *testing.T) { + const ( + totalHeights = 1000 + snapshotInterval = uint64(10) pruningKeepRecent = 100 - pruningInterval = 15 + pruningInterval = 15 ) var ( - db = db.NewMemDB() - manager = pruning.NewManager(log.NewNopLogger(), db) - curStrategy = types.NewCustomPruningOptions(pruningKeepRecent, pruningInterval) + db = db.NewMemDB() + manager = pruning.NewManager(db, log.NewNopLogger()) + curStrategy = types.NewCustomPruningOptions(pruningKeepRecent, pruningInterval) heightsToPruneMirror = make([]int64, 0) ) require.NotNil(t, manager) - + manager.SetSnapshotInterval(snapshotInterval) manager.SetOptions(curStrategy) @@ -177,99 +422,125 @@ func Test_FlushLoad(t *testing.T) { require.Equal(t, int64(0), handleHeightActual, curHeightStr) } - if manager.ShouldPruneAtHeight(curHeight) { - manager.ResetPruningHeights() - heightsToPruneMirror = make([]int64, 0) - } - - // N.B.: There is no reason behind the choice of 3. 
- if curHeight%3 == 0 { - require.Equal(t, heightsToPruneMirror, manager.GetPruningHeights(), curHeightStr) - manager.FlushPruningHeights() + if manager.ShouldPruneAtHeight(curHeight) && curHeight > int64(pruningKeepRecent) { + actualHeights, err := manager.GetFlushAndResetPruningHeights() + require.NoError(t, err) + require.Equal(t, len(heightsToPruneMirror), len(actualHeights)) + require.Equal(t, heightsToPruneMirror, actualHeights) - manager.ResetPruningHeights() - require.Equal(t, make([]int64, 0), manager.GetPruningHeights(), curHeightStr) + err = manager.LoadPruningHeights() + require.NoError(t, err) - err := manager.LoadPruningHeights(db) + actualHeights, err = manager.GetFlushAndResetPruningHeights() require.NoError(t, err) - require.Equal(t, heightsToPruneMirror, manager.GetPruningHeights(), curHeightStr) + require.Equal(t, len(heightsToPruneMirror), len(actualHeights)) + require.Equal(t, heightsToPruneMirror, actualHeights) + + heightsToPruneMirror = make([]int64, 0) } } } -func Test_WithSnapshot(t *testing.T) { - manager := pruning.NewManager(log.NewNopLogger(), db.NewMemDB()) +func TestLoadPruningHeights(t *testing.T) { + var ( + manager = pruning.NewManager(db.NewMemDB(), log.NewNopLogger()) + err error + ) require.NotNil(t, manager) - curStrategy := types.NewCustomPruningOptions(10, 10) - - snapshotInterval := uint64(15) - manager.SetSnapshotInterval(snapshotInterval) + // must not be PruningNothing + manager.SetOptions(types.NewPruningOptions(types.PruningDefault)) - manager.SetOptions(curStrategy) - require.Equal(t, curStrategy, manager.GetOptions()) + testcases := map[string]struct { + flushedPruningHeights []int64 + getFlushedPruningSnapshotHeights func() *list.List + expectedResult error + }{ + "negative pruningHeight - error": { + flushedPruningHeights: []int64{10, 0, -1}, + expectedResult: &pruning.NegativeHeightsError{Height: -1}, + }, + "negative snapshotPruningHeight - error": { + getFlushedPruningSnapshotHeights: func() *list.List { + l := list.New() + l.PushBack(int64(5)) + l.PushBack(int64(-2)) + l.PushBack(int64(3)) + return l + }, + expectedResult: &pruning.NegativeHeightsError{Height: -2}, + }, + "both have negative - pruningHeight error": { + flushedPruningHeights: []int64{10, 0, -1}, + getFlushedPruningSnapshotHeights: func() *list.List { + l := list.New() + l.PushBack(int64(5)) + l.PushBack(int64(-2)) + l.PushBack(int64(3)) + return l + }, + expectedResult: &pruning.NegativeHeightsError{Height: -1}, + }, + "both non-negative - success": { + flushedPruningHeights: []int64{10, 0, 3}, + getFlushedPruningSnapshotHeights: func() *list.List { + l := list.New() + l.PushBack(int64(5)) + l.PushBack(int64(0)) + l.PushBack(int64(3)) + return l + }, + }, + } - keepRecent := curStrategy.KeepRecent + for name, tc := range testcases { + t.Run(name, func(t *testing.T) { + db := db.NewMemDB() + if tc.flushedPruningHeights != nil { + err = db.Set(pruning.PruneHeightsKey, pruning.Int64SliceToBytes(tc.flushedPruningHeights)) + require.NoError(t, err) + } - heightsToPruneMirror := make([]int64, 0) + if tc.getFlushedPruningSnapshotHeights != nil { + err = db.Set(pruning.PruneSnapshotHeightsKey, pruning.ListToBytes(tc.getFlushedPruningSnapshotHeights())) + require.NoError(t, err) + } - mx := sync.Mutex{} - snapshotHeightsToPruneMirror := list.New() + manager := pruning.NewManager(db, log.NewNopLogger()) + require.NotNil(t, manager) - wg := sync.WaitGroup{} + // must not be PruningNothing + manager.SetOptions(types.NewPruningOptions(types.PruningDefault)) - for curHeight := 
int64(1); curHeight < 100000; curHeight++ { - mx.Lock() - handleHeightActual := manager.HandleHeight(curHeight) + err = manager.LoadPruningHeights() + require.Equal(t, tc.expectedResult, err) + }) + } +} - curHeightStr := fmt.Sprintf("height: %d", curHeight) +func TestLoadPruningHeights_PruneNothing(t *testing.T) { + var manager = pruning.NewManager(db.NewMemDB(), log.NewNopLogger()) + require.NotNil(t, manager) - if curHeight > int64(keepRecent) && (curHeight-int64(keepRecent))%int64(snapshotInterval) != 0 { - expectedHandleHeight := curHeight - int64(keepRecent) - require.Equal(t, expectedHandleHeight, handleHeightActual, curHeightStr) - heightsToPruneMirror = append(heightsToPruneMirror, expectedHandleHeight) - } else { - require.Equal(t, int64(0), handleHeightActual, curHeightStr) - } + manager.SetOptions(types.NewPruningOptions(types.PruningNothing)) - actualHeightsToPrune := manager.GetPruningHeights() + require.Nil(t, manager.LoadPruningHeights()) +} - var next *list.Element - for e := snapshotHeightsToPruneMirror.Front(); e != nil; e = next { - snapshotHeight := e.Value.(int64) - if snapshotHeight < curHeight-int64(keepRecent) { - heightsToPruneMirror = append(heightsToPruneMirror, snapshotHeight) +func TestGetFlushAndResetPruningHeights_DbErr_Panic(t *testing.T) { - // We must get next before removing to be able to continue iterating. - next = e.Next() - snapshotHeightsToPruneMirror.Remove(e) - } else { - next = e.Next() - } - } + ctrl := gomock.NewController(t) - require.Equal(t, heightsToPruneMirror, actualHeightsToPrune, curHeightStr) - mx.Unlock() + // Setup + dbMock := mock.NewMockDB(ctrl) - if manager.ShouldPruneAtHeight(curHeight) { - manager.ResetPruningHeights() - heightsToPruneMirror = make([]int64, 0) - } + dbMock.EXPECT().SetSync(gomock.Any(), gomock.Any()).Return(errors.New(dbErr)).Times(1) - // Mimic taking snapshots in the background - if curHeight%int64(snapshotInterval) == 0 { - wg.Add(1) - go func(curHeightCp int64) { - time.Sleep(time.Nanosecond * 500) - - mx.Lock() - manager.HandleHeightSnapshot(curHeightCp) - snapshotHeightsToPruneMirror.PushBack(curHeightCp) - mx.Unlock() - wg.Done() - }(curHeight) - } - } + manager := pruning.NewManager(dbMock, log.NewNopLogger()) + manager.SetOptions(types.NewPruningOptions(types.PruningEverything)) + require.NotNil(t, manager) - wg.Wait() + heights, err := manager.GetFlushAndResetPruningHeights() + require.Error(t, err) + require.Nil(t, heights) } diff --git a/pruning/mock/db_mock.go b/pruning/mock/db_mock.go new file mode 100644 index 000000000000..fb6ee740b972 --- /dev/null +++ b/pruning/mock/db_mock.go @@ -0,0 +1,420 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: /home/roman/projects/cosmos-sdk/vendor/github.com/tendermint/tm-db/types.go + +// Package mock_db is a generated GoMock package. +package mock + +import ( + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + db "github.com/tendermint/tm-db" +) + +// MockDB is a mock of DB interface. +type MockDB struct { + ctrl *gomock.Controller + recorder *MockDBMockRecorder +} + +// MockDBMockRecorder is the mock recorder for MockDB. +type MockDBMockRecorder struct { + mock *MockDB +} + +// NewMockDB creates a new mock instance. +func NewMockDB(ctrl *gomock.Controller) *MockDB { + mock := &MockDB{ctrl: ctrl} + mock.recorder = &MockDBMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockDB) EXPECT() *MockDBMockRecorder { + return m.recorder +} + +// Close mocks base method. 
+func (m *MockDB) Close() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Close") + ret0, _ := ret[0].(error) + return ret0 +} + +// Close indicates an expected call of Close. +func (mr *MockDBMockRecorder) Close() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockDB)(nil).Close)) +} + +// Delete mocks base method. +func (m *MockDB) Delete(arg0 []byte) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Delete", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// Delete indicates an expected call of Delete. +func (mr *MockDBMockRecorder) Delete(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockDB)(nil).Delete), arg0) +} + +// DeleteSync mocks base method. +func (m *MockDB) DeleteSync(arg0 []byte) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteSync", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteSync indicates an expected call of DeleteSync. +func (mr *MockDBMockRecorder) DeleteSync(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteSync", reflect.TypeOf((*MockDB)(nil).DeleteSync), arg0) +} + +// Get mocks base method. +func (m *MockDB) Get(arg0 []byte) ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", arg0) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Get indicates an expected call of Get. +func (mr *MockDBMockRecorder) Get(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockDB)(nil).Get), arg0) +} + +// Has mocks base method. +func (m *MockDB) Has(key []byte) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Has", key) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Has indicates an expected call of Has. +func (mr *MockDBMockRecorder) Has(key interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Has", reflect.TypeOf((*MockDB)(nil).Has), key) +} + +// Iterator mocks base method. +func (m *MockDB) Iterator(start, end []byte) (db.Iterator, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Iterator", start, end) + ret0, _ := ret[0].(db.Iterator) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Iterator indicates an expected call of Iterator. +func (mr *MockDBMockRecorder) Iterator(start, end interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Iterator", reflect.TypeOf((*MockDB)(nil).Iterator), start, end) +} + +// NewBatch mocks base method. +func (m *MockDB) NewBatch() db.Batch { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewBatch") + ret0, _ := ret[0].(db.Batch) + return ret0 +} + +// NewBatch indicates an expected call of NewBatch. +func (mr *MockDBMockRecorder) NewBatch() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewBatch", reflect.TypeOf((*MockDB)(nil).NewBatch)) +} + +// Print mocks base method. +func (m *MockDB) Print() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Print") + ret0, _ := ret[0].(error) + return ret0 +} + +// Print indicates an expected call of Print. 
+func (mr *MockDBMockRecorder) Print() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Print", reflect.TypeOf((*MockDB)(nil).Print)) +} + +// ReverseIterator mocks base method. +func (m *MockDB) ReverseIterator(start, end []byte) (db.Iterator, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ReverseIterator", start, end) + ret0, _ := ret[0].(db.Iterator) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ReverseIterator indicates an expected call of ReverseIterator. +func (mr *MockDBMockRecorder) ReverseIterator(start, end interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReverseIterator", reflect.TypeOf((*MockDB)(nil).ReverseIterator), start, end) +} + +// Set mocks base method. +func (m *MockDB) Set(arg0, arg1 []byte) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Set", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// Set indicates an expected call of Set. +func (mr *MockDBMockRecorder) Set(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Set", reflect.TypeOf((*MockDB)(nil).Set), arg0, arg1) +} + +// SetSync mocks base method. +func (m *MockDB) SetSync(arg0, arg1 []byte) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetSync", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetSync indicates an expected call of SetSync. +func (mr *MockDBMockRecorder) SetSync(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetSync", reflect.TypeOf((*MockDB)(nil).SetSync), arg0, arg1) +} + +// Stats mocks base method. +func (m *MockDB) Stats() map[string]string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Stats") + ret0, _ := ret[0].(map[string]string) + return ret0 +} + +// Stats indicates an expected call of Stats. +func (mr *MockDBMockRecorder) Stats() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stats", reflect.TypeOf((*MockDB)(nil).Stats)) +} + +// MockBatch is a mock of Batch interface. +type MockBatch struct { + ctrl *gomock.Controller + recorder *MockBatchMockRecorder +} + +// MockBatchMockRecorder is the mock recorder for MockBatch. +type MockBatchMockRecorder struct { + mock *MockBatch +} + +// NewMockBatch creates a new mock instance. +func NewMockBatch(ctrl *gomock.Controller) *MockBatch { + mock := &MockBatch{ctrl: ctrl} + mock.recorder = &MockBatchMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockBatch) EXPECT() *MockBatchMockRecorder { + return m.recorder +} + +// Close mocks base method. +func (m *MockBatch) Close() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Close") + ret0, _ := ret[0].(error) + return ret0 +} + +// Close indicates an expected call of Close. +func (mr *MockBatchMockRecorder) Close() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockBatch)(nil).Close)) +} + +// Delete mocks base method. +func (m *MockBatch) Delete(key []byte) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Delete", key) + ret0, _ := ret[0].(error) + return ret0 +} + +// Delete indicates an expected call of Delete. 
+func (mr *MockBatchMockRecorder) Delete(key interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockBatch)(nil).Delete), key) +} + +// Set mocks base method. +func (m *MockBatch) Set(key, value []byte) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Set", key, value) + ret0, _ := ret[0].(error) + return ret0 +} + +// Set indicates an expected call of Set. +func (mr *MockBatchMockRecorder) Set(key, value interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Set", reflect.TypeOf((*MockBatch)(nil).Set), key, value) +} + +// Write mocks base method. +func (m *MockBatch) Write() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Write") + ret0, _ := ret[0].(error) + return ret0 +} + +// Write indicates an expected call of Write. +func (mr *MockBatchMockRecorder) Write() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Write", reflect.TypeOf((*MockBatch)(nil).Write)) +} + +// WriteSync mocks base method. +func (m *MockBatch) WriteSync() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WriteSync") + ret0, _ := ret[0].(error) + return ret0 +} + +// WriteSync indicates an expected call of WriteSync. +func (mr *MockBatchMockRecorder) WriteSync() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteSync", reflect.TypeOf((*MockBatch)(nil).WriteSync)) +} + +// MockIterator is a mock of Iterator interface. +type MockIterator struct { + ctrl *gomock.Controller + recorder *MockIteratorMockRecorder +} + +// MockIteratorMockRecorder is the mock recorder for MockIterator. +type MockIteratorMockRecorder struct { + mock *MockIterator +} + +// NewMockIterator creates a new mock instance. +func NewMockIterator(ctrl *gomock.Controller) *MockIterator { + mock := &MockIterator{ctrl: ctrl} + mock.recorder = &MockIteratorMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockIterator) EXPECT() *MockIteratorMockRecorder { + return m.recorder +} + +// Close mocks base method. +func (m *MockIterator) Close() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Close") + ret0, _ := ret[0].(error) + return ret0 +} + +// Close indicates an expected call of Close. +func (mr *MockIteratorMockRecorder) Close() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockIterator)(nil).Close)) +} + +// Domain mocks base method. +func (m *MockIterator) Domain() ([]byte, []byte) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Domain") + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].([]byte) + return ret0, ret1 +} + +// Domain indicates an expected call of Domain. +func (mr *MockIteratorMockRecorder) Domain() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Domain", reflect.TypeOf((*MockIterator)(nil).Domain)) +} + +// Error mocks base method. +func (m *MockIterator) Error() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Error") + ret0, _ := ret[0].(error) + return ret0 +} + +// Error indicates an expected call of Error. +func (mr *MockIteratorMockRecorder) Error() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Error", reflect.TypeOf((*MockIterator)(nil).Error)) +} + +// Key mocks base method. 
+func (m *MockIterator) Key() []byte { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Key") + ret0, _ := ret[0].([]byte) + return ret0 +} + +// Key indicates an expected call of Key. +func (mr *MockIteratorMockRecorder) Key() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Key", reflect.TypeOf((*MockIterator)(nil).Key)) +} + +// Next mocks base method. +func (m *MockIterator) Next() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Next") +} + +// Next indicates an expected call of Next. +func (mr *MockIteratorMockRecorder) Next() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Next", reflect.TypeOf((*MockIterator)(nil).Next)) +} + +// Valid mocks base method. +func (m *MockIterator) Valid() bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Valid") + ret0, _ := ret[0].(bool) + return ret0 +} + +// Valid indicates an expected call of Valid. +func (mr *MockIteratorMockRecorder) Valid() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Valid", reflect.TypeOf((*MockIterator)(nil).Valid)) +} + +// Value mocks base method. +func (m *MockIterator) Value() []byte { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Value") + ret0, _ := ret[0].([]byte) + return ret0 +} + +// Value indicates an expected call of Value. +func (mr *MockIteratorMockRecorder) Value() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Value", reflect.TypeOf((*MockIterator)(nil).Value)) +} diff --git a/pruning/types/options.go b/pruning/types/options.go index c2f13d921db9..93fe3ea12af5 100644 --- a/pruning/types/options.go +++ b/pruning/types/options.go @@ -59,39 +59,31 @@ var ( ErrPruningKeepRecentTooSmall = fmt.Errorf("'pruning-keep-recent' must not be less than %d. 
For the most aggressive pruning, select pruning = \"everything\"", pruneEverythingKeepRecent) ) -func NewPruningOptions(pruningStrategy PruningStrategy) *PruningOptions { +func NewPruningOptions(pruningStrategy PruningStrategy) PruningOptions { switch pruningStrategy { - case PruningDefault: - return &PruningOptions{ - KeepRecent: 100_000, - Interval: 100, - Strategy: PruningDefault, - } case PruningEverything: - return &PruningOptions{ + return PruningOptions{ KeepRecent: pruneEverythingKeepRecent, Interval: pruneEverythingInterval, Strategy: PruningEverything, } case PruningNothing: - return &PruningOptions{ + return PruningOptions{ KeepRecent: 0, Interval: 0, Strategy: PruningNothing, } - case PruningCustom: - return &PruningOptions{ - Strategy: PruningCustom, - } default: - return &PruningOptions{ - Strategy: PruningUndefined, + return PruningOptions{ + KeepRecent: 100_000, + Interval: 100, + Strategy: PruningDefault, } } } -func NewCustomPruningOptions(keepRecent, interval uint64) *PruningOptions { - return &PruningOptions{ +func NewCustomPruningOptions(keepRecent, interval uint64) PruningOptions { + return PruningOptions{ KeepRecent: keepRecent, Interval: interval, Strategy: PruningCustom, @@ -118,7 +110,7 @@ func (po PruningOptions) Validate() error { return nil } -func NewPruningOptionsFromString(strategy string) *PruningOptions { +func NewPruningOptionsFromString(strategy string) PruningOptions { switch strategy { case PruningOptionEverything: return NewPruningOptions(PruningEverything) diff --git a/pruning/types/options_test.go b/pruning/types/options_test.go index 00d488520f9d..497cfb510e2f 100644 --- a/pruning/types/options_test.go +++ b/pruning/types/options_test.go @@ -8,13 +8,14 @@ import ( func TestPruningOptions_Validate(t *testing.T) { testCases := []struct { - opts *PruningOptions - expectErr error + opts PruningOptions + expectErr error }{ {NewPruningOptions(PruningDefault), nil}, {NewPruningOptions(PruningEverything), nil}, {NewPruningOptions(PruningNothing), nil}, {NewCustomPruningOptions(10, 10), nil}, + {NewPruningOptions(PruningCustom), nil}, {NewCustomPruningOptions(100, 15), nil}, {NewCustomPruningOptions(9, 10), ErrPruningKeepRecentTooSmall}, {NewCustomPruningOptions(10, 9), ErrPruningIntervalTooSmall}, @@ -27,3 +28,38 @@ func TestPruningOptions_Validate(t *testing.T) { require.Equal(t, tc.expectErr, err, "options: %v, err: %s", tc.opts, err) } } + +func TestPruningOptions_GetStrategy(t *testing.T) { + testCases := []struct { + opts PruningOptions + expectedStrategy PruningStrategy + }{ + {NewPruningOptions(PruningDefault), PruningDefault}, + {NewPruningOptions(PruningEverything), PruningEverything}, + {NewPruningOptions(PruningNothing), PruningNothing}, + {NewPruningOptions(PruningCustom), PruningDefault}, + {NewCustomPruningOptions(2, 10), PruningCustom}, + } + + for _, tc := range testCases { + actualStrategy := tc.opts.GetPruningStrategy() + require.Equal(t, tc.expectedStrategy, actualStrategy) + } +} + +func TestNewPruningOptionsFromString(t *testing.T) { + testCases := []struct { + optString string + expect PruningOptions + }{ + {PruningOptionDefault, NewPruningOptions(PruningDefault)}, + {PruningOptionEverything, NewPruningOptions(PruningEverything)}, + {PruningOptionNothing, NewPruningOptions(PruningNothing)}, + {"invalid", NewPruningOptions(PruningDefault)}, + } + + for _, tc := range testCases { + actual := NewPruningOptionsFromString(tc.optString) + require.Equal(t, tc.expect, actual) + } +} diff --git a/server/config/config.go 
b/server/config/config.go index b5294d13815b..61a7ef809327 100644 --- a/server/config/config.go +++ b/server/config/config.go @@ -6,7 +6,7 @@ import ( "github.com/spf13/viper" - pruningTypes "github.com/cosmos/cosmos-sdk/pruning/types" + pruningtypes "github.com/cosmos/cosmos-sdk/pruning/types" "github.com/cosmos/cosmos-sdk/telemetry" sdk "github.com/cosmos/cosmos-sdk/types" sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" @@ -204,7 +204,7 @@ func DefaultConfig() *Config { BaseConfig: BaseConfig{ MinGasPrices: defaultMinGasPrices, InterBlockCache: true, - Pruning: pruningTypes.PruningOptionDefault, + Pruning: pruningtypes.PruningOptionDefault, PruningKeepRecent: "0", PruningInterval: "0", MinRetainBlocks: 0, diff --git a/server/mock/store.go b/server/mock/store.go index e0c857fe2a64..f75daad14a17 100644 --- a/server/mock/store.go +++ b/server/mock/store.go @@ -5,6 +5,7 @@ import ( dbm "github.com/tendermint/tm-db" + pruningtypes "github.com/cosmos/cosmos-sdk/pruning/types" store "github.com/cosmos/cosmos-sdk/store/types" sdk "github.com/cosmos/cosmos-sdk/types" ) @@ -63,11 +64,11 @@ func (ms multiStore) LastCommitID() sdk.CommitID { panic("not implemented") } -func (ms multiStore) SetPruning(opts *sdk.PruningOptions) { +func (ms multiStore) SetPruning(opts pruningtypes.PruningOptions) { panic("not implemented") } -func (ms multiStore) GetPruning() *sdk.PruningOptions { +func (ms multiStore) GetPruning() pruningtypes.PruningOptions { panic("not implemented") } @@ -79,11 +80,7 @@ func (ms multiStore) GetCommitStore(key sdk.StoreKey) sdk.CommitStore { panic("not implemented") } -func (ms multiStore) GetCommitKVStores() map[sdk.StoreKey]sdk.CommitKVStore { - panic("not implemented") -} - -func (ms multiStore) MountStoreWithDB(key sdk.StoreKey, typ sdk.StoreType, db dbm.DB) { +func (ms multiStore) MountStoreWithDB(key store.StoreKey, typ store.StoreType, db dbm.DB) { ms.kv[key] = kvStore{store: make(map[string][]byte)} } diff --git a/server/pruning.go b/server/pruning.go index 53096e409a81..2e21579032c1 100644 --- a/server/pruning.go +++ b/server/pruning.go @@ -6,22 +6,22 @@ import ( "github.com/spf13/cast" - pruningTypes "github.com/cosmos/cosmos-sdk/pruning/types" + pruningtypes "github.com/cosmos/cosmos-sdk/pruning/types" "github.com/cosmos/cosmos-sdk/server/types" ) // GetPruningOptionsFromFlags parses command flags and returns the correct // PruningOptions. If a pruning strategy is provided, that will be parsed and // returned, otherwise, it is assumed custom pruning options are provided. 
-func GetPruningOptionsFromFlags(appOpts types.AppOptions) (*pruningTypes.PruningOptions, error) { +func GetPruningOptionsFromFlags(appOpts types.AppOptions) (pruningtypes.PruningOptions, error) { strategy := strings.ToLower(cast.ToString(appOpts.Get(FlagPruning))) switch strategy { - case pruningTypes.PruningOptionDefault, pruningTypes.PruningOptionNothing, pruningTypes.PruningOptionEverything: - return pruningTypes.NewPruningOptionsFromString(strategy), nil + case pruningtypes.PruningOptionDefault, pruningtypes.PruningOptionNothing, pruningtypes.PruningOptionEverything: + return pruningtypes.NewPruningOptionsFromString(strategy), nil - case pruningTypes.PruningOptionCustom: - opts := pruningTypes.NewCustomPruningOptions( + case pruningtypes.PruningOptionCustom: + opts := pruningtypes.NewCustomPruningOptions( cast.ToUint64(appOpts.Get(FlagPruningKeepRecent)), cast.ToUint64(appOpts.Get(FlagPruningInterval)), ) @@ -33,6 +33,6 @@ func GetPruningOptionsFromFlags(appOpts types.AppOptions) (*pruningTypes.Pruning return opts, nil default: - return nil, fmt.Errorf("unknown pruning strategy %s", strategy) + return pruningtypes.PruningOptions{}, fmt.Errorf("unknown pruning strategy %s", strategy) } } diff --git a/server/pruning_test.go b/server/pruning_test.go index f7f5a97efe19..2d7bc976d92e 100644 --- a/server/pruning_test.go +++ b/server/pruning_test.go @@ -6,45 +6,45 @@ import ( "github.com/spf13/viper" "github.com/stretchr/testify/require" - pruningTypes "github.com/cosmos/cosmos-sdk/pruning/types" + pruningtypes "github.com/cosmos/cosmos-sdk/pruning/types" ) func TestGetPruningOptionsFromFlags(t *testing.T) { tests := []struct { name string initParams func() *viper.Viper - expectedOptions *pruningTypes.PruningOptions + expectedOptions pruningtypes.PruningOptions wantErr bool }{ { name: FlagPruning, initParams: func() *viper.Viper { v := viper.New() - v.Set(FlagPruning, pruningTypes.PruningOptionNothing) + v.Set(FlagPruning, pruningtypes.PruningOptionNothing) return v }, - expectedOptions: pruningTypes.NewPruningOptions(pruningTypes.PruningNothing), + expectedOptions: pruningtypes.NewPruningOptions(pruningtypes.PruningNothing), }, { name: "custom pruning options", initParams: func() *viper.Viper { v := viper.New() - v.Set(FlagPruning, pruningTypes.PruningOptionCustom) + v.Set(FlagPruning, pruningtypes.PruningOptionCustom) v.Set(FlagPruningKeepRecent, 1234) v.Set(FlagPruningInterval, 10) return v }, - expectedOptions: pruningTypes.NewCustomPruningOptions(1234, 10), + expectedOptions: pruningtypes.NewCustomPruningOptions(1234, 10), }, { - name: pruningTypes.PruningOptionDefault, + name: pruningtypes.PruningOptionDefault, initParams: func() *viper.Viper { v := viper.New() - v.Set(FlagPruning, pruningTypes.PruningOptionDefault) + v.Set(FlagPruning, pruningtypes.PruningOptionDefault) return v }, - expectedOptions: pruningTypes.NewPruningOptions(pruningTypes.PruningDefault), + expectedOptions: pruningtypes.NewPruningOptions(pruningtypes.PruningDefault), }, } @@ -53,7 +53,7 @@ func TestGetPruningOptionsFromFlags(t *testing.T) { t.Run(tt.name, func(j *testing.T) { viper.Reset() - viper.SetDefault(FlagPruning, pruningTypes.PruningOptionDefault) + viper.SetDefault(FlagPruning, pruningtypes.PruningOptionDefault) v := tt.initParams() opts, err := GetPruningOptionsFromFlags(v) diff --git a/server/start.go b/server/start.go index 67592ed8f4d6..3636e9fd1ba1 100644 --- a/server/start.go +++ b/server/start.go @@ -10,8 +10,6 @@ import ( "runtime/pprof" "time" - "github.com/cosmos/cosmos-sdk/codec" - 
"github.com/spf13/cobra" "google.golang.org/grpc" @@ -31,7 +29,8 @@ import ( "github.com/cosmos/cosmos-sdk/client" "github.com/cosmos/cosmos-sdk/client/flags" - pruningTypes "github.com/cosmos/cosmos-sdk/pruning/types" + "github.com/cosmos/cosmos-sdk/codec" + pruningtypes "github.com/cosmos/cosmos-sdk/pruning/types" "github.com/cosmos/cosmos-sdk/server/api" "github.com/cosmos/cosmos-sdk/server/config" servergrpc "github.com/cosmos/cosmos-sdk/server/grpc" @@ -151,7 +150,7 @@ which accepts a path for the resulting pprof file. cmd.Flags().Bool(FlagInterBlockCache, true, "Enable inter-block caching") cmd.Flags().String(flagCPUProfile, "", "Enable CPU profiling and write to the provided file") cmd.Flags().Bool(FlagTrace, false, "Provide full stack traces for errors in ABCI Log") - cmd.Flags().String(FlagPruning, pruningTypes.PruningOptionDefault, "Pruning strategy (default|nothing|everything|custom)") + cmd.Flags().String(FlagPruning, pruningtypes.PruningOptionDefault, "Pruning strategy (default|nothing|everything|custom)") cmd.Flags().Uint64(FlagPruningKeepRecent, 0, "Number of recent heights to keep on disk (ignored if pruning is not 'custom')") cmd.Flags().Uint64(FlagPruningInterval, 0, "Height interval at which pruned heights are removed from disk (ignored if pruning is not 'custom')") cmd.Flags().Uint(FlagInvCheckPeriod, 0, "Assert registered invariants every N blocks") diff --git a/simapp/simd/cmd/root.go b/simapp/simd/cmd/root.go index 57f086f25daa..878b397d94a7 100644 --- a/simapp/simd/cmd/root.go +++ b/simapp/simd/cmd/root.go @@ -25,6 +25,7 @@ import ( "github.com/cosmos/cosmos-sdk/simapp" "github.com/cosmos/cosmos-sdk/simapp/params" "github.com/cosmos/cosmos-sdk/snapshots" + snapshottypes "github.com/cosmos/cosmos-sdk/snapshots/types" "github.com/cosmos/cosmos-sdk/store" sdk "github.com/cosmos/cosmos-sdk/types" authcmd "github.com/cosmos/cosmos-sdk/x/auth/client/cli" @@ -255,9 +256,10 @@ func (a appCreator) newApp(logger log.Logger, db dbm.DB, traceStore io.Writer, a panic(err) } - snapshotOptions := sdk.NewSnapshotOptions( + snapshotOptions := snapshottypes.NewSnapshotOptions( cast.ToUint64(appOpts.Get(server.FlagStateSyncSnapshotInterval)), - cast.ToUint32(appOpts.Get(server.FlagStateSyncSnapshotKeepRecent))) + cast.ToUint32(appOpts.Get(server.FlagStateSyncSnapshotKeepRecent)), + ) return simapp.NewSimApp( logger, db, traceStore, true, skipUpgradeHeights, diff --git a/snapshots/README.md b/snapshots/README.md index 8d7b314d86d8..db0247a61673 100644 --- a/snapshots/README.md +++ b/snapshots/README.md @@ -63,12 +63,26 @@ the `pruning.Manager` to be pruned according to the pruning settings after the n To illustrate, assume that we are currently at height 960 with `pruning-keep-recent = 50`, `pruning-interval = 10`, and `state-sync.snapshot-interval = 100`. Let's assume that -the snapshot that was triggered at height `900` just finishes. Then, we can prune height -`900` right away (that is, when we call `Commit()` at height 960) because it (`900`) is less than `960 - 50 = 910`. +the snapshot that was triggered at height `900` **just finishes**. Then, we can prune height +`900` right away (that is, when we call `Commit()` at height 960 because 900 is less than `960 - 50 = 910`. + +Let's now assume that all conditions stay the same but the snapshot at height 900 is **not complete yet**. +Then, we cannot prune it to avoid deleting a height that is still being snapshotted. Therefore, we keep track +of this height until the snapshot is complete. 
The height 900 will be pruned at the first height h that satisfies the following conditions:
+- the snapshot is complete
+- h is a multiple of `pruning-interval`
+- the snapshot height is less than h - `pruning-keep-recent`
+
+Note that in both examples, if we let the current height be C and the previous height be P = C - 1, then for every height h such that:
+
+P - `pruning-keep-recent` - `pruning-interval` <= h <= P - `pruning-keep-recent`
+
+we can prune height h. In our first example, all heights 899 - 909 fall in this range and are pruned at height 960 as long as
+h is not a snapshot height (e.g. 900).
+
+That is, we always use the current height (960) to determine at which height to prune, while we use the previous
+height to determine which heights are to be pruned (from 959 - 50 - 10 = 899 up to 959 - 50 = 909).
-Let's now assume that all settings stay the same but `pruning-keep-recent = 100`. In that case,
-we cannot prune height `900` which is greater than `960 - 100 = 850`. As a result, height 900 is persisted until
-we can prune it according to the pruning settings.
 
 ## Configuration
diff --git a/snapshots/helpers_test.go b/snapshots/helpers_test.go
index 2a1cd32d7c4f..6803d7071370 100644
--- a/snapshots/helpers_test.go
+++ b/snapshots/helpers_test.go
@@ -59,8 +59,8 @@ func readChunks(chunks <-chan io.ReadCloser) [][]byte {
 }
 
 type mockSnapshotter struct {
-	chunks [][]byte
-	prunedHeights map[int64]struct{}
+	chunks           [][]byte
+	prunedHeights    map[int64]struct{}
 	snapshotInterval uint64
 }
 
@@ -138,14 +138,14 @@ func setupBusyManager(t *testing.T) *snapshots.Manager {
 
 // hungSnapshotter can be used to test operations in progress. Call close to end the snapshot.
 type hungSnapshotter struct {
-	ch chan struct{}
-	prunedHeights map[int64]struct{}
+	ch               chan struct{}
+	prunedHeights    map[int64]struct{}
 	snapshotInterval uint64
 }
 
 func newHungSnapshotter() *hungSnapshotter {
 	return &hungSnapshotter{
-		ch: make(chan struct{}),
+		ch:            make(chan struct{}),
 		prunedHeights: make(map[int64]struct{}),
 	}
 }
diff --git a/snapshots/manager.go b/snapshots/manager.go
index 0d6800d2e5a3..b113cb568040 100644
--- a/snapshots/manager.go
+++ b/snapshots/manager.go
@@ -29,8 +29,8 @@ import (
 // errors via io.Pipe.CloseWithError().
 type Manager struct {
 	// store is the snapshot store where all completed snapshots are persisted.
-	store *Store
-	opts  *types.SnapshotOptions
+	store *Store
+	opts  types.SnapshotOptions
 	// target is the store from which snapshots are taken.
 	target types.Snapshotter
 	logger log.Logger
@@ -66,7 +66,7 @@ var (
 )
 
 // NewManager creates a new manager.
-func NewManager(store *Store, opts *types.SnapshotOptions, target types.Snapshotter, logger log.Logger) *Manager {
+func NewManager(store *Store, opts types.SnapshotOptions, target types.Snapshotter, logger log.Logger) *Manager {
 	target.SetSnapshotInterval(opts.Interval)
 	return &Manager{
 		store: store,
@@ -114,12 +114,12 @@ func (m *Manager) endLocked() {
 	m.restoreChunkIndex = 0
 }
 
-// GetInterval returns snapshot interval.
+// GetInterval returns snapshot interval represented in heights.
 func (m *Manager) GetInterval() uint64 {
 	return m.opts.Interval
 }
 
-// GetKeepRecent returns snapshot keep-recent.
+// GetKeepRecent returns snapshot keep-recent represented in heights.
 func (m *Manager) GetKeepRecent() uint32 {
 	return m.opts.KeepRecent
 }
@@ -293,8 +293,8 @@ func (m *Manager) RestoreChunk(chunk []byte) (bool, error) {
 	return false, nil
 }
 
-// SnapshotIfApplicable takes a snapshot of the current state if we are on a snapshot height.
-// It also prunes any old snapshots. 
The snapshotting and pruning happen in separate goroutines. +// SnapshotIfApplicable takes a snapshot of the current state if we are on a snapshot height. +// It also prunes any old snapshots. func (m *Manager) SnapshotIfApplicable(height int64) { if m == nil { return @@ -303,7 +303,7 @@ func (m *Manager) SnapshotIfApplicable(height int64) { m.logger.Debug("snapshot is skipped", "height", height) return } - go m.snapshot(height) + m.snapshot(height) } // shouldTakeSnapshot returns true is snapshot should be taken at height. @@ -314,6 +314,11 @@ func (m *Manager) shouldTakeSnapshot(height int64) bool { func (m *Manager) snapshot(height int64) { m.logger.Info("creating state snapshot", "height", height) + if height <= 0 { + m.logger.Error("snapshot height must be positive", "height", height) + return + } + snapshot, err := m.Create(uint64(height)) if err != nil { m.logger.Error("failed to create state snapshot", "height", height, "err", err) diff --git a/snapshots/store.go b/snapshots/store.go index 502b49839230..c2d6dcd81a71 100644 --- a/snapshots/store.go +++ b/snapshots/store.go @@ -32,8 +32,6 @@ type Store struct { saving map[uint64]bool // heights currently being saved } -var _ store.Store = (*Store)(nil) - // NewStore creates a new snapshot store. func NewStore(db db.DB, dir string) (*Store, error) { if dir == "" { @@ -296,11 +294,6 @@ func (s *Store) Save( return snapshot, s.saveSnapshot(snapshot) } -// GetStoreType implements the Store interface. It returns the underlying Store type. -func (*Store) GetStoreType() store.StoreType { - return store.StoreTypeSnapshot -} - // CacheWrap implements the Store interface. It panics because a Store // cannot be branched. func (*Store) CacheWrap() store.CacheWrap { diff --git a/snapshots/types/options.go b/snapshots/types/options.go index 16b078157b62..1ce39d486adb 100644 --- a/snapshots/types/options.go +++ b/snapshots/types/options.go @@ -6,12 +6,16 @@ type SnapshotOptions struct { // Interval defines at which heights the snapshot is taken. Interval uint64 - // KeepRecent defines how many snapshots to keep. + // KeepRecent defines how many snapshots to keep in heights. KeepRecent uint32 } -func NewSnapshotOptions(interval uint64, keepRecent uint32) *SnapshotOptions { - return &SnapshotOptions{ +// SnapshotIntervalOff represents the snapshot interval, at which +// no snapshots are taken. +const SnapshotIntervalOff uint64 = 0 + +func NewSnapshotOptions(interval uint64, keepRecent uint32) SnapshotOptions { + return SnapshotOptions{ Interval: interval, KeepRecent: keepRecent, } diff --git a/snapshots/types/snapshotter.go b/snapshots/types/snapshotter.go index fc60a30d838c..0bd475a32964 100644 --- a/snapshots/types/snapshotter.go +++ b/snapshots/types/snapshotter.go @@ -16,7 +16,8 @@ type Snapshotter interface { PruneSnapshotHeight(height int64) // SetSnapshotInterval sets the interval at which the snapshots are taken. - // It is used by the store to determine which heights to retain until after the snapshot is complete. + // It is used by the store that implements the Snapshotter interface + // to determine which heights to retain until after the snapshot is complete. SetSnapshotInterval(snapshotInterval uint64) // Restore restores a state snapshot, taking snapshot chunk readers as input. 
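The height arithmetic described in the `snapshots/README.md` changes above can be sanity-checked with a small, self-contained sketch. This is illustrative only and not SDK code; the helper name, its signature, and the `pendingSnapshots` parameter are assumptions made for the example:

```go
package main

import "fmt"

// prunableHeights mirrors the rule from snapshots/README.md: at commit height C,
// with previous height P = C - 1, every height h in
//
//	P - keepRecent - interval <= h <= P - keepRecent
//
// can be pruned, unless h is a snapshot height whose snapshot has not completed
// yet. This is an illustrative sketch, not the pruning.Manager implementation.
func prunableHeights(commitHeight, keepRecent, interval int64, pendingSnapshots map[int64]bool) []int64 {
	previous := commitHeight - 1
	lo := previous - keepRecent - interval
	hi := previous - keepRecent

	heights := make([]int64, 0)
	for h := lo; h <= hi; h++ {
		if h <= 0 {
			continue
		}
		if pendingSnapshots[h] {
			// A snapshot at h is still in progress: keep the height around and
			// prune it only after the snapshot completes.
			continue
		}
		heights = append(heights, h)
	}
	return heights
}

func main() {
	// README example: commit height 960, pruning-keep-recent = 50,
	// pruning-interval = 10, and the snapshot at height 900 still in progress.
	fmt.Println(prunableHeights(960, 50, 10, map[int64]bool{900: true}))
	// Prints: [899 901 902 903 904 905 906 907 908 909]
}
```

Height 900 is skipped here only while its snapshot is pending; once the snapshot completes, the manager keeps track of it and prunes it at the next height that satisfies the conditions listed in the README.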
diff --git a/store/iavl/store.go b/store/iavl/store.go index c4b50e9b131e..b6bf1f468989 100644 --- a/store/iavl/store.go +++ b/store/iavl/store.go @@ -14,7 +14,7 @@ import ( tmcrypto "github.com/tendermint/tendermint/proto/tendermint/crypto" dbm "github.com/tendermint/tm-db" - pruningTypes "github.com/cosmos/cosmos-sdk/pruning/types" + pruningtypes "github.com/cosmos/cosmos-sdk/pruning/types" "github.com/cosmos/cosmos-sdk/store/cachekv" "github.com/cosmos/cosmos-sdk/store/listenkv" "github.com/cosmos/cosmos-sdk/store/tracekv" @@ -145,13 +145,13 @@ func (st *Store) LastCommitID() types.CommitID { // SetPruning panics as pruning options should be provided at initialization // since IAVl accepts pruning options directly. -func (st *Store) SetPruning(_ *pruningTypes.PruningOptions) { +func (st *Store) SetPruning(_ pruningtypes.PruningOptions) { panic("cannot set pruning options on an initialized IAVL store") } // SetPruning panics as pruning options should be provided at initialization // since IAVl accepts pruning options directly. -func (st *Store) GetPruning() *pruningTypes.PruningOptions { +func (st *Store) GetPruning() pruningtypes.PruningOptions { panic("cannot get pruning options on an initialized IAVL store") } diff --git a/store/mem/store.go b/store/mem/store.go index 1f6ebacdc2dc..06d7b63f5506 100644 --- a/store/mem/store.go +++ b/store/mem/store.go @@ -5,7 +5,7 @@ import ( dbm "github.com/tendermint/tm-db" - pruningTypes "github.com/cosmos/cosmos-sdk/pruning/types" + pruningtypes "github.com/cosmos/cosmos-sdk/pruning/types" "github.com/cosmos/cosmos-sdk/store/cachekv" "github.com/cosmos/cosmos-sdk/store/dbadapter" "github.com/cosmos/cosmos-sdk/store/listenkv" @@ -55,12 +55,12 @@ func (s Store) CacheWrapWithListeners(storeKey types.StoreKey, listeners []types // Commit performs a no-op as entries are persistent between commitments. func (s *Store) Commit() (id types.CommitID) { return } -func (s *Store) SetPruning(pruning *pruningTypes.PruningOptions) {} +func (s *Store) SetPruning(pruning pruningtypes.PruningOptions) {} // GetPruning is a no-op as pruning options cannot be directly set on this store. // They must be set on the root commit multi-store. -func (s *Store) GetPruning() *pruningTypes.PruningOptions { - return pruningTypes.NewPruningOptions(pruningTypes.PruningUndefined) +func (s *Store) GetPruning() pruningtypes.PruningOptions { + return pruningtypes.NewPruningOptions(pruningtypes.PruningUndefined) } func (s Store) LastCommitID() (id types.CommitID) { return } diff --git a/store/rootmulti/dbadapter.go b/store/rootmulti/dbadapter.go index 157681461eec..bf8b0da7dbce 100644 --- a/store/rootmulti/dbadapter.go +++ b/store/rootmulti/dbadapter.go @@ -1,7 +1,7 @@ package rootmulti import ( - pruningTypes "github.com/cosmos/cosmos-sdk/pruning/types" + pruningtypes "github.com/cosmos/cosmos-sdk/pruning/types" "github.com/cosmos/cosmos-sdk/store/dbadapter" "github.com/cosmos/cosmos-sdk/store/types" ) @@ -31,10 +31,10 @@ func (cdsa commitDBStoreAdapter) LastCommitID() types.CommitID { } } -func (cdsa commitDBStoreAdapter) SetPruning(_ *pruningTypes.PruningOptions) {} +func (cdsa commitDBStoreAdapter) SetPruning(_ pruningtypes.PruningOptions) {} // GetPruning is a no-op as pruning options cannot be directly set on this store. // They must be set on the root commit multi-store. 
-func (cdsa commitDBStoreAdapter) GetPruning() *pruningTypes.PruningOptions { - return pruningTypes.NewPruningOptions(pruningTypes.PruningUndefined) +func (cdsa commitDBStoreAdapter) GetPruning() pruningtypes.PruningOptions { + return pruningtypes.NewPruningOptions(pruningtypes.PruningUndefined) } diff --git a/store/rootmulti/store.go b/store/rootmulti/store.go index 0924c97276bf..98ce593c5854 100644 --- a/store/rootmulti/store.go +++ b/store/rootmulti/store.go @@ -11,7 +11,7 @@ import ( "sync" "github.com/cosmos/cosmos-sdk/pruning" - pruningTypes "github.com/cosmos/cosmos-sdk/pruning/types" + pruningtypes "github.com/cosmos/cosmos-sdk/pruning/types" "github.com/cosmos/cosmos-sdk/snapshots" snapshottypes "github.com/cosmos/cosmos-sdk/snapshots/types" "github.com/cosmos/cosmos-sdk/store/cachemulti" @@ -60,7 +60,7 @@ type Store struct { db dbm.DB logger log.Logger lastCommitInfo *types.CommitInfo - mx *sync.RWMutex // mutex to sync access to lastCommitInfo + mx sync.RWMutex // mutex to sync access to lastCommitInfo pruningManager *pruning.Manager iavlCacheSize int storesParams map[types.StoreKey]storeParams @@ -95,20 +95,19 @@ func NewStore(db dbm.DB, logger log.Logger) *Store { stores: make(map[types.StoreKey]types.CommitKVStore), keysByName: make(map[string]types.StoreKey), listeners: make(map[types.StoreKey][]types.WriteListener), - pruningManager: pruning.NewManager(logger, db), - mx: &sync.RWMutex{}, + pruningManager: pruning.NewManager(db, logger), } } // GetPruning fetches the pruning strategy from the root store. -func (rs *Store) GetPruning() *pruningTypes.PruningOptions { +func (rs *Store) GetPruning() pruningtypes.PruningOptions { return rs.pruningManager.GetOptions() } // SetPruning sets the pruning strategy on the root store and all the sub-stores. // Note, calling SetPruning on the root store prior to LoadVersion or // LoadLatestVersion performs a no-op as the stores aren't mounted yet. -func (rs *Store) SetPruning(pruningOpts *pruningTypes.PruningOptions) { +func (rs *Store) SetPruning(pruningOpts pruningtypes.PruningOptions) { rs.pruningManager.SetOptions(pruningOpts) } @@ -172,9 +171,9 @@ func (rs *Store) GetCommitKVStore(key types.StoreKey) types.CommitKVStore { return rs.stores[key] } -// GetCommitKVStores get all kv stores associated wit the multistore. -func (rs *Store) GetCommitKVStores() map[types.StoreKey]types.CommitKVStore { - return rs.stores +// StoreKeysByName returns mapping storeNames -> StoreKeys +func (rs *Store) StoreKeysByName() map[string]types.StoreKey { + return rs.keysByName } // LoadLatestVersionAndUpgrade implements CommitMultiStore @@ -280,7 +279,7 @@ func (rs *Store) loadVersion(ver int64, upgrades *types.StoreUpgrades) error { rs.stores = newStores // load any pruned heights we missed from disk to be pruned on the next run - if err := rs.pruningManager.LoadPruningHeights(rs.db); err != nil { + if err := rs.pruningManager.LoadPruningHeights(); err != nil { return err } @@ -530,35 +529,45 @@ func (rs *Store) GetKVStore(key types.StoreKey) types.KVStore { } func (rs *Store) handlePruning(version int64) error { - defer rs.pruningManager.FlushPruningHeights() - rs.pruningManager.HandleHeight(version - 1) // we should never prune the current version. 
- if rs.pruningManager.ShouldPruneAtHeight(version) { - rs.logger.Info("prune start", "height", version) + if !rs.pruningManager.ShouldPruneAtHeight(version) { + return nil + } + rs.logger.Info("prune start", "height", version) + defer rs.logger.Info("prune end", "height", version) + return rs.pruneStores() +} + +func (rs *Store) pruneStores() error { + pruningHeights, err := rs.pruningManager.GetFlushAndResetPruningHeights() + if err != nil { + return err + } + + if len(pruningHeights) == 0 { + rs.logger.Debug("pruning skipped; no heights to prune") + return nil + } - pruningHeights := rs.pruningManager.GetPruningHeights() - rs.logger.Debug(fmt.Sprintf("pruning the following heights: %v\n", pruningHeights)) + rs.logger.Debug("pruning heights", "heights", pruningHeights) - if len(pruningHeights) == 0 { - return nil + for key, store := range rs.stores { + // If the store is wrapped with an inter-block cache, we must first unwrap + // it to get the underlying IAVL store. + if store.GetStoreType() != types.StoreTypeIAVL { + continue } - for key, store := range rs.stores { - if store.GetStoreType() == types.StoreTypeIAVL { - // If the store is wrapped with an inter-block cache, we must first unwrap - // it to get the underlying IAVL store. - store = rs.GetCommitKVStore(key) + store = rs.GetCommitKVStore(key) - if err := store.(*iavl.Store).DeleteVersions(pruningHeights...); err != nil { - if errCause := errors.Cause(err); errCause != nil && errCause != iavltree.ErrVersionDoesNotExist { - return err - } - } - } + err := store.(*iavl.Store).DeleteVersions(pruningHeights...) + if err == nil { + continue + } + + if errCause := errors.Cause(err); errCause != nil && errCause != iavltree.ErrVersionDoesNotExist { + return err } - rs.pruningManager.ResetPruningHeights() - rs.logger.Info("prune end", "height", version) - return nil } return nil } @@ -579,8 +588,8 @@ func (rs *Store) getStoreByName(name string) types.Store { // Query calls substore.Query with the same `req` where `req.Path` is // modified to remove the substore prefix. // Ie. `req.Path` here is `//`, and trimmed to `/` for the substore. -// Special case: if `req.Path` is `/proofs`, the commit hash is included -// as response value. In addition, proofs of every store are appended to the response for +// Special case: if `req.Path` is `/proofs`, the commit hash is included +// as response value. 
In addition, proofs of every store are appended to the response for // the requested height func (rs *Store) Query(req abci.RequestQuery) abci.ResponseQuery { path := req.Path @@ -615,7 +624,6 @@ func (rs *Store) Query(req abci.RequestQuery) abci.ResponseQuery { return sdkerrors.QueryResult(sdkerrors.Wrap(sdkerrors.ErrInvalidRequest, "proof is unexpectedly empty; ensure height has not been pruned")) } - commitInfo, err := rs.getCommitInfoFromDb(res.Height) if err != nil { return sdkerrors.QueryResult(err) @@ -892,7 +900,6 @@ func (rs *Store) Restore( } rs.flushLastCommitInfo(rs.buildCommitInfo(int64(height))) - rs.pruningManager.FlushPruningHeights() return rs.LoadLatestVersion() } @@ -1048,7 +1055,7 @@ func (rs *Store) doProofsQuery(req abci.RequestQuery) abci.ResponseQuery { } for _, storeInfo := range commitInfo.StoreInfos { - res.ProofOps.Ops = append(res.ProofOps.Ops, crypto.ProofOp{Key: []byte(storeInfo.Name), Data: storeInfo.CommitId.Hash, }) + res.ProofOps.Ops = append(res.ProofOps.Ops, crypto.ProofOp{Key: []byte(storeInfo.Name), Data: storeInfo.CommitId.Hash}) } return res } diff --git a/store/rootmulti/store_test.go b/store/rootmulti/store_test.go index 5ad5886b0e72..a39a14c8e6e0 100644 --- a/store/rootmulti/store_test.go +++ b/store/rootmulti/store_test.go @@ -20,8 +20,8 @@ import ( "github.com/cosmos/cosmos-sdk/codec" codecTypes "github.com/cosmos/cosmos-sdk/codec/types" + pruningtypes "github.com/cosmos/cosmos-sdk/pruning/types" snapshottypes "github.com/cosmos/cosmos-sdk/snapshots/types" - pruningTypes "github.com/cosmos/cosmos-sdk/pruning/types" "github.com/cosmos/cosmos-sdk/store/cachemulti" "github.com/cosmos/cosmos-sdk/store/iavl" sdkmaps "github.com/cosmos/cosmos-sdk/store/internal/maps" @@ -38,7 +38,7 @@ func TestStoreType(t *testing.T) { func TestGetCommitKVStore(t *testing.T) { var db dbm.DB = dbm.NewMemDB() - ms := newMultiStoreWithMounts(db, pruningTypes.NewPruningOptions(pruningTypes.PruningDefault)) + ms := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningDefault)) err := ms.LoadLatestVersion() require.Nil(t, err) @@ -71,7 +71,7 @@ func TestStoreMount(t *testing.T) { func TestCacheMultiStore(t *testing.T) { var db dbm.DB = dbm.NewMemDB() - ms := newMultiStoreWithMounts(db, pruningTypes.NewPruningOptions(pruningTypes.PruningNothing)) + ms := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) cacheMulti := ms.CacheMultiStore() require.IsType(t, cachemulti.Store{}, cacheMulti) @@ -79,7 +79,7 @@ func TestCacheMultiStore(t *testing.T) { func TestCacheMultiStoreWithVersion(t *testing.T) { var db dbm.DB = dbm.NewMemDB() - ms := newMultiStoreWithMounts(db, pruningTypes.NewPruningOptions(pruningTypes.PruningNothing)) + ms := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) err := ms.LoadLatestVersion() require.Nil(t, err) @@ -116,7 +116,7 @@ func TestCacheMultiStoreWithVersion(t *testing.T) { func TestHashStableWithEmptyCommit(t *testing.T) { var db dbm.DB = dbm.NewMemDB() - ms := newMultiStoreWithMounts(db, pruningTypes.NewPruningOptions(pruningTypes.PruningNothing)) + ms := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) err := ms.LoadLatestVersion() require.Nil(t, err) @@ -140,7 +140,7 @@ func TestHashStableWithEmptyCommit(t *testing.T) { func TestMultistoreCommitLoad(t *testing.T) { var db dbm.DB = dbm.NewMemDB() - store := newMultiStoreWithMounts(db, pruningTypes.NewPruningOptions(pruningTypes.PruningNothing)) + store := 
newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) err := store.LoadLatestVersion() require.Nil(t, err) @@ -165,7 +165,7 @@ func TestMultistoreCommitLoad(t *testing.T) { } // Load the latest multistore again and check version. - store = newMultiStoreWithMounts(db, pruningTypes.NewPruningOptions(pruningTypes.PruningNothing)) + store = newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) err = store.LoadLatestVersion() require.Nil(t, err) commitID = getExpectedCommitID(store, nCommits) @@ -178,7 +178,7 @@ func TestMultistoreCommitLoad(t *testing.T) { // Load an older multistore and check version. ver := nCommits - 1 - store = newMultiStoreWithMounts(db, pruningTypes.NewPruningOptions(pruningTypes.PruningNothing)) + store = newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) err = store.LoadVersion(ver) require.Nil(t, err) commitID = getExpectedCommitID(store, ver) @@ -187,7 +187,7 @@ func TestMultistoreCommitLoad(t *testing.T) { func TestMultistoreLoadWithUpgrade(t *testing.T) { var db dbm.DB = dbm.NewMemDB() - store := newMultiStoreWithMounts(db, pruningTypes.NewPruningOptions(pruningTypes.PruningNothing)) + store := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) err := store.LoadLatestVersion() require.Nil(t, err) @@ -222,7 +222,7 @@ func TestMultistoreLoadWithUpgrade(t *testing.T) { checkContains(t, ci.StoreInfos, []string{"store1", "store2", "store3"}) // Load without changes and make sure it is sensible - store = newMultiStoreWithMounts(db, pruningTypes.NewPruningOptions(pruningTypes.PruningNothing)) + store = newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) err = store.LoadLatestVersion() require.Nil(t, err) @@ -235,7 +235,7 @@ func TestMultistoreLoadWithUpgrade(t *testing.T) { require.Equal(t, v2, s2.Get(k2)) // now, let's load with upgrades... 
- restore, upgrades := newMultiStoreWithModifiedMounts(db, pruningTypes.NewPruningOptions(pruningTypes.PruningNothing)) + restore, upgrades := newMultiStoreWithModifiedMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) err = restore.LoadLatestVersionAndUpgrade(upgrades) require.Nil(t, err) @@ -280,7 +280,7 @@ func TestMultistoreLoadWithUpgrade(t *testing.T) { migratedID := restore.Commit() require.Equal(t, migratedID.Version, int64(2)) - reload, _ := newMultiStoreWithModifiedMounts(db, pruningTypes.NewPruningOptions(pruningTypes.PruningNothing)) + reload, _ := newMultiStoreWithModifiedMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) err = reload.LoadLatestVersion() require.Nil(t, err) require.Equal(t, migratedID, reload.LastCommitID()) @@ -329,7 +329,7 @@ func TestParsePath(t *testing.T) { func TestMultiStoreRestart(t *testing.T) { db := dbm.NewMemDB() - pruning := pruningTypes.NewCustomPruningOptions(2, 1) + pruning := pruningtypes.NewCustomPruningOptions(2, 1) multi := newMultiStoreWithMounts(db, pruning) err := multi.LoadLatestVersion() require.Nil(t, err) @@ -408,7 +408,7 @@ func TestMultiStoreRestart(t *testing.T) { func TestMultiStoreQuery(t *testing.T) { db := dbm.NewMemDB() - multi := newMultiStoreWithMounts(db, pruningTypes.NewPruningOptions(pruningTypes.PruningNothing)) + multi := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) err := multi.LoadLatestVersion() require.Nil(t, err) @@ -435,7 +435,7 @@ func TestMultiStoreQuery(t *testing.T) { ver := cid2.Version // Reload multistore from database - multi = newMultiStoreWithMounts(db, pruningTypes.NewPruningOptions(pruningTypes.PruningNothing)) + multi = newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) err = multi.LoadLatestVersion() require.Nil(t, err) @@ -500,15 +500,15 @@ func TestMultiStore_Pruning(t *testing.T) { testCases := []struct { name string numVersions int64 - po *pruningTypes.PruningOptions + po pruningtypes.PruningOptions deleted []int64 saved []int64 }{ - {"prune nothing", 10, pruningTypes.NewPruningOptions(pruningTypes.PruningNothing), nil, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}}, - {"prune everything", 10, pruningTypes.NewPruningOptions(pruningTypes.PruningEverything), []int64{1, 2, 3, 4, 5, 6, 7, 8, 9}, []int64{10}}, - {"prune some; no batch", 10, pruningTypes.NewCustomPruningOptions(2, 1), []int64{1, 2, 4, 5, 7}, []int64{3, 6, 8, 9, 10}}, - {"prune some; small batch", 10, pruningTypes.NewCustomPruningOptions(2, 3), []int64{1, 2, 4, 5}, []int64{3, 6, 7, 8, 9, 10}}, - {"prune some; large batch", 10, pruningTypes.NewCustomPruningOptions(2, 11), nil, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}}, + {"prune nothing", 10, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing), nil, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}}, + {"prune everything", 10, pruningtypes.NewPruningOptions(pruningtypes.PruningEverything), []int64{1, 2, 3, 4, 5, 6, 7, 8, 9}, []int64{10}}, + {"prune some; no batch", 10, pruningtypes.NewCustomPruningOptions(2, 1), []int64{1, 2, 4, 5, 7}, []int64{3, 6, 8, 9, 10}}, + {"prune some; small batch", 10, pruningtypes.NewCustomPruningOptions(2, 3), []int64{1, 2, 4, 5}, []int64{3, 6, 7, 8, 9, 10}}, + {"prune some; large batch", 10, pruningtypes.NewCustomPruningOptions(2, 11), nil, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}}, } for _, tc := range testCases { @@ -536,9 +536,63 @@ func TestMultiStore_Pruning(t *testing.T) { } } +func TestMultiStore_Pruning_SameHeightsTwice(t *testing.T) { + 
const (
+		numVersions int64  = 10
+		keepRecent  uint64 = 2
+		interval    uint64 = 10
+	)
+
+	expectedHeights := []int64{}
+	for i := int64(1); i < numVersions-int64(keepRecent); i++ {
+		expectedHeights = append(expectedHeights, i)
+	}
+
+	db := dbm.NewMemDB()
+
+	ms := newMultiStoreWithMounts(db, pruningtypes.NewCustomPruningOptions(keepRecent, interval))
+	require.NoError(t, ms.LoadLatestVersion())
+
+	var lastCommitInfo types.CommitID
+	for i := int64(0); i < numVersions; i++ {
+		lastCommitInfo = ms.Commit()
+	}
+
+	require.Equal(t, numVersions, lastCommitInfo.Version)
+
+	for v := int64(1); v < numVersions-int64(keepRecent); v++ {
+		err := ms.LoadVersion(v)
+		require.Error(t, err, "expected error when loading pruned height: %d", v)
+	}
+
+	for v := int64(numVersions - int64(keepRecent)); v < numVersions; v++ {
+		err := ms.LoadVersion(v)
+		require.NoError(t, err, "expected no error when loading height: %d", v)
+	}
+
+	// Get latest
+	err := ms.LoadVersion(numVersions - 1)
+	require.NoError(t, err)
+
+	// Ensure already pruned heights were loaded
+	heights, err := ms.pruningManager.GetFlushAndResetPruningHeights()
+	require.NoError(t, err)
+	require.Equal(t, expectedHeights, heights)
+
+	require.NoError(t, ms.pruningManager.LoadPruningHeights())
+
+	// Test pruning the same heights again
+	lastCommitInfo = ms.Commit()
+	require.Equal(t, numVersions, lastCommitInfo.Version)
+
+	// Ensure that can commit one more height with no panic
+	lastCommitInfo = ms.Commit()
+	require.Equal(t, numVersions+1, lastCommitInfo.Version)
+}
+
 func TestMultiStore_PruningRestart(t *testing.T) {
 	db := dbm.NewMemDB()
-	ms := newMultiStoreWithMounts(db, pruningTypes.NewCustomPruningOptions(2, 11))
+	ms := newMultiStoreWithMounts(db, pruningtypes.NewCustomPruningOptions(2, 11))
 	ms.SetSnapshotInterval(3)
 	require.NoError(t, ms.LoadLatestVersion())

@@ -551,20 +605,30 @@ func TestMultiStore_PruningRestart(t *testing.T) {
 	pruneHeights := []int64{1, 2, 4, 5, 7}

 	// ensure we've persisted the current batch of heights to prune to the store's DB
-	err := ms.pruningManager.LoadPruningHeights(ms.db)
+	err := ms.pruningManager.LoadPruningHeights()
+	require.NoError(t, err)
+
+	actualHeightsToPrune, err := ms.pruningManager.GetFlushAndResetPruningHeights()
 	require.NoError(t, err)
-	require.Equal(t, pruneHeights, ms.pruningManager.GetPruningHeights())
+	require.Equal(t, len(pruneHeights), len(actualHeightsToPrune))
+	require.Equal(t, pruneHeights, actualHeightsToPrune)

 	// "restart"
-	ms = newMultiStoreWithMounts(db, pruningTypes.NewCustomPruningOptions(2, 11))
+	ms = newMultiStoreWithMounts(db, pruningtypes.NewCustomPruningOptions(2, 11))
 	ms.SetSnapshotInterval(3)
 	err = ms.LoadLatestVersion()
 	require.NoError(t, err)
-	require.Equal(t, pruneHeights, ms.pruningManager.GetPruningHeights())
+
+	actualHeightsToPrune, err = ms.pruningManager.GetFlushAndResetPruningHeights()
+	require.NoError(t, err)
+	require.Equal(t, pruneHeights, actualHeightsToPrune)

 	// commit one more block and ensure the heights have been pruned
 	ms.Commit()
-	require.Empty(t, ms.pruningManager.GetPruningHeights())
+
+	actualHeightsToPrune, err = ms.pruningManager.GetFlushAndResetPruningHeights()
+	require.NoError(t, err)
+	require.Empty(t, actualHeightsToPrune)

 	for _, v := range pruneHeights {
 		_, err := ms.CacheMultiStoreWithVersion(v)
@@ -689,7 +753,7 @@ func TestMultistoreSnapshotRestore(t *testing.T) {

 func TestSetInitialVersion(t *testing.T) {
 	db := dbm.NewMemDB()
-	multi := newMultiStoreWithMounts(db, pruningTypes.NewPruningOptions(pruningTypes.PruningNothing))
+	multi := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing))
 	require.NoError(t, multi.LoadLatestVersion())

@@ -707,7 +771,7 @@ func TestSetInitialVersion(t *testing.T) {

 func TestAddListenersAndListeningEnabled(t *testing.T) {
 	db := dbm.NewMemDB()
-	multi := newMultiStoreWithMounts(db, pruningTypes.NewPruningOptions(pruningTypes.PruningNothing))
+	multi := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing))
 	testKey := types.NewKVStoreKey("listening_test_key")
 	enabled := multi.ListeningEnabled(testKey)
 	require.False(t, enabled)
@@ -738,7 +802,7 @@ var (
 func TestGetListenWrappedKVStore(t *testing.T) {
 	buf := new(bytes.Buffer)
 	var db dbm.DB = dbm.NewMemDB()
-	ms := newMultiStoreWithMounts(db, pruningTypes.NewPruningOptions(pruningTypes.PruningNothing))
+	ms := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing))
 	ms.LoadLatestVersion()
 	mockListeners := []types.WriteListener{types.NewStoreKVPairWriteListener(buf, testMarshaller)}
 	ms.AddListeners(testStoreKey1, mockListeners)
@@ -814,7 +878,7 @@ func TestGetListenWrappedKVStore(t *testing.T) {

 func TestCacheWraps(t *testing.T) {
 	db := dbm.NewMemDB()
-	multi := newMultiStoreWithMounts(db, pruningTypes.NewPruningOptions(pruningTypes.PruningNothing))
+	multi := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing))

 	cacheWrapper := multi.CacheWrap()
 	require.IsType(t, cachemulti.Store{}, cacheWrapper)
@@ -908,7 +972,7 @@ var (
 	testStoreKey3 = types.NewKVStoreKey("store3")
 )

-func newMultiStoreWithMounts(db dbm.DB, pruningOpts *pruningTypes.PruningOptions) *Store {
+func newMultiStoreWithMounts(db dbm.DB, pruningOpts pruningtypes.PruningOptions) *Store {
 	store := NewStore(db, log.NewNopLogger())
 	store.SetPruning(pruningOpts)

@@ -988,7 +1052,7 @@ func newMultiStoreWithGeneratedData(db dbm.DB, stores uint8, storeKeys uint64) *
 	return multiStore
 }

-func newMultiStoreWithModifiedMounts(db dbm.DB, pruningOpts *pruningTypes.PruningOptions) (*Store, *types.StoreUpgrades) {
+func newMultiStoreWithModifiedMounts(db dbm.DB, pruningOpts pruningtypes.PruningOptions) (*Store, *types.StoreUpgrades) {
 	store := NewStore(db, log.NewNopLogger())
 	store.SetPruning(pruningOpts)

diff --git a/store/transient/store.go b/store/transient/store.go
index 90370b5939c6..b9723b56efd5 100644
--- a/store/transient/store.go
+++ b/store/transient/store.go
@@ -3,7 +3,7 @@ package transient
 import (
 	dbm "github.com/tendermint/tm-db"

-	pruningTypes "github.com/cosmos/cosmos-sdk/pruning/types"
+	pruningtypes "github.com/cosmos/cosmos-sdk/pruning/types"
 	"github.com/cosmos/cosmos-sdk/store/dbadapter"
 	"github.com/cosmos/cosmos-sdk/store/types"
 )
@@ -28,12 +28,12 @@ func (ts *Store) Commit() (id types.CommitID) {
 	return
 }

-func (ts *Store) SetPruning(_ *pruningTypes.PruningOptions) {}
+func (ts *Store) SetPruning(_ pruningtypes.PruningOptions) {}

 // GetPruning is a no-op as pruning options cannot be directly set on this store.
 // They must be set on the root commit multi-store.
-func (ts *Store) GetPruning() *pruningTypes.PruningOptions {
-	return pruningTypes.NewPruningOptions(pruningTypes.PruningUndefined)
+func (ts *Store) GetPruning() pruningtypes.PruningOptions {
+	return pruningtypes.NewPruningOptions(pruningtypes.PruningUndefined)
 }

 // Implements CommitStore
diff --git a/store/transient/store_test.go b/store/transient/store_test.go
index 8b99872e69c0..48324aa9491d 100644
--- a/store/transient/store_test.go
+++ b/store/transient/store_test.go
@@ -6,9 +6,9 @@ import (

 	"github.com/stretchr/testify/require"

+	pruningtypes "github.com/cosmos/cosmos-sdk/pruning/types"
 	"github.com/cosmos/cosmos-sdk/store/transient"
 	"github.com/cosmos/cosmos-sdk/store/types"
-	pruningTypes "github.com/cosmos/cosmos-sdk/pruning/types"
 )

 var k, v = []byte("hello"), []byte("world")
@@ -27,7 +27,7 @@ func TestTransientStore(t *testing.T) {
 	require.Nil(t, tstore.Get(k))

 	// no-op
-	tstore.SetPruning(pruningTypes.NewPruningOptions(pruningTypes.PruningUndefined))
+	tstore.SetPruning(pruningtypes.NewPruningOptions(pruningtypes.PruningUndefined))

 	emptyCommitID := tstore.LastCommitID()
 	require.Equal(t, emptyCommitID.Version, int64(0))
diff --git a/store/types/store.go b/store/types/store.go
index 933a05ac00fb..511a547e2f68 100644
--- a/store/types/store.go
+++ b/store/types/store.go
@@ -8,7 +8,7 @@ import (
 	tmstrings "github.com/tendermint/tendermint/libs/strings"
 	dbm "github.com/tendermint/tm-db"

-	pruningTypes "github.com/cosmos/cosmos-sdk/pruning/types"
+	pruningtypes "github.com/cosmos/cosmos-sdk/pruning/types"
 	snapshottypes "github.com/cosmos/cosmos-sdk/snapshots/types"
 	"github.com/cosmos/cosmos-sdk/types/kv"
 )
@@ -23,8 +23,8 @@ type Committer interface {
 	Commit() CommitID
 	LastCommitID() CommitID

-	SetPruning(*pruningTypes.PruningOptions)
-	GetPruning() *pruningTypes.PruningOptions
+	SetPruning(pruningtypes.PruningOptions)
+	GetPruning() pruningtypes.PruningOptions
 }

 // Stores of MultiStore must implement CommitStore.
@@ -162,9 +162,6 @@ type CommitMultiStore interface {
 	// Panics on a nil key.
 	GetCommitKVStore(key StoreKey) CommitKVStore

-	// GetCommitKVStores get all kv stores associated with the multistore.
-	GetCommitKVStores() map[StoreKey]CommitKVStore
-
 	// Load the latest persisted version. Called once after all calls to
 	// Mount*Store() are complete.
 	LoadLatestVersion() error
@@ -303,7 +300,6 @@ const (
 	StoreTypeIAVL
 	StoreTypeTransient
 	StoreTypeMemory
-	StoreTypeSnapshot
 )

 func (st StoreType) String() string {
@@ -322,9 +318,6 @@ func (st StoreType) String() string {

 	case StoreTypeMemory:
 		return "StoreTypeMemory"
-
-	case StoreTypeSnapshot:
-		return "StoreTypeSnapshot"
 	}

 	return "unknown store type"
diff --git a/store/types/utils_test.go b/store/types/utils_test.go
index 391b42b33023..2eac68dd02a1 100644
--- a/store/types/utils_test.go
+++ b/store/types/utils_test.go
@@ -9,17 +9,17 @@ import (
 	dbm "github.com/tendermint/tm-db"

 	"github.com/cosmos/cosmos-sdk/store/rootmulti"
-	"github.com/cosmos/cosmos-sdk/store/types"
+	sdk "github.com/cosmos/cosmos-sdk/store/types"
 )

-func initTestStores(t *testing.T) (types.KVStore, types.KVStore) {
+func initTestStores(t *testing.T) (sdk.KVStore, sdk.KVStore) {
 	db := dbm.NewMemDB()
 	ms := rootmulti.NewStore(db, log.NewNopLogger())

-	key1 := types.NewKVStoreKey("store1")
-	key2 := types.NewKVStoreKey("store2")
-	require.NotPanics(t, func() { ms.MountStoreWithDB(key1, types.StoreTypeIAVL, db) })
-	require.NotPanics(t, func() { ms.MountStoreWithDB(key2, types.StoreTypeIAVL, db) })
+	key1 := sdk.NewKVStoreKey("store1")
+	key2 := sdk.NewKVStoreKey("store2")
+	require.NotPanics(t, func() { ms.MountStoreWithDB(key1, sdk.StoreTypeIAVL, db) })
+	require.NotPanics(t, func() { ms.MountStoreWithDB(key2, sdk.StoreTypeIAVL, db) })
 	require.NoError(t, ms.LoadLatestVersion())
 	return ms.GetKVStore(key1), ms.GetKVStore(key2)
 }
@@ -32,27 +32,27 @@ func TestDiffKVStores(t *testing.T) {
 	store1.Set(k1, v1)
 	store2.Set(k1, v1)

-	kvAs, kvBs := types.DiffKVStores(store1, store2, nil)
+	kvAs, kvBs := sdk.DiffKVStores(store1, store2, nil)
 	require.Equal(t, 0, len(kvAs))
 	require.Equal(t, len(kvAs), len(kvBs))

 	// delete k1 from store2, which is now empty
 	store2.Delete(k1)
-	kvAs, kvBs = types.DiffKVStores(store1, store2, nil)
+	kvAs, kvBs = sdk.DiffKVStores(store1, store2, nil)
 	require.Equal(t, 1, len(kvAs))
 	require.Equal(t, len(kvAs), len(kvBs))

 	// set k1 in store2, different value than what store1 holds for k1
 	v2 := []byte("v2")
 	store2.Set(k1, v2)
-	kvAs, kvBs = types.DiffKVStores(store1, store2, nil)
+	kvAs, kvBs = sdk.DiffKVStores(store1, store2, nil)
 	require.Equal(t, 1, len(kvAs))
 	require.Equal(t, len(kvAs), len(kvBs))

 	// add k2 to store2
 	k2 := []byte("k2")
 	store2.Set(k2, v2)
-	kvAs, kvBs = types.DiffKVStores(store1, store2, nil)
+	kvAs, kvBs = sdk.DiffKVStores(store1, store2, nil)
 	require.Equal(t, 2, len(kvAs))
 	require.Equal(t, len(kvAs), len(kvBs))

@@ -66,7 +66,7 @@ func TestDiffKVStores(t *testing.T) {
 	k1Prefixed := append(prefix, k1...)
 	store1.Set(k1Prefixed, v1)
 	store2.Set(k1Prefixed, v2)
-	kvAs, kvBs = types.DiffKVStores(store1, store2, [][]byte{prefix})
+	kvAs, kvBs = sdk.DiffKVStores(store1, store2, [][]byte{prefix})
 	require.Equal(t, 0, len(kvAs))
 	require.Equal(t, len(kvAs), len(kvBs))
 }
@@ -74,16 +74,16 @@ func TestDiffKVStores(t *testing.T) {
 func TestPrefixEndBytes(t *testing.T) {
 	t.Parallel()
 	bs1 := []byte{0x23, 0xA5, 0x06}
-	require.True(t, bytes.Equal([]byte{0x23, 0xA5, 0x07}, types.PrefixEndBytes(bs1)))
+	require.True(t, bytes.Equal([]byte{0x23, 0xA5, 0x07}, sdk.PrefixEndBytes(bs1)))

 	bs2 := []byte{0x23, 0xA5, 0xFF}
-	require.True(t, bytes.Equal([]byte{0x23, 0xA6}, types.PrefixEndBytes(bs2)))
-	require.Nil(t, types.PrefixEndBytes([]byte{0xFF}))
-	require.Nil(t, types.PrefixEndBytes(nil))
+	require.True(t, bytes.Equal([]byte{0x23, 0xA6}, sdk.PrefixEndBytes(bs2)))
+	require.Nil(t, sdk.PrefixEndBytes([]byte{0xFF}))
+	require.Nil(t, sdk.PrefixEndBytes(nil))
 }

 func TestInclusiveEndBytes(t *testing.T) {
 	t.Parallel()
-	require.True(t, bytes.Equal([]byte{0x00}, types.InclusiveEndBytes(nil)))
+	require.True(t, bytes.Equal([]byte{0x00}, sdk.InclusiveEndBytes(nil)))
 	bs := []byte("test")
-	require.True(t, bytes.Equal(append(bs, byte(0x00)), types.InclusiveEndBytes(bs)))
+	require.True(t, bytes.Equal(append(bs, byte(0x00)), sdk.InclusiveEndBytes(bs)))
 }
diff --git a/testutil/network/network.go b/testutil/network/network.go
index 4115670d3e05..d0cc566c2ec8 100644
--- a/testutil/network/network.go
+++ b/testutil/network/network.go
@@ -33,7 +33,7 @@ import (
 	"github.com/cosmos/cosmos-sdk/crypto/hd"
 	"github.com/cosmos/cosmos-sdk/crypto/keyring"
 	cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types"
-	pruningTypes "github.com/cosmos/cosmos-sdk/pruning/types"
+	pruningtypes "github.com/cosmos/cosmos-sdk/pruning/types"
 	"github.com/cosmos/cosmos-sdk/server"
 	"github.com/cosmos/cosmos-sdk/server/api"
 	srvconfig "github.com/cosmos/cosmos-sdk/server/config"
@@ -62,7 +62,7 @@ func NewAppConstructor(encodingCfg params.EncodingConfig) AppConstructor {
 			val.Ctx.Logger, dbm.NewMemDB(), nil, true, make(map[int64]bool), val.Ctx.Config.RootDir, 0,
 			encodingCfg,
 			simapp.EmptyAppOptions{},
-			baseapp.SetPruning(pruningTypes.NewPruningOptionsFromString(val.AppConfig.Pruning)),
+			baseapp.SetPruning(pruningtypes.NewPruningOptionsFromString(val.AppConfig.Pruning)),
 			baseapp.SetMinGasPrices(val.AppConfig.MinGasPrices),
 		)
 	}
@@ -116,7 +116,7 @@ func DefaultConfig() Config {
 		AccountTokens:     sdk.TokensFromConsensusPower(1000, sdk.DefaultPowerReduction),
 		StakingTokens:     sdk.TokensFromConsensusPower(500, sdk.DefaultPowerReduction),
 		BondedTokens:      sdk.TokensFromConsensusPower(100, sdk.DefaultPowerReduction),
-		PruningStrategy:   pruningTypes.PruningOptionNothing,
+		PruningStrategy:   pruningtypes.PruningOptionNothing,
 		CleanupDir:        true,
 		SigningAlgo:       string(hd.Secp256k1Type),
 		KeyringOptions:    []keyring.Option{},
diff --git a/types/store.go b/types/store.go
index dd89176a4bcc..8bc07d12ed37 100644
--- a/types/store.go
+++ b/types/store.go
@@ -5,17 +5,10 @@ import (
 	"sort"
 	"strings"

-	pruningTypes "github.com/cosmos/cosmos-sdk/pruning/types"
-	snapshotTypes "github.com/cosmos/cosmos-sdk/snapshots/types"
 	"github.com/cosmos/cosmos-sdk/store/types"
 	"github.com/cosmos/cosmos-sdk/types/kv"
 )

-type (
-	PruningOptions  = pruningTypes.PruningOptions
-	SnapshotOptions = snapshotTypes.SnapshotOptions
-)
-
 type (
 	Store     = types.Store
 	Committer = types.Committer
@@ -87,17 +80,6 @@ type (
 	MemoryStoreKey = types.MemoryStoreKey
 )

-type (
-	PruningStrategy = pruningTypes.PruningStrategy
-)
-
-const (
-	Default    PruningStrategy = pruningTypes.PruningDefault
-	Everything                 = pruningTypes.PruningEverything
-	Nothing                    = pruningTypes.PruningNothing
-	Custom                     = pruningTypes.PruningCustom
-)
-
 // assertNoCommonPrefix will panic if there are two keys: k1 and k2 in keys, such that
 // k1 is a prefix of k2
 func assertNoPrefix(keys []string) {
@@ -207,15 +189,3 @@ func NewGasMeter(limit Gas) GasMeter {
 func NewInfiniteGasMeter() GasMeter {
 	return types.NewInfiniteGasMeter()
 }
-
-func NewSnapshotOptions(interval uint64, keepRecent uint32) *snapshotTypes.SnapshotOptions {
-	return snapshotTypes.NewSnapshotOptions(interval, keepRecent)
-}
-
-func NewPruningOptions(pruningStrategy PruningStrategy) *pruningTypes.PruningOptions {
-	return pruningTypes.NewPruningOptions(pruningStrategy)
-}
-
-func NewCustomPruningOptions(keepRecent, interval uint64) *pruningTypes.PruningOptions {
-	return pruningTypes.NewCustomPruningOptions(keepRecent, interval)
-}
diff --git a/x/upgrade/types/storeloader_test.go b/x/upgrade/types/storeloader_test.go
index 4136296209a5..62a56acc5b13 100644
--- a/x/upgrade/types/storeloader_test.go
+++ b/x/upgrade/types/storeloader_test.go
@@ -14,9 +14,9 @@ import (
 	dbm "github.com/tendermint/tm-db"

 	"github.com/cosmos/cosmos-sdk/baseapp"
+	pruningtypes "github.com/cosmos/cosmos-sdk/pruning/types"
 	"github.com/cosmos/cosmos-sdk/store/rootmulti"
 	store "github.com/cosmos/cosmos-sdk/store/types"
-	pruningTypes "github.com/cosmos/cosmos-sdk/pruning/types"
 	sdk "github.com/cosmos/cosmos-sdk/types"
 )

@@ -32,7 +32,7 @@ func defaultLogger() log.Logger {

 func initStore(t *testing.T, db dbm.DB, storeKey string, k, v []byte) {
 	rs := rootmulti.NewStore(db, log.NewNopLogger())
-	rs.SetPruning(pruningTypes.NewPruningOptions(pruningTypes.PruningNothing))
+	rs.SetPruning(pruningtypes.NewPruningOptions(pruningtypes.PruningNothing))
 	key := sdk.NewKVStoreKey(storeKey)
 	rs.MountStoreWithDB(key, store.StoreTypeIAVL, nil)
 	err := rs.LoadLatestVersion()
@@ -49,7 +49,7 @@ func initStore(t *testing.T, db dbm.DB, storeKey string, k, v []byte) {

 func checkStore(t *testing.T, db dbm.DB, ver int64, storeKey string, k, v []byte) {
 	rs := rootmulti.NewStore(db, log.NewNopLogger())
-	rs.SetPruning(pruningTypes.NewPruningOptions(pruningTypes.PruningNothing))
+	rs.SetPruning(pruningtypes.NewPruningOptions(pruningtypes.PruningNothing))
 	key := sdk.NewKVStoreKey(storeKey)
 	rs.MountStoreWithDB(key, store.StoreTypeIAVL, nil)
 	err := rs.LoadLatestVersion()
@@ -118,7 +118,7 @@ func TestSetLoader(t *testing.T) {
 			initStore(t, db, tc.origStoreKey, k, v)

 			// load the app with the existing db
-			opts := []func(*baseapp.BaseApp){baseapp.SetPruning(pruningTypes.NewPruningOptions(pruningTypes.PruningNothing))}
+			opts := []func(*baseapp.BaseApp){baseapp.SetPruning(pruningtypes.NewPruningOptions(pruningtypes.PruningNothing))}
 			origapp := baseapp.NewBaseApp(t.Name(), defaultLogger(), db, nil, opts...)
 			origapp.MountStores(sdk.NewKVStoreKey(tc.origStoreKey))