diff --git a/.github/workflows/atlas.yml b/.github/workflows/atlas.yml
index 25e749439d..23a47fea84 100644
--- a/.github/workflows/atlas.yml
+++ b/.github/workflows/atlas.yml
@@ -3,7 +3,7 @@ name: Atlas
 on:
   push:
     branches:
-      - master
+      - main
     paths:
       - "x/**/atlas/*"
   pull_request:
diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
index 830eb13077..435033445a 100644
--- a/.github/workflows/lint.yml
+++ b/.github/workflows/lint.yml
@@ -1,27 +1,26 @@
 name: Lint
-# Lint runs golangci-lint over the entire cosmos-sdk repository
-# This workflow is run on every pull request and push to master
-# The `golangci` will pass without running if no *.{go, mod, sum} files have been changed.
 on:
-  pull_request:
   push:
+    tags:
+      - v*
     branches:
-      - master
+      - main
+  pull_request:
+permissions:
+  contents: read
+  # Optional: allow read access to pull requests. Use with the `only-new-issues` option.
+  # pull-requests: read
 jobs:
   golangci:
     name: golangci-lint
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/setup-go@v2.1.4
+      - uses: actions/setup-go@v3
         with:
-          go-version: 1.18
-      - uses: technote-space/get-diff-action@v5
-        id: git_diff
+          go-version: 1.19
+      - uses: actions/checkout@v3
+      - name: golangci-lint
+        uses: golangci/golangci-lint-action@v3
         with:
-          PATTERNS: |
-            **/**.go
-            go.mod
-            go.sum
-      - name: run go linters
-        run: make lint-go
-        if: env.GIT_DIFF
+          # Optional: version of golangci-lint to use, in the form v1.2, v1.2.3, or `latest` for the latest version
+          version: latest
\ No newline at end of file
diff --git a/.github/workflows/sims.yml b/.github/workflows/sims.yml
index 4d885b4689..4750c6c679 100644
--- a/.github/workflows/sims.yml
+++ b/.github/workflows/sims.yml
@@ -5,7 +5,7 @@ on:
   pull_request:
   push:
     branches:
-      - master
+      - main
 jobs:
   build:
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index f9961724a1..a3075a6d47 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -1,11 +1,11 @@
 name: Tests / Code Coverage
 # Tests / Code Coverage workflow runs unit tests and uploads a code coverage report
-# This workflow is run on pushes to master & every Pull Requests where a .go, .mod, .sum have been changed
+# This workflow is run on pushes to main & every pull request where a .go, .mod, or .sum file has been changed
 on:
   pull_request:
   push:
     branches:
-      - master
+      - main
 jobs:
   install-tparse:
     runs-on: ubuntu-latest
@@ -222,7 +222,7 @@ jobs:
 
 # Disabled since this can't build
 # the contrib/images/simd-env/Dockerfile is looking for a
-# db/go.mod file, which only exists on cosmos/cosmos-sdk master
+# db/go.mod file, which only exists on cosmos/cosmos-sdk main
 # branch...
 #
 # liveness-test:
diff --git a/.gitignore b/.gitignore
index 0f2842e557..9013e7c9a9 100644
--- a/.gitignore
+++ b/.gitignore
@@ -6,6 +6,7 @@
 *.swm
 *.swn
 *.pyc
+.dccache
 
 # private files
 private[.-]*
diff --git a/.golangci.yml b/.golangci.yml
index bc6386847b..08df5b3583 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -1,51 +1,35 @@
 run:
   tests: false
-# # timeout for analysis, e.g. 30s, 5m, default is 1m
-# timeout: 5m
+  # timeout for analysis, e.g. 30s, 5m, default is 1m
+  timeout: 5m
 
 linters:
   disable-all: true
   enable:
-    - bodyclose
-    - deadcode
     - depguard
     - dogsled
-    # - errcheck
+    - exportloopref
     - goconst
     - gocritic
-    - gofmt
-    - goimports
+    - gofumpt
     - gosec
     - gosimple
     - govet
     - ineffassign
     - misspell
     - nakedret
-    - prealloc
-    - revive
-    - exportloopref
+    - nolintlint
     - staticcheck
-    - structcheck
     - stylecheck
     - typecheck
     - unconvert
     - unused
-    - unparam
-    - misspell
-    # - wsl
-    - nolintlint
 
 issues:
   exclude-rules:
     - text: "Use of weak random number generator"
       linters:
         - gosec
-    - text: "comment on exported var"
-      linters:
-        - golint
-    - text: "don't use an underscore in package name"
-      linters:
-        - golint
     - text: "ST1003:"
       linters:
         - stylecheck
@@ -54,7 +38,10 @@ issues:
     - text: "ST1016:"
       linters:
         - stylecheck
-    - path: "legacy"
+    - text: "should be written without leading space as"
+      linters:
+        - nolintlint
+    - path: "migrations"
       text: "SA1019:"
       linters:
         - staticcheck
@@ -70,6 +57,5 @@ linters-settings:
     suggest-new: true
   nolintlint:
     allow-unused: false
-    allow-leading-space: true
     require-explanation: false
-    require-specific: false
+    require-specific: false
\ No newline at end of file
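The `.golangci.yml` changes above enable `nolintlint`: with `allow-unused: false`, a stale `nolint` directive is itself a lint error, and the exclusion for "should be written without leading space as" replaces the removed `allow-leading-space` option. A minimal sketch of a directive these settings accept follows; the package, function, and gosec rule IDs are illustrative, not taken from the repository.

```go
package example

import (
	"crypto/md5" //nolint:gosec // G501: md5 is used only for a non-security checksum
	"fmt"
)

// Checksum returns a short, non-cryptographic fingerprint of its input.
// The machine-readable form "//nolint:<linter>" (no space after "//") is the
// style nolintlint normalizes to; with allow-unused: false, the directive must
// actually suppress a reported issue or it is flagged itself.
func Checksum(data []byte) string {
	sum := md5.Sum(data) //nolint:gosec // G401: a weak hash is acceptable for a cache key
	return fmt.Sprintf("%x", sum)
}
```

Since `require-specific` stays false, a bare `//nolint` would still be accepted, but naming the linter keeps the suppression narrow.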
diff --git a/.mergify.yml b/.mergify.yml
index 28ec751ea1..116341d012 100644
--- a/.mergify.yml
+++ b/.mergify.yml
@@ -1,8 +1,8 @@
 pull_request_rules:
-  - name: automerge to master with label automerge and branch protection passing
+  - name: automerge to main with label automerge and branch protection passing
     conditions:
       - "#approved-reviews-by>1"
-      - base=master
+      - base=main
       - label=automerge
     actions:
       merge:
@@ -11,7 +11,7 @@ pull_request_rules:
         commit_message: title+body
   - name: backport patches to v0.42.x branch
     conditions:
-      - base=master
+      - base=main
       - label=backport/0.42.x (Stargate)
     actions:
       backport:
@@ -19,7 +19,7 @@ pull_request_rules:
         - release/v0.42.x
   - name: backport patches to v0.39.x branch
     conditions:
-      - base=master
+      - base=main
       - label=backport/0.39.x (Launchpad)
     actions:
       backport:
diff --git a/CHANGELOG.md b/CHANGELOG.md
index b40b50808b..5488976815 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -37,6 +37,47 @@ Ref: https://keepachangelog.com/en/1.0.0/
 
 ## [Unreleased]
 
+## v0.45.9 - 2022-10-14
+
+ATTENTION:
+
+This is a security release for the
+[Dragonberry security advisory](https://forum.cosmos.network/t/ibc-security-advisory-dragonberry/7702).
+
+All users should upgrade immediately.
+
+Users *must* add a replace directive in their go.mod for the
+new `ics23` package in the SDK:
+
+```
+replace (
+    github.com/confio/ics23/go => github.com/cosmos/cosmos-sdk/ics23
+)
+```
+
+### Features
+
+* [#13435](https://github.com/cosmos/cosmos-sdk/pull/13435) Extend error context when a simulation fails.
+
+### Improvements
+
+* [#13369](https://github.com/cosmos/cosmos-sdk/pull/13369) Improve UX for `keyring.List` by returning all retrieved keys.
+* [#13323](https://github.com/cosmos/cosmos-sdk/pull/13323) Ensure `withdraw_rewards` rewards are emitted from all actions that result in rewards being withdrawn.
+* [#13321](https://github.com/cosmos/cosmos-sdk/pull/13321) Add flag to disable fast node migration and usage.
+* (store) [#13326](https://github.com/cosmos/cosmos-sdk/pull/13326) Implementation of ADR-038 file StreamingService, backport #8664.
+* (store) [#13540](https://github.com/cosmos/cosmos-sdk/pull/13540) Default fastnode migration to false to prevent surprises. Operators must enable it, unless they have it enabled already.
+
+### API Breaking Changes
+
+* (cli) [#13089](https://github.com/cosmos/cosmos-sdk/pull/13089) Fix the rollback command so it actually deletes multistore versions; added method `RollbackToVersion` to the `CommitMultiStore` interface and method `CommitMultiStore` to the `Application` interface.
+
+### Bug Fixes
+
+* [#...](https://github.com/cosmos/cosmos-sdk/pull/) Implement Dragonberry security patch.
+  * To apply the patch, please refer to the [RELEASE NOTES](./RELEASE_NOTES.md).
+* (store) [#13459](https://github.com/cosmos/cosmos-sdk/pull/13459) Don't let state listeners observe uncommitted writes.
+
 ## v0.45.8 - 2022-08-25
 
 ### Improvements
@@ -45,6 +86,8 @@ Ref: https://keepachangelog.com/en/1.0.0/
 * [#12885](https://github.com/cosmos/cosmos-sdk/pull/12885) Amortize cost of processing cache KV store.
 * [#12970](https://github.com/cosmos/cosmos-sdk/pull/12970) Bump Tendermint to `v0.34.21` and IAVL to `v0.19.1`.
 * [#12693](https://github.com/cosmos/cosmos-sdk/pull/12693) Make sure the order of each node is consistent when emitting proto events.
+* (simapp) [#13107](https://github.com/cosmos/cosmos-sdk/pull/13107) Call `SetIAVLCacheSize` with the configured value in simapp.
+* (cli) [#12742](https://github.com/cosmos/cosmos-sdk/pull/12742) Add the `prune` CLI cmd to manually prune app store history versions based on the pruning options.
 
 ### Bug Fixes
 
@@ -69,9 +112,6 @@ Ref: https://keepachangelog.com/en/1.0.0/
 ### Bug Fixes
 
 * (x/mint) [#12384](https://github.com/cosmos/cosmos-sdk/pull/12384) Ensure `GoalBonded` must be positive when performing `x/mint` parameter validation.
-* (simapp) [#12437](https://github.com/cosmos/cosmos-sdk/pull/12437) fix the non-determinstic behavior in simulations caused by `GenTx` and check
-empty coins slice before it is used to create `banktype.MsgSend`.
-* (x/capability) [12818](https://github.com/cosmos/cosmos-sdk/pull/12818) Use fixed length hex for pointer at FwdCapabilityKey.
 
 ## [v0.45.6](https://github.com/cosmos/cosmos-sdk/releases/tag/v0.45.6) - 2022-06-28
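The API-breaking entry above adds `RollbackToVersion` to the `CommitMultiStore` interface and `CommitMultiStore` to the `Application` interface. A rough sketch of how a caller might combine the two, assuming the method shapes implied by the changelog entry (the helper name and `height` parameter are hypothetical):

```go
package rollbackdemo

import (
	servertypes "github.com/cosmos/cosmos-sdk/server/types"
)

// rollbackToHeight sketches the flow enabled by #13089: the server layer can
// now reach the application's CommitMultiStore and ask it to delete the
// multistore versions above the target height. Treat the exact signatures as
// assumptions to verify against the release.
func rollbackToHeight(app servertypes.Application, height int64) error {
	return app.CommitMultiStore().RollbackToVersion(height)
}
```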
diff --git a/Makefile b/Makefile
index aac32a7efd..767588c4d0 100644
--- a/Makefile
+++ b/Makefile
@@ -328,9 +328,8 @@ lint-go:
 .PHONY: lint lint-fix
 
 format:
-	find . -name '*.go' -type f -not -path "./vendor*" -not -path "*.git*" -not -path "./client/docs/statik/statik.go" -not -path "./tests/mocks/*" -not -name '*.pb.go' | xargs gofmt -w -s
-	find . -name '*.go' -type f -not -path "./vendor*" -not -path "*.git*" -not -path "./client/docs/statik/statik.go" -not -path "./tests/mocks/*" -not -name '*.pb.go' | xargs misspell -w
-	find . -name '*.go' -type f -not -path "./vendor*" -not -path "*.git*" -not -path "./client/docs/statik/statik.go" -not -path "./tests/mocks/*" -not -name '*.pb.go' | xargs goimports -w -local github.com/cosmos/cosmos-sdk
+	find . -name '*.go' -type f -not -path "./vendor*" -not -path "*.git*" -not -path "./client/docs/statik/statik.go" -not -path "./tests/mocks/*" -not -name "*.pb.go" -not -name "*.pb.gw.go" -not -name "*.pulsar.go" -not -path "./crypto/keys/secp256k1/*" | xargs gofumpt -w -l
+	golangci-lint run --fix
 .PHONY: format
 
 ###############################################################################
@@ -396,12 +395,12 @@ proto-lint:
 	@$(DOCKER_BUF) lint --error-format=json
 
 proto-check-breaking:
-	@$(DOCKER_BUF) breaking --against $(HTTPS_GIT)#branch=master
+	@$(DOCKER_BUF) breaking --against $(HTTPS_GIT)#branch=main
 
 TM_URL              = https://raw.githubusercontent.com/tendermint/tendermint/v0.34.0-rc6/proto/tendermint
 GOGO_PROTO_URL      = https://raw.githubusercontent.com/regen-network/protobuf/cosmos
-COSMOS_PROTO_URL    = https://raw.githubusercontent.com/regen-network/cosmos-proto/master
+COSMOS_PROTO_URL    = https://raw.githubusercontent.com/regen-network/cosmos-proto/main
 CONFIO_URL          = https://raw.githubusercontent.com/confio/ics23/v0.6.3
 
 TM_CRYPTO_TYPES     = third_party/proto/tendermint/crypto
diff --git a/RELEASE_NOTES.md b/RELEASE_NOTES.md
index 1584f447c4..9c3a549cb9 100644
--- a/RELEASE_NOTES.md
+++ b/RELEASE_NOTES.md
@@ -1,7 +1,20 @@
-# Cosmos SDK v0.45.8 Release Notes
+# Cosmos SDK v0.45.9 Release Notes
 
-This release introduces few improvements, such as the speed-up of the crisis invariant checks (thanks to a Juno bounty), and updated Tendermint and IAVL dependencies.
+This is a security release for the
+[Dragonberry security advisory](https://forum.cosmos.network/t/ibc-security-advisory-dragonberry/7702).
+Please upgrade ASAP.
 
-See the [Cosmos SDK v0.45.8 Changelog](https://github.com/cosmos/cosmos-sdk/blob/v0.45.8/CHANGELOG.md) for the exhaustive list of all changes.
+We have also included a few minor bug fixes.
 
-**Full Commit History**: https://github.com/cosmos/cosmos-sdk/compare/v0.45.7...v0.45.8
+Chains must add the following replace directive to their application's go.mod:
+
+```go
+replace github.com/confio/ics23/go => github.com/cosmos/cosmos-sdk/ics23
+```
+
+Bumping the SDK version should be smooth; however, feel free to tag core devs to review your upgrading PR:
+
+- **CET**: @tac0turtle, @okwme, @AdityaSripal, @colin-axner, @julienrbrt
+- **EST**: @ebuchman, @alexanderbez, @aaronc
+- **PST**: @jtremback, @nicolaslara, @czarcas7ic, @p0mvn
+- **CDT**: @ValarDragon, @zmanian
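The `baseapp/abci.go` changes below hook state-streaming listeners into `BeginBlock`, `EndBlock`, and `DeliverTx` (note that `DeliverTx` gains a named `res` result so the deferred hook observes the final response even on early returns). As a minimal sketch, here is a listener with the method set those hooks call; the type name and log format are illustrative:

```go
package streaming

import (
	"fmt"

	abci "github.com/tendermint/tendermint/abci/types"

	sdk "github.com/cosmos/cosmos-sdk/types"
)

// logListener observes each ABCI request/response pair that BaseApp forwards
// to its registered listeners. Errors returned here are only logged by
// BaseApp, so a listener cannot abort block processing.
type logListener struct{}

func (logListener) ListenBeginBlock(ctx sdk.Context, req abci.RequestBeginBlock, res abci.ResponseBeginBlock) error {
	fmt.Printf("begin block: height=%d events=%d\n", req.Header.Height, len(res.Events))
	return nil
}

func (logListener) ListenEndBlock(ctx sdk.Context, req abci.RequestEndBlock, res abci.ResponseEndBlock) error {
	fmt.Printf("end block: height=%d\n", req.Height)
	return nil
}

func (logListener) ListenDeliverTx(ctx sdk.Context, req abci.RequestDeliverTx, res abci.ResponseDeliverTx) error {
	fmt.Printf("deliver tx: code=%d gas_used=%d\n", res.Code, res.GasUsed)
	return nil
}
```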
diff --git a/baseapp/abci.go b/baseapp/abci.go
index 46fe264a02..b934255da4 100644
--- a/baseapp/abci.go
+++ b/baseapp/abci.go
@@ -2,6 +2,7 @@ package baseapp
 
 import (
 	"crypto/sha256"
+	"encoding/json"
 	"errors"
 	"fmt"
 	"os"
@@ -122,24 +123,6 @@ func (app *BaseApp) SetOption(req abci.RequestSetOption) (res abci.ResponseSetOp
 	return
 }
 
-// FilterPeerByAddrPort filters peers by address/port.
-func (app *BaseApp) FilterPeerByAddrPort(info string) abci.ResponseQuery {
-	if app.addrPeerFilter != nil {
-		return app.addrPeerFilter(info)
-	}
-
-	return abci.ResponseQuery{}
-}
-
-// FilterPeerByID filters peers by node ID.
-func (app *BaseApp) FilterPeerByID(info string) abci.ResponseQuery {
-	if app.idPeerFilter != nil {
-		return app.idPeerFilter(info)
-	}
-
-	return abci.ResponseQuery{}
-}
-
 // BeginBlock implements the ABCI application interface.
 func (app *BaseApp) BeginBlock(req abci.RequestBeginBlock) (res abci.ResponseBeginBlock) {
 	defer telemetry.MeasureSince(time.Now(), "abci", "begin_block")
 
@@ -196,6 +179,14 @@ func (app *BaseApp) BeginBlock(req abci.RequestBeginBlock) (res abci.ResponseBeg
 	}
 	// set the signed validators for addition to context in deliverTx
 	app.voteInfos = req.LastCommitInfo.GetVotes()
+
+	// call the hooks with the BeginBlock messages
+	for _, streamingListener := range app.abciListeners {
+		if err := streamingListener.ListenBeginBlock(app.deliverState.ctx, req, res); err != nil {
+			app.logger.Error("BeginBlock listening hook failed", "height", req.Header.Height, "err", err)
+		}
+	}
+
 	return res
 }
 
@@ -216,6 +207,13 @@ func (app *BaseApp) EndBlock(req abci.RequestEndBlock) (res abci.ResponseEndBloc
 		res.ConsensusParamUpdates = cp
 	}
 
+	// call the streaming service hooks with the EndBlock messages
+	for _, streamingListener := range app.abciListeners {
+		if err := streamingListener.ListenEndBlock(app.deliverState.ctx, req, res); err != nil {
+			app.logger.Error("EndBlock listening hook failed", "height", req.Height, "err", err)
+		}
+	}
+
 	return res
 }
 
@@ -260,9 +258,17 @@ func (app *BaseApp) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx {
 // Otherwise, the ResponseDeliverTx will contain relevant error information.
 // Regardless of tx execution outcome, the ResponseDeliverTx will contain relevant
 // gas execution context.
-func (app *BaseApp) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx {
+func (app *BaseApp) DeliverTx(req abci.RequestDeliverTx) (res abci.ResponseDeliverTx) {
 	defer telemetry.MeasureSince(time.Now(), "abci", "deliver_tx")
 
+	defer func() {
+		for _, streamingListener := range app.abciListeners {
+			if err := streamingListener.ListenDeliverTx(app.deliverState.ctx, req, res); err != nil {
+				app.logger.Error("DeliverTx listening hook failed", "err", err)
+			}
+		}
+	}()
+
 	gInfo := sdk.GasInfo{}
 	resultStr := "successful"
 
@@ -782,6 +788,22 @@ func handleQueryApp(app *BaseApp, path []string, req abci.RequestQuery) abci.Res
 			Value: []byte(app.version),
 		}
 
+	case "snapshots":
+		var responseValue []byte
+
+		response := app.ListSnapshots(abci.RequestListSnapshots{})
+
+		responseValue, err := json.Marshal(response)
+		if err != nil {
+			return sdkerrors.QueryResult(sdkerrors.Wrap(err, fmt.Sprintf("failed to marshal list snapshots response %v", response)))
+		}
+
+		return abci.ResponseQuery{
+			Codespace: sdkerrors.RootCodespace,
+			Height:    req.Height,
+			Value:     responseValue,
+		}
+
 	default:
 		return sdkerrors.QueryResultWithDebug(sdkerrors.Wrapf(sdkerrors.ErrUnknownRequest, "unknown query: %s", path), app.trace)
 	}
@@ -817,35 +839,6 @@ func handleQueryStore(app *BaseApp, path []string, req abci.RequestQuery) abci.R
 	return resp
 }
 
-func handleQueryP2P(app *BaseApp, path []string) abci.ResponseQuery {
-	// "/p2p" prefix for p2p queries
-	if len(path) < 4 {
-		return sdkerrors.QueryResultWithDebug(
-			sdkerrors.Wrap(
-				sdkerrors.ErrUnknownRequest, "path should be p2p filter <addr|id> <parameter>",
-			), app.trace)
-	}
-
-	var resp abci.ResponseQuery
-
-	cmd, typ, arg := path[1], path[2], path[3]
-	switch cmd {
-	case "filter":
-		switch typ {
-		case "addr":
-			resp = app.FilterPeerByAddrPort(arg)
-
-		case "id":
-			resp = app.FilterPeerByID(arg)
-		}
-
-	default:
-		resp = sdkerrors.QueryResultWithDebug(sdkerrors.Wrap(sdkerrors.ErrUnknownRequest, "expected second parameter to be 'filter'"), app.trace)
-	}
-
-	return resp
-}
-
 func handleQueryCustom(app *BaseApp, path []string, req abci.RequestQuery) abci.ResponseQuery {
 	// path[0] should be
"custom" because "/custom" prefix is required for keeper // queries. diff --git a/baseapp/abci_test.go b/baseapp/abci_test.go index b382a5a389..e6ce5c9faf 100644 --- a/baseapp/abci_test.go +++ b/baseapp/abci_test.go @@ -1,6 +1,7 @@ package baseapp import ( + "encoding/json" "testing" "github.com/stretchr/testify/require" @@ -159,3 +160,40 @@ func TestBaseAppCreateQueryContext(t *testing.T) { }) } } + +type paramStore struct { + db *dbm.MemDB +} + +func (ps *paramStore) Set(_ sdk.Context, key []byte, value interface{}) { + bz, err := json.Marshal(value) + if err != nil { + panic(err) + } + + ps.db.Set(key, bz) +} + +func (ps *paramStore) Has(_ sdk.Context, key []byte) bool { + ok, err := ps.db.Has(key) + if err != nil { + panic(err) + } + + return ok +} + +func (ps *paramStore) Get(_ sdk.Context, key []byte, ptr interface{}) { + bz, err := ps.db.Get(key) + if err != nil { + panic(err) + } + + if len(bz) == 0 { + return + } + + if err := json.Unmarshal(bz, ptr); err != nil { + panic(err) + } +} diff --git a/baseapp/baseapp.go b/baseapp/baseapp.go index 4f73e19213..b51f7805bd 100644 --- a/baseapp/baseapp.go +++ b/baseapp/baseapp.go @@ -43,32 +43,21 @@ type ( ) // BaseApp reflects the ABCI application implementation. -type BaseApp struct { // nolint: maligned +type BaseApp struct { //nolint: maligned // initialized on creation logger log.Logger - name string // application name from abci.Info - db dbm.DB // common DB backend - cms sdk.CommitMultiStore // Main (uncached) state - storeLoader StoreLoader // function to handle store loading, may be overridden with SetStoreLoader() - router sdk.Router // handle any kind of message - queryRouter sdk.QueryRouter // router for redirecting query calls - grpcQueryRouter *GRPCQueryRouter // router for redirecting gRPC query calls - msgServiceRouter *MsgServiceRouter // router for redirecting Msg service messages + name string // application name from abci.Info interfaceRegistry types.InterfaceRegistry txDecoder sdk.TxDecoder // unmarshal []byte into sdk.Tx - anteHandler sdk.AnteHandler // ante handler for fee and auth - initChainer sdk.InitChainer // initialize state with validators and state blob - beginBlocker sdk.BeginBlocker // logic to run before any txs - endBlocker sdk.EndBlocker // logic to run after all txs, and to determine valset changes - addrPeerFilter sdk.PeerFilter // filter peers by address and port - idPeerFilter sdk.PeerFilter // filter peers by node ID - fauxMerkleMode bool // if true, IAVL MountStores uses MountStoresDB for simulation speed. + anteHandler sdk.AnteHandler // ante handler for fee and auth - // manages snapshots, i.e. dumps of app state at certain intervals - snapshotManager *snapshots.Manager - snapshotInterval uint64 // block interval between state sync snapshots - snapshotKeepRecent uint32 // recent state sync snapshots to keep + appStore + baseappVersions + peerFilters + snapshotData + abciData + moduleRouter // volatile states: // @@ -77,12 +66,6 @@ type BaseApp struct { // nolint: maligned checkState *state // for CheckTx deliverState *state // for DeliverTx - // an inter-block write-through cache provided to the context during deliverState - interBlockCache sdk.MultiStorePersistentCache - - // absent validators from begin block - voteInfos []abci.VoteInfo - // paramStore is used to query for ABCI consensus parameters from an // application parameter store. paramStore ParamStore @@ -115,13 +98,6 @@ type BaseApp struct { // nolint: maligned // ResponseCommit.RetainHeight. 
minRetainBlocks uint64 - // application's version string - version string - - // application's protocol version that increments on every upgrade - // if BaseApp is passed to the upgrade keeper's NewKeeper method. - appVersion uint64 - // recovery handler for app.runTx method runTxRecoveryMiddleware recoveryMiddleware @@ -131,6 +107,55 @@ type BaseApp struct { // nolint: maligned // indexEvents defines the set of events in the form {eventType}.{attributeKey}, // which informs Tendermint what to index. If empty, all events will be indexed. indexEvents map[string]struct{} + + // abciListeners for hooking into the ABCI message processing of the BaseApp + // and exposing the requests and responses to external consumers + abciListeners []ABCIListener +} + +type appStore struct { + db dbm.DB // common DB backend + cms sdk.CommitMultiStore // Main (uncached) state + storeLoader StoreLoader // function to handle store loading, may be overridden with SetStoreLoader() + + // an inter-block write-through cache provided to the context during deliverState + interBlockCache sdk.MultiStorePersistentCache + + fauxMerkleMode bool // if true, IAVL MountStores uses MountStoresDB for simulation speed. +} + +type moduleRouter struct { + router sdk.Router // handle any kind of message + queryRouter sdk.QueryRouter // router for redirecting query calls + grpcQueryRouter *GRPCQueryRouter // router for redirecting gRPC query calls + msgServiceRouter *MsgServiceRouter // router for redirecting Msg service messages +} + +type abciData struct { + initChainer sdk.InitChainer // initialize state with validators and state blob + beginBlocker sdk.BeginBlocker // logic to run before any txs + endBlocker sdk.EndBlocker // logic to run after all txs, and to determine valset changes + + // absent validators from begin block + voteInfos []abci.VoteInfo +} + +type baseappVersions struct { + // application's version string + version string + + // application's protocol version that increments on every upgrade + // if BaseApp is passed to the upgrade keeper's NewKeeper method. + appVersion uint64 +} + +// should really get handled in some db struct +// which then has a sub-item, persistence fields +type snapshotData struct { + // manages snapshots, i.e. dumps of app state at certain intervals + snapshotManager *snapshots.Manager + snapshotInterval uint64 // block interval between state sync snapshots + snapshotKeepRecent uint32 // recent state sync snapshots to keep } // NewBaseApp returns a reference to an initialized BaseApp. It accepts a @@ -142,17 +167,21 @@ func NewBaseApp( name string, logger log.Logger, db dbm.DB, txDecoder sdk.TxDecoder, options ...func(*BaseApp), ) *BaseApp { app := &BaseApp{ - logger: logger, - name: name, - db: db, - cms: store.NewCommitMultiStore(db), - storeLoader: DefaultStoreLoader, - router: NewRouter(), - queryRouter: NewQueryRouter(), - grpcQueryRouter: NewGRPCQueryRouter(), - msgServiceRouter: NewMsgServiceRouter(), - txDecoder: txDecoder, - fauxMerkleMode: false, + logger: logger, + name: name, + appStore: appStore{ + db: db, + cms: store.NewCommitMultiStore(db), + storeLoader: DefaultStoreLoader, + fauxMerkleMode: false, + }, + moduleRouter: moduleRouter{ + router: NewRouter(), + queryRouter: NewQueryRouter(), + grpcQueryRouter: NewGRPCQueryRouter(), + msgServiceRouter: NewMsgServiceRouter(), + }, + txDecoder: txDecoder, } for _, option := range options { @@ -273,11 +302,8 @@ func DefaultStoreLoader(ms sdk.CommitMultiStore) error { // CommitMultiStore returns the root multi-store. 
// App constructor can use this to access the `cms`. -// UNSAFE: only safe to use during app initialization. +// UNSAFE: must not be used during the abci life cycle. func (app *BaseApp) CommitMultiStore() sdk.CommitMultiStore { - if app.sealed { - panic("cannot call CommitMultiStore() after baseapp is sealed") - } return app.cms } diff --git a/baseapp/baseapp_test.go b/baseapp/baseapp_test.go index 0b85da8df9..f9cdfcb714 100644 --- a/baseapp/baseapp_test.go +++ b/baseapp/baseapp_test.go @@ -1,19 +1,10 @@ package baseapp import ( - "bytes" - "encoding/binary" "encoding/json" - "fmt" - "io/ioutil" - "math/rand" "os" - "strings" - "sync" "testing" - "time" - "github.com/gogo/protobuf/jsonpb" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" abci "github.com/tendermint/tendermint/abci/types" @@ -22,13 +13,8 @@ import ( dbm "github.com/tendermint/tm-db" "github.com/cosmos/cosmos-sdk/codec" - "github.com/cosmos/cosmos-sdk/snapshots" - snapshottypes "github.com/cosmos/cosmos-sdk/snapshots/types" - "github.com/cosmos/cosmos-sdk/store/rootmulti" store "github.com/cosmos/cosmos-sdk/store/types" - "github.com/cosmos/cosmos-sdk/testutil/testdata" sdk "github.com/cosmos/cosmos-sdk/types" - sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" "github.com/cosmos/cosmos-sdk/x/auth/legacy/legacytx" ) @@ -37,43 +23,6 @@ var ( capKey2 = sdk.NewKVStoreKey("key2") ) -type paramStore struct { - db *dbm.MemDB -} - -func (ps *paramStore) Set(_ sdk.Context, key []byte, value interface{}) { - bz, err := json.Marshal(value) - if err != nil { - panic(err) - } - - ps.db.Set(key, bz) -} - -func (ps *paramStore) Has(_ sdk.Context, key []byte) bool { - ok, err := ps.db.Has(key) - if err != nil { - panic(err) - } - - return ok -} - -func (ps *paramStore) Get(_ sdk.Context, key []byte, ptr interface{}) { - bz, err := ps.db.Get(key) - if err != nil { - panic(err) - } - - if len(bz) == 0 { - return - } - - if err := json.Unmarshal(bz, ptr); err != nil { - panic(err) - } -} - func defaultLogger() log.Logger { return log.NewTMLogger(log.NewSyncWriter(os.Stdout)).With("module", "sdk/app") } @@ -120,278 +69,6 @@ func setupBaseApp(t *testing.T, options ...func(*BaseApp)) *BaseApp { return app } -// simple one store baseapp with data and snapshots. Each tx is 1 MB in size (uncompressed). -func setupBaseAppWithSnapshots(t *testing.T, blocks uint, blockTxs int, options ...func(*BaseApp)) (*BaseApp, func()) { - codec := codec.NewLegacyAmino() - registerTestCodec(codec) - routerOpt := func(bapp *BaseApp) { - bapp.Router().AddRoute(sdk.NewRoute(routeMsgKeyValue, func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) { - kv := msg.(*msgKeyValue) - bapp.cms.GetCommitKVStore(capKey2).Set(kv.Key, kv.Value) - return &sdk.Result{}, nil - })) - } - - snapshotInterval := uint64(2) - snapshotTimeout := 1 * time.Minute - snapshotDir, err := ioutil.TempDir("", "baseapp") - require.NoError(t, err) - snapshotStore, err := snapshots.NewStore(dbm.NewMemDB(), snapshotDir) - require.NoError(t, err) - teardown := func() { - os.RemoveAll(snapshotDir) - } - - app := setupBaseApp(t, append(options, - SetSnapshotStore(snapshotStore), - SetSnapshotInterval(snapshotInterval), - SetPruning(sdk.PruningOptions{KeepEvery: 1}), - routerOpt)...) 
- - app.InitChain(abci.RequestInitChain{}) - - r := rand.New(rand.NewSource(3920758213583)) - keyCounter := 0 - for height := int64(1); height <= int64(blocks); height++ { - app.BeginBlock(abci.RequestBeginBlock{Header: tmproto.Header{Height: height}}) - for txNum := 0; txNum < blockTxs; txNum++ { - tx := txTest{Msgs: []sdk.Msg{}} - for msgNum := 0; msgNum < 100; msgNum++ { - key := []byte(fmt.Sprintf("%v", keyCounter)) - value := make([]byte, 10000) - _, err := r.Read(value) - require.NoError(t, err) - tx.Msgs = append(tx.Msgs, msgKeyValue{Key: key, Value: value}) - keyCounter++ - } - txBytes, err := codec.Marshal(tx) - require.NoError(t, err) - resp := app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes}) - require.True(t, resp.IsOK(), "%v", resp.String()) - } - app.EndBlock(abci.RequestEndBlock{Height: height}) - app.Commit() - - // Wait for snapshot to be taken, since it happens asynchronously. - if uint64(height)%snapshotInterval == 0 { - start := time.Now() - for { - if time.Since(start) > snapshotTimeout { - t.Errorf("timed out waiting for snapshot after %v", snapshotTimeout) - } - snapshot, err := snapshotStore.Get(uint64(height), snapshottypes.CurrentFormat) - require.NoError(t, err) - if snapshot != nil { - break - } - time.Sleep(100 * time.Millisecond) - } - } - } - - return app, teardown -} - -func TestMountStores(t *testing.T) { - app := setupBaseApp(t) - - // check both stores - store1 := app.cms.GetCommitKVStore(capKey1) - require.NotNil(t, store1) - store2 := app.cms.GetCommitKVStore(capKey2) - require.NotNil(t, store2) -} - -// Test that we can make commits and then reload old versions. -// Test that LoadLatestVersion actually does. -func TestLoadVersion(t *testing.T) { - logger := defaultLogger() - pruningOpt := SetPruning(store.PruneNothing) - db := dbm.NewMemDB() - name := t.Name() - app := NewBaseApp(name, logger, db, nil, pruningOpt) - - // make a cap key and mount the store - err := app.LoadLatestVersion() // needed to make stores non-nil - require.Nil(t, err) - - emptyCommitID := sdk.CommitID{} - - // fresh store has zero/empty last commit - lastHeight := app.LastBlockHeight() - lastID := app.LastCommitID() - require.Equal(t, int64(0), lastHeight) - require.Equal(t, emptyCommitID, lastID) - - // execute a block, collect commit ID - header := tmproto.Header{Height: 1} - app.BeginBlock(abci.RequestBeginBlock{Header: header}) - res := app.Commit() - commitID1 := sdk.CommitID{Version: 1, Hash: res.Data} - - // execute a block, collect commit ID - header = tmproto.Header{Height: 2} - app.BeginBlock(abci.RequestBeginBlock{Header: header}) - res = app.Commit() - commitID2 := sdk.CommitID{Version: 2, Hash: res.Data} - - // reload with LoadLatestVersion - app = NewBaseApp(name, logger, db, nil, pruningOpt) - app.MountStores() - err = app.LoadLatestVersion() - require.Nil(t, err) - testLoadVersionHelper(t, app, int64(2), commitID2) - - // reload with LoadVersion, see if you can commit the same block and get - // the same result - app = NewBaseApp(name, logger, db, nil, pruningOpt) - err = app.LoadVersion(1) - require.Nil(t, err) - testLoadVersionHelper(t, app, int64(1), commitID1) - app.BeginBlock(abci.RequestBeginBlock{Header: header}) - app.Commit() - testLoadVersionHelper(t, app, int64(2), commitID2) -} - -func useDefaultLoader(app *BaseApp) { - app.SetStoreLoader(DefaultStoreLoader) -} - -func initStore(t *testing.T, db dbm.DB, storeKey string, k, v []byte) { - rs := rootmulti.NewStore(db, log.NewNopLogger()) - rs.SetPruning(store.PruneNothing) - key := 
sdk.NewKVStoreKey(storeKey) - rs.MountStoreWithDB(key, store.StoreTypeIAVL, nil) - err := rs.LoadLatestVersion() - require.Nil(t, err) - require.Equal(t, int64(0), rs.LastCommitID().Version) - - // write some data in substore - kv, _ := rs.GetStore(key).(store.KVStore) - require.NotNil(t, kv) - kv.Set(k, v) - commitID := rs.Commit() - require.Equal(t, int64(1), commitID.Version) -} - -func checkStore(t *testing.T, db dbm.DB, ver int64, storeKey string, k, v []byte) { - rs := rootmulti.NewStore(db, log.NewNopLogger()) - rs.SetPruning(store.PruneDefault) - key := sdk.NewKVStoreKey(storeKey) - rs.MountStoreWithDB(key, store.StoreTypeIAVL, nil) - err := rs.LoadLatestVersion() - require.Nil(t, err) - require.Equal(t, ver, rs.LastCommitID().Version) - - // query data in substore - kv, _ := rs.GetStore(key).(store.KVStore) - require.NotNil(t, kv) - require.Equal(t, v, kv.Get(k)) -} - -// Test that we can make commits and then reload old versions. -// Test that LoadLatestVersion actually does. -func TestSetLoader(t *testing.T) { - cases := map[string]struct { - setLoader func(*BaseApp) - origStoreKey string - loadStoreKey string - }{ - "don't set loader": { - origStoreKey: "foo", - loadStoreKey: "foo", - }, - "default loader": { - setLoader: useDefaultLoader, - origStoreKey: "foo", - loadStoreKey: "foo", - }, - } - - k := []byte("key") - v := []byte("value") - - for name, tc := range cases { - tc := tc - t.Run(name, func(t *testing.T) { - // prepare a db with some data - db := dbm.NewMemDB() - initStore(t, db, tc.origStoreKey, k, v) - - // load the app with the existing db - opts := []func(*BaseApp){SetPruning(store.PruneNothing)} - if tc.setLoader != nil { - opts = append(opts, tc.setLoader) - } - app := NewBaseApp(t.Name(), defaultLogger(), db, nil, opts...) 
- app.MountStores(sdk.NewKVStoreKey(tc.loadStoreKey)) - err := app.LoadLatestVersion() - require.Nil(t, err) - - // "execute" one block - app.BeginBlock(abci.RequestBeginBlock{Header: tmproto.Header{Height: 2}}) - res := app.Commit() - require.NotNil(t, res.Data) - - // check db is properly updated - checkStore(t, db, 2, tc.loadStoreKey, k, v) - checkStore(t, db, 2, tc.loadStoreKey, []byte("foo"), nil) - }) - } -} - -func TestVersionSetterGetter(t *testing.T) { - logger := defaultLogger() - pruningOpt := SetPruning(store.PruneDefault) - db := dbm.NewMemDB() - name := t.Name() - app := NewBaseApp(name, logger, db, nil, pruningOpt) - - require.Equal(t, "", app.Version()) - res := app.Query(abci.RequestQuery{Path: "app/version"}) - require.True(t, res.IsOK()) - require.Equal(t, "", string(res.Value)) - - versionString := "1.0.0" - app.SetVersion(versionString) - require.Equal(t, versionString, app.Version()) - res = app.Query(abci.RequestQuery{Path: "app/version"}) - require.True(t, res.IsOK()) - require.Equal(t, versionString, string(res.Value)) -} - -func TestLoadVersionInvalid(t *testing.T) { - logger := log.NewNopLogger() - pruningOpt := SetPruning(store.PruneNothing) - db := dbm.NewMemDB() - name := t.Name() - app := NewBaseApp(name, logger, db, nil, pruningOpt) - - err := app.LoadLatestVersion() - require.Nil(t, err) - - // require error when loading an invalid version - err = app.LoadVersion(-1) - require.Error(t, err) - - header := tmproto.Header{Height: 1} - app.BeginBlock(abci.RequestBeginBlock{Header: header}) - res := app.Commit() - commitID1 := sdk.CommitID{Version: 1, Hash: res.Data} - - // create a new app with the stores mounted under the same cap key - app = NewBaseApp(name, logger, db, nil, pruningOpt) - - // require we can load the latest version - err = app.LoadVersion(1) - require.Nil(t, err) - testLoadVersionHelper(t, app, int64(1), commitID1) - - // require error when loading an invalid version - err = app.LoadVersion(2) - require.Error(t, err) -} - func TestLoadVersionPruning(t *testing.T) { logger := log.NewNopLogger() pruningOptions := store.PruningOptions{ @@ -455,1587 +132,65 @@ func testLoadVersionHelper(t *testing.T, app *BaseApp, expectedHeight int64, exp require.Equal(t, expectedID, lastID) } -func TestOptionFunction(t *testing.T) { - logger := defaultLogger() - db := dbm.NewMemDB() - bap := NewBaseApp("starting name", logger, db, nil, testChangeNameHelper("new name")) - require.Equal(t, bap.name, "new name", "BaseApp should have had name changed via option function") -} - -func testChangeNameHelper(name string) func(*BaseApp) { - return func(bap *BaseApp) { - bap.name = name - } -} - -// Test that txs can be unmarshalled and read and that -// correct error codes are returned when not -func TestTxDecoder(t *testing.T) { - codec := codec.NewLegacyAmino() - registerTestCodec(codec) - - app := newBaseApp(t.Name()) - tx := newTxCounter(1, 0) - txBytes := codec.MustMarshal(tx) - - dTx, err := app.txDecoder(txBytes) - require.NoError(t, err) - - cTx := dTx.(txTest) - require.Equal(t, tx.Counter, cTx.Counter) -} - -// Test that Info returns the latest committed state. 
-func TestInfo(t *testing.T) { - app := newBaseApp(t.Name()) - - // ----- test an empty response ------- - reqInfo := abci.RequestInfo{} - res := app.Info(reqInfo) - - // should be empty - assert.Equal(t, "", res.Version) - assert.Equal(t, t.Name(), res.GetData()) - assert.Equal(t, int64(0), res.LastBlockHeight) - require.Equal(t, []uint8(nil), res.LastBlockAppHash) - require.Equal(t, app.AppVersion(), res.AppVersion) - // ----- test a proper response ------- - // TODO -} - -func TestBaseAppOptionSeal(t *testing.T) { - app := setupBaseApp(t) - - require.Panics(t, func() { - app.SetName("") - }) - require.Panics(t, func() { - app.SetVersion("") - }) - require.Panics(t, func() { - app.SetDB(nil) - }) - require.Panics(t, func() { - app.SetCMS(nil) - }) - require.Panics(t, func() { - app.SetInitChainer(nil) - }) - require.Panics(t, func() { - app.SetBeginBlocker(nil) - }) - require.Panics(t, func() { - app.SetEndBlocker(nil) - }) - require.Panics(t, func() { - app.SetAnteHandler(nil) - }) - require.Panics(t, func() { - app.SetAddrPeerFilter(nil) - }) - require.Panics(t, func() { - app.SetIDPeerFilter(nil) - }) - require.Panics(t, func() { - app.SetFauxMerkleMode() - }) - require.Panics(t, func() { - app.SetRouter(NewRouter()) - }) -} - func TestSetMinGasPrices(t *testing.T) { minGasPrices := sdk.DecCoins{sdk.NewInt64DecCoin("stake", 5000)} app := newBaseApp(t.Name(), SetMinGasPrices(minGasPrices.String())) require.Equal(t, minGasPrices, app.minGasPrices) } -func TestInitChainer(t *testing.T) { - name := t.Name() - // keep the db and logger ourselves so - // we can reload the same app later - db := dbm.NewMemDB() - logger := defaultLogger() - app := NewBaseApp(name, logger, db, nil) - capKey := sdk.NewKVStoreKey("main") - capKey2 := sdk.NewKVStoreKey("key2") - app.MountStores(capKey, capKey2) - - // set a value in the store on init chain - key, value := []byte("hello"), []byte("goodbye") - var initChainer sdk.InitChainer = func(ctx sdk.Context, req abci.RequestInitChain) abci.ResponseInitChain { - store := ctx.KVStore(capKey) - store.Set(key, value) - return abci.ResponseInitChain{} - } - - query := abci.RequestQuery{ - Path: "/store/main/key", - Data: key, - } - - // initChainer is nil - nothing happens +func TestGetMaximumBlockGas(t *testing.T) { + app := setupBaseApp(t) app.InitChain(abci.RequestInitChain{}) - res := app.Query(query) - require.Equal(t, 0, len(res.Value)) - - // set initChainer and try again - should see the value - app.SetInitChainer(initChainer) - - // stores are mounted and private members are set - sealing baseapp - err := app.LoadLatestVersion() // needed to make stores non-nil - require.Nil(t, err) - require.Equal(t, int64(0), app.LastBlockHeight()) - - initChainRes := app.InitChain(abci.RequestInitChain{AppStateBytes: []byte("{}"), ChainId: "test-chain-id"}) // must have valid JSON genesis file, even if empty - - // The AppHash returned by a new chain is the sha256 hash of "". 
- // $ echo -n '' | sha256sum - // e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 - require.Equal( - t, - []byte{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}, - initChainRes.AppHash, - ) - - // assert that chainID is set correctly in InitChain - chainID := app.deliverState.ctx.ChainID() - require.Equal(t, "test-chain-id", chainID, "ChainID in deliverState not set correctly in InitChain") - - chainID = app.checkState.ctx.ChainID() - require.Equal(t, "test-chain-id", chainID, "ChainID in checkState not set correctly in InitChain") - - app.Commit() - res = app.Query(query) - require.Equal(t, int64(1), app.LastBlockHeight()) - require.Equal(t, value, res.Value) - - // reload app - app = NewBaseApp(name, logger, db, nil) - app.SetInitChainer(initChainer) - app.MountStores(capKey, capKey2) - err = app.LoadLatestVersion() // needed to make stores non-nil - require.Nil(t, err) - require.Equal(t, int64(1), app.LastBlockHeight()) - - // ensure we can still query after reloading - res = app.Query(query) - require.Equal(t, value, res.Value) - - // commit and ensure we can still query - header := tmproto.Header{Height: app.LastBlockHeight() + 1} - app.BeginBlock(abci.RequestBeginBlock{Header: header}) - app.Commit() - - res = app.Query(query) - require.Equal(t, value, res.Value) -} - -func TestInitChain_WithInitialHeight(t *testing.T) { - name := t.Name() - db := dbm.NewMemDB() - logger := defaultLogger() - app := NewBaseApp(name, logger, db, nil) - - app.InitChain( - abci.RequestInitChain{ - InitialHeight: 3, - }, - ) - app.Commit() - - require.Equal(t, int64(3), app.LastBlockHeight()) -} - -func TestBeginBlock_WithInitialHeight(t *testing.T) { - name := t.Name() - db := dbm.NewMemDB() - logger := defaultLogger() - app := NewBaseApp(name, logger, db, nil) - - app.InitChain( - abci.RequestInitChain{ - InitialHeight: 3, - }, - ) - - require.PanicsWithError(t, "invalid height: 4; expected: 3", func() { - app.BeginBlock(abci.RequestBeginBlock{ - Header: tmproto.Header{ - Height: 4, - }, - }) - }) - - app.BeginBlock(abci.RequestBeginBlock{ - Header: tmproto.Header{ - Height: 3, - }, - }) - app.Commit() - - require.Equal(t, int64(3), app.LastBlockHeight()) -} - -// Simple tx with a list of Msgs. -type txTest struct { - Msgs []sdk.Msg - Counter int64 - FailOnAnte bool -} - -func (tx *txTest) setFailOnAnte(fail bool) { - tx.FailOnAnte = fail -} - -func (tx *txTest) setFailOnHandler(fail bool) { - for i, msg := range tx.Msgs { - tx.Msgs[i] = msgCounter{msg.(msgCounter).Counter, fail} - } -} - -// Implements Tx -func (tx txTest) GetMsgs() []sdk.Msg { return tx.Msgs } -func (tx txTest) ValidateBasic() error { return nil } - -const ( - routeMsgCounter = "msgCounter" - routeMsgCounter2 = "msgCounter2" - routeMsgKeyValue = "msgKeyValue" -) - -// ValidateBasic() fails on negative counters. 
-// Otherwise it's up to the handlers -type msgCounter struct { - Counter int64 - FailOnHandler bool -} - -// dummy implementation of proto.Message -func (msg msgCounter) Reset() {} -func (msg msgCounter) String() string { return "TODO" } -func (msg msgCounter) ProtoMessage() {} - -// Implements Msg -func (msg msgCounter) Route() string { return routeMsgCounter } -func (msg msgCounter) Type() string { return "counter1" } -func (msg msgCounter) GetSignBytes() []byte { return nil } -func (msg msgCounter) GetSigners() []sdk.AccAddress { return nil } -func (msg msgCounter) ValidateBasic() error { - if msg.Counter >= 0 { - return nil - } - return sdkerrors.Wrap(sdkerrors.ErrInvalidSequence, "counter should be a non-negative integer") -} - -func newTxCounter(counter int64, msgCounters ...int64) *txTest { - msgs := make([]sdk.Msg, 0, len(msgCounters)) - for _, c := range msgCounters { - msgs = append(msgs, msgCounter{c, false}) - } - - return &txTest{msgs, counter, false} -} - -// a msg we dont know how to route -type msgNoRoute struct { - msgCounter -} - -func (tx msgNoRoute) Route() string { return "noroute" } - -// a msg we dont know how to decode -type msgNoDecode struct { - msgCounter -} - -func (tx msgNoDecode) Route() string { return routeMsgCounter } - -// Another counter msg. Duplicate of msgCounter -type msgCounter2 struct { - Counter int64 -} - -// dummy implementation of proto.Message -func (msg msgCounter2) Reset() {} -func (msg msgCounter2) String() string { return "TODO" } -func (msg msgCounter2) ProtoMessage() {} - -// Implements Msg -func (msg msgCounter2) Route() string { return routeMsgCounter2 } -func (msg msgCounter2) Type() string { return "counter2" } -func (msg msgCounter2) GetSignBytes() []byte { return nil } -func (msg msgCounter2) GetSigners() []sdk.AccAddress { return nil } -func (msg msgCounter2) ValidateBasic() error { - if msg.Counter >= 0 { - return nil - } - return sdkerrors.Wrap(sdkerrors.ErrInvalidSequence, "counter should be a non-negative integer") -} - -// A msg that sets a key/value pair. 
-type msgKeyValue struct { - Key []byte - Value []byte -} - -func (msg msgKeyValue) Reset() {} -func (msg msgKeyValue) String() string { return "TODO" } -func (msg msgKeyValue) ProtoMessage() {} -func (msg msgKeyValue) Route() string { return routeMsgKeyValue } -func (msg msgKeyValue) Type() string { return "keyValue" } -func (msg msgKeyValue) GetSignBytes() []byte { return nil } -func (msg msgKeyValue) GetSigners() []sdk.AccAddress { return nil } -func (msg msgKeyValue) ValidateBasic() error { - if msg.Key == nil { - return sdkerrors.Wrap(sdkerrors.ErrInvalidRequest, "key cannot be nil") - } - if msg.Value == nil { - return sdkerrors.Wrap(sdkerrors.ErrInvalidRequest, "value cannot be nil") - } - return nil -} - -// amino decode -func testTxDecoder(cdc *codec.LegacyAmino) sdk.TxDecoder { - return func(txBytes []byte) (sdk.Tx, error) { - var tx txTest - if len(txBytes) == 0 { - return nil, sdkerrors.Wrap(sdkerrors.ErrTxDecode, "tx bytes are empty") - } - - err := cdc.Unmarshal(txBytes, &tx) - if err != nil { - return nil, sdkerrors.ErrTxDecode - } - - return tx, nil - } -} - -func anteHandlerTxTest(t *testing.T, capKey sdk.StoreKey, storeKey []byte) sdk.AnteHandler { - return func(ctx sdk.Context, tx sdk.Tx, simulate bool) (sdk.Context, error) { - store := ctx.KVStore(capKey) - txTest := tx.(txTest) + ctx := app.NewContext(true, tmproto.Header{}) - if txTest.FailOnAnte { - return ctx, sdkerrors.Wrap(sdkerrors.ErrUnauthorized, "ante handler failure") - } + app.StoreConsensusParams(ctx, &abci.ConsensusParams{Block: &abci.BlockParams{MaxGas: 0}}) + require.Equal(t, uint64(0), app.getMaximumBlockGas(ctx)) - _, err := incrementingCounter(t, store, storeKey, txTest.Counter) - if err != nil { - return ctx, err - } + app.StoreConsensusParams(ctx, &abci.ConsensusParams{Block: &abci.BlockParams{MaxGas: -1}}) + require.Equal(t, uint64(0), app.getMaximumBlockGas(ctx)) - ctx.EventManager().EmitEvents( - counterEvent("ante_handler", txTest.Counter), - ) + app.StoreConsensusParams(ctx, &abci.ConsensusParams{Block: &abci.BlockParams{MaxGas: 5000000}}) + require.Equal(t, uint64(5000000), app.getMaximumBlockGas(ctx)) - return ctx, nil - } + app.StoreConsensusParams(ctx, &abci.ConsensusParams{Block: &abci.BlockParams{MaxGas: -5000000}}) + require.Panics(t, func() { app.getMaximumBlockGas(ctx) }) } -func counterEvent(evType string, msgCount int64) sdk.Events { - return sdk.Events{ - sdk.NewEvent( - evType, - sdk.NewAttribute("update_counter", fmt.Sprintf("%d", msgCount)), - ), +func TestListSnapshots(t *testing.T) { + type setupConfig struct { + blocks uint64 + blockTxs int + snapshotInterval uint64 + snapshotKeepEvery uint32 } -} - -func handlerMsgCounter(t *testing.T, capKey sdk.StoreKey, deliverKey []byte) sdk.Handler { - return func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) { - ctx = ctx.WithEventManager(sdk.NewEventManager()) - store := ctx.KVStore(capKey) - var msgCount int64 - switch m := msg.(type) { - case *msgCounter: - if m.FailOnHandler { - return nil, sdkerrors.Wrap(sdkerrors.ErrInvalidRequest, "message handler failure") - } + app, _ := setupBaseAppWithSnapshots(t, 2, 5) - msgCount = m.Counter - case *msgCounter2: - msgCount = m.Counter - } + expected := abci.ResponseListSnapshots{Snapshots: []*abci.Snapshot{ + {Height: 2, Format: 1, Chunks: 2}, + }} - ctx.EventManager().EmitEvents( - counterEvent(sdk.EventTypeMessage, msgCount), - ) - - res, err := incrementingCounter(t, store, deliverKey, msgCount) - if err != nil { - return nil, err - } + resp := 
app.ListSnapshots(abci.RequestListSnapshots{}) + queryResponse := app.Query(abci.RequestQuery{ + Path: "/app/snapshots", + }) - res.Events = ctx.EventManager().Events().ToABCIEvents() - return res, nil - } -} + queryListSnapshotsResp := abci.ResponseListSnapshots{} + err := json.Unmarshal(queryResponse.Value, &queryListSnapshotsResp) + require.NoError(t, err) -func getIntFromStore(store sdk.KVStore, key []byte) int64 { - bz := store.Get(key) - if len(bz) == 0 { - return 0 - } - i, err := binary.ReadVarint(bytes.NewBuffer(bz)) - if err != nil { - panic(err) + for i, s := range resp.Snapshots { + querySnapshot := queryListSnapshotsResp.Snapshots[i] + // we check that the query snapshot and function snapshot are equal + // Then we check that the hash and metadata are not empty. We atm + // do not have a good way to generate the expected value for these. + assert.Equal(t, *s, *querySnapshot) + assert.NotEmpty(t, s.Hash) + assert.NotEmpty(t, s.Metadata) + // Set hash and metadata to nil, so we can check the other snapshot + // fields against expected + s.Hash = nil + s.Metadata = nil } - return i -} - -func setIntOnStore(store sdk.KVStore, key []byte, i int64) { - bz := make([]byte, 8) - n := binary.PutVarint(bz, i) - store.Set(key, bz[:n]) -} - -// check counter matches what's in store. -// increment and store -func incrementingCounter(t *testing.T, store sdk.KVStore, counterKey []byte, counter int64) (*sdk.Result, error) { - storedCounter := getIntFromStore(store, counterKey) - require.Equal(t, storedCounter, counter) - setIntOnStore(store, counterKey, counter+1) - return &sdk.Result{}, nil -} - -//--------------------------------------------------------------------- -// Tx processing - CheckTx, DeliverTx, SimulateTx. -// These tests use the serialized tx as input, while most others will use the -// Check(), Deliver(), Simulate() methods directly. -// Ensure that Check/Deliver/Simulate work as expected with the store. - -// Test that successive CheckTx can see each others' effects -// on the store within a block, and that the CheckTx state -// gets reset to the latest committed state during Commit -func TestCheckTx(t *testing.T) { - // This ante handler reads the key and checks that the value matches the current counter. - // This ensures changes to the kvstore persist across successive CheckTx. - counterKey := []byte("counter-key") - - anteOpt := func(bapp *BaseApp) { bapp.SetAnteHandler(anteHandlerTxTest(t, capKey1, counterKey)) } - routerOpt := func(bapp *BaseApp) { - // TODO: can remove this once CheckTx doesnt process msgs. - bapp.Router().AddRoute(sdk.NewRoute(routeMsgCounter, func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) { - return &sdk.Result{}, nil - })) - } - - app := setupBaseApp(t, anteOpt, routerOpt) - - nTxs := int64(5) - app.InitChain(abci.RequestInitChain{}) - - // Create same codec used in txDecoder - codec := codec.NewLegacyAmino() - registerTestCodec(codec) - - for i := int64(0); i < nTxs; i++ { - tx := newTxCounter(i, 0) // no messages - txBytes, err := codec.Marshal(tx) - require.NoError(t, err) - r := app.CheckTx(abci.RequestCheckTx{Tx: txBytes}) - require.Empty(t, r.GetEvents()) - require.True(t, r.IsOK(), fmt.Sprintf("%v", r)) - } - - checkStateStore := app.checkState.ctx.KVStore(capKey1) - storedCounter := getIntFromStore(checkStateStore, counterKey) - - // Ensure AnteHandler ran - require.Equal(t, nTxs, storedCounter) - - // If a block is committed, CheckTx state should be reset. 
- header := tmproto.Header{Height: 1} - app.BeginBlock(abci.RequestBeginBlock{Header: header, Hash: []byte("hash")}) - - require.NotNil(t, app.checkState.ctx.BlockGasMeter(), "block gas meter should have been set to checkState") - require.NotEmpty(t, app.checkState.ctx.HeaderHash()) - - app.EndBlock(abci.RequestEndBlock{}) - app.Commit() - - checkStateStore = app.checkState.ctx.KVStore(capKey1) - storedBytes := checkStateStore.Get(counterKey) - require.Nil(t, storedBytes) -} - -// Test that successive DeliverTx can see each others' effects -// on the store, both within and across blocks. -func TestDeliverTx(t *testing.T) { - // test increments in the ante - anteKey := []byte("ante-key") - anteOpt := func(bapp *BaseApp) { bapp.SetAnteHandler(anteHandlerTxTest(t, capKey1, anteKey)) } - - // test increments in the handler - deliverKey := []byte("deliver-key") - routerOpt := func(bapp *BaseApp) { - r := sdk.NewRoute(routeMsgCounter, handlerMsgCounter(t, capKey1, deliverKey)) - bapp.Router().AddRoute(r) - } - - app := setupBaseApp(t, anteOpt, routerOpt) - app.InitChain(abci.RequestInitChain{}) - - // Create same codec used in txDecoder - codec := codec.NewLegacyAmino() - registerTestCodec(codec) - - nBlocks := 3 - txPerHeight := 5 - - for blockN := 0; blockN < nBlocks; blockN++ { - header := tmproto.Header{Height: int64(blockN) + 1} - app.BeginBlock(abci.RequestBeginBlock{Header: header}) - - for i := 0; i < txPerHeight; i++ { - counter := int64(blockN*txPerHeight + i) - tx := newTxCounter(counter, counter) - - txBytes, err := codec.Marshal(tx) - require.NoError(t, err) - - res := app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes}) - require.True(t, res.IsOK(), fmt.Sprintf("%v", res)) - events := res.GetEvents() - require.Len(t, events, 3, "should contain ante handler, message type and counter events respectively") - require.Equal(t, sdk.MarkEventsToIndex(counterEvent("ante_handler", counter).ToABCIEvents(), map[string]struct{}{})[0], events[0], "ante handler event") - require.Equal(t, sdk.MarkEventsToIndex(counterEvent(sdk.EventTypeMessage, counter).ToABCIEvents(), map[string]struct{}{})[0], events[2], "msg handler update counter event") - } - - app.EndBlock(abci.RequestEndBlock{}) - app.Commit() - } -} - -// Number of messages doesn't matter to CheckTx. -func TestMultiMsgCheckTx(t *testing.T) { - // TODO: ensure we get the same results - // with one message or many -} - -// One call to DeliverTx should process all the messages, in order. 
-func TestMultiMsgDeliverTx(t *testing.T) { - // increment the tx counter - anteKey := []byte("ante-key") - anteOpt := func(bapp *BaseApp) { bapp.SetAnteHandler(anteHandlerTxTest(t, capKey1, anteKey)) } - - // increment the msg counter - deliverKey := []byte("deliver-key") - deliverKey2 := []byte("deliver-key2") - routerOpt := func(bapp *BaseApp) { - r1 := sdk.NewRoute(routeMsgCounter, handlerMsgCounter(t, capKey1, deliverKey)) - r2 := sdk.NewRoute(routeMsgCounter2, handlerMsgCounter(t, capKey1, deliverKey2)) - bapp.Router().AddRoute(r1) - bapp.Router().AddRoute(r2) - } - - app := setupBaseApp(t, anteOpt, routerOpt) - - // Create same codec used in txDecoder - codec := codec.NewLegacyAmino() - registerTestCodec(codec) - - // run a multi-msg tx - // with all msgs the same route - - header := tmproto.Header{Height: 1} - app.BeginBlock(abci.RequestBeginBlock{Header: header}) - tx := newTxCounter(0, 0, 1, 2) - txBytes, err := codec.Marshal(tx) - require.NoError(t, err) - res := app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes}) - require.True(t, res.IsOK(), fmt.Sprintf("%v", res)) - - store := app.deliverState.ctx.KVStore(capKey1) - - // tx counter only incremented once - txCounter := getIntFromStore(store, anteKey) - require.Equal(t, int64(1), txCounter) - - // msg counter incremented three times - msgCounter := getIntFromStore(store, deliverKey) - require.Equal(t, int64(3), msgCounter) - - // replace the second message with a msgCounter2 - - tx = newTxCounter(1, 3) - tx.Msgs = append(tx.Msgs, msgCounter2{0}) - tx.Msgs = append(tx.Msgs, msgCounter2{1}) - txBytes, err = codec.Marshal(tx) - require.NoError(t, err) - res = app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes}) - require.True(t, res.IsOK(), fmt.Sprintf("%v", res)) - - store = app.deliverState.ctx.KVStore(capKey1) - - // tx counter only incremented once - txCounter = getIntFromStore(store, anteKey) - require.Equal(t, int64(2), txCounter) - - // original counter increments by one - // new counter increments by two - msgCounter = getIntFromStore(store, deliverKey) - require.Equal(t, int64(4), msgCounter) - msgCounter2 := getIntFromStore(store, deliverKey2) - require.Equal(t, int64(2), msgCounter2) -} - -// Interleave calls to Check and Deliver and ensure -// that there is no cross-talk. Check sees results of the previous Check calls -// and Deliver sees that of the previous Deliver calls, but they don't see eachother. -func TestConcurrentCheckDeliver(t *testing.T) { - // TODO -} - -// Simulate a transaction that uses gas to compute the gas. -// Simulate() and Query("/app/simulate", txBytes) should give -// the same results. 
-func TestSimulateTx(t *testing.T) { - gasConsumed := uint64(5) - - anteOpt := func(bapp *BaseApp) { - bapp.SetAnteHandler(func(ctx sdk.Context, tx sdk.Tx, simulate bool) (newCtx sdk.Context, err error) { - newCtx = ctx.WithGasMeter(sdk.NewGasMeter(gasConsumed)) - return - }) - } - - routerOpt := func(bapp *BaseApp) { - r := sdk.NewRoute(routeMsgCounter, func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) { - ctx.GasMeter().ConsumeGas(gasConsumed, "test") - return &sdk.Result{}, nil - }) - bapp.Router().AddRoute(r) - } - - app := setupBaseApp(t, anteOpt, routerOpt) - - app.InitChain(abci.RequestInitChain{}) - - // Create same codec used in txDecoder - cdc := codec.NewLegacyAmino() - registerTestCodec(cdc) - - nBlocks := 3 - for blockN := 0; blockN < nBlocks; blockN++ { - count := int64(blockN + 1) - header := tmproto.Header{Height: count} - app.BeginBlock(abci.RequestBeginBlock{Header: header}) - - tx := newTxCounter(count, count) - txBytes, err := cdc.Marshal(tx) - require.Nil(t, err) - - // simulate a message, check gas reported - gInfo, result, err := app.Simulate(txBytes) - require.NoError(t, err) - require.NotNil(t, result) - require.Equal(t, gasConsumed, gInfo.GasUsed) - - // simulate again, same result - gInfo, result, err = app.Simulate(txBytes) - require.NoError(t, err) - require.NotNil(t, result) - require.Equal(t, gasConsumed, gInfo.GasUsed) - - // simulate by calling Query with encoded tx - query := abci.RequestQuery{ - Path: "/app/simulate", - Data: txBytes, - } - queryResult := app.Query(query) - require.True(t, queryResult.IsOK(), queryResult.Log) - - var simRes sdk.SimulationResponse - require.NoError(t, jsonpb.Unmarshal(strings.NewReader(string(queryResult.Value)), &simRes)) - - require.Equal(t, gInfo, simRes.GasInfo) - require.Equal(t, result.Log, simRes.Result.Log) - require.Equal(t, result.Events, simRes.Result.Events) - require.True(t, bytes.Equal(result.Data, simRes.Result.Data)) - - app.EndBlock(abci.RequestEndBlock{}) - app.Commit() - } -} - -func TestRunInvalidTransaction(t *testing.T) { - anteOpt := func(bapp *BaseApp) { - bapp.SetAnteHandler(func(ctx sdk.Context, tx sdk.Tx, simulate bool) (newCtx sdk.Context, err error) { - return - }) - } - routerOpt := func(bapp *BaseApp) { - r := sdk.NewRoute(routeMsgCounter, func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) { - return &sdk.Result{}, nil - }) - bapp.Router().AddRoute(r) - } - - app := setupBaseApp(t, anteOpt, routerOpt) - - header := tmproto.Header{Height: 1} - app.BeginBlock(abci.RequestBeginBlock{Header: header}) - - // transaction with no messages - { - emptyTx := &txTest{} - _, result, err := app.Deliver(aminoTxEncoder(), emptyTx) - require.Error(t, err) - require.Nil(t, result) - - space, code, _ := sdkerrors.ABCIInfo(err, false) - require.EqualValues(t, sdkerrors.ErrInvalidRequest.Codespace(), space, err) - require.EqualValues(t, sdkerrors.ErrInvalidRequest.ABCICode(), code, err) - } - - // transaction where ValidateBasic fails - { - testCases := []struct { - tx *txTest - fail bool - }{ - {newTxCounter(0, 0), false}, - {newTxCounter(-1, 0), false}, - {newTxCounter(100, 100), false}, - {newTxCounter(100, 5, 4, 3, 2, 1), false}, - - {newTxCounter(0, -1), true}, - {newTxCounter(0, 1, -2), true}, - {newTxCounter(0, 1, 2, -10, 5), true}, - } - - for _, testCase := range testCases { - tx := testCase.tx - _, result, err := app.Deliver(aminoTxEncoder(), tx) - - if testCase.fail { - require.Error(t, err) - - space, code, _ := sdkerrors.ABCIInfo(err, false) - require.EqualValues(t, 
sdkerrors.ErrInvalidSequence.Codespace(), space, err) - require.EqualValues(t, sdkerrors.ErrInvalidSequence.ABCICode(), code, err) - } else { - require.NotNil(t, result) - } - } - } - - // transaction with no known route - { - unknownRouteTx := txTest{[]sdk.Msg{msgNoRoute{}}, 0, false} - _, result, err := app.Deliver(aminoTxEncoder(), unknownRouteTx) - require.Error(t, err) - require.Nil(t, result) - - space, code, _ := sdkerrors.ABCIInfo(err, false) - require.EqualValues(t, sdkerrors.ErrUnknownRequest.Codespace(), space, err) - require.EqualValues(t, sdkerrors.ErrUnknownRequest.ABCICode(), code, err) - - unknownRouteTx = txTest{[]sdk.Msg{msgCounter{}, msgNoRoute{}}, 0, false} - _, result, err = app.Deliver(aminoTxEncoder(), unknownRouteTx) - require.Error(t, err) - require.Nil(t, result) - - space, code, _ = sdkerrors.ABCIInfo(err, false) - require.EqualValues(t, sdkerrors.ErrUnknownRequest.Codespace(), space, err) - require.EqualValues(t, sdkerrors.ErrUnknownRequest.ABCICode(), code, err) - } - - // Transaction with an unregistered message - { - tx := newTxCounter(0, 0) - tx.Msgs = append(tx.Msgs, msgNoDecode{}) - - // new codec so we can encode the tx, but we shouldn't be able to decode - newCdc := codec.NewLegacyAmino() - registerTestCodec(newCdc) - newCdc.RegisterConcrete(&msgNoDecode{}, "cosmos-sdk/baseapp/msgNoDecode", nil) - - txBytes, err := newCdc.Marshal(tx) - require.NoError(t, err) - - res := app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes}) - require.EqualValues(t, sdkerrors.ErrTxDecode.ABCICode(), res.Code) - require.EqualValues(t, sdkerrors.ErrTxDecode.Codespace(), res.Codespace) - } -} - -// Test that transactions exceeding gas limits fail -func TestTxGasLimits(t *testing.T) { - gasGranted := uint64(10) - anteOpt := func(bapp *BaseApp) { - bapp.SetAnteHandler(func(ctx sdk.Context, tx sdk.Tx, simulate bool) (newCtx sdk.Context, err error) { - newCtx = ctx.WithGasMeter(sdk.NewGasMeter(gasGranted)) - - // AnteHandlers must have their own defer/recover in order for the BaseApp - // to know how much gas was used! This is because the GasMeter is created in - // the AnteHandler, but if it panics the context won't be set properly in - // runTx's recover call. 
- defer func() { - if r := recover(); r != nil { - switch rType := r.(type) { - case sdk.ErrorOutOfGas: - err = sdkerrors.Wrapf(sdkerrors.ErrOutOfGas, "out of gas in location: %v", rType.Descriptor) - default: - panic(r) - } - } - }() - - count := tx.(txTest).Counter - newCtx.GasMeter().ConsumeGas(uint64(count), "counter-ante") - - return newCtx, nil - }) - } - - routerOpt := func(bapp *BaseApp) { - r := sdk.NewRoute(routeMsgCounter, func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) { - count := msg.(*msgCounter).Counter - ctx.GasMeter().ConsumeGas(uint64(count), "counter-handler") - return &sdk.Result{}, nil - }) - bapp.Router().AddRoute(r) - } - - app := setupBaseApp(t, anteOpt, routerOpt) - - header := tmproto.Header{Height: 1} - app.BeginBlock(abci.RequestBeginBlock{Header: header}) - - testCases := []struct { - tx *txTest - gasUsed uint64 - fail bool - }{ - {newTxCounter(0, 0), 0, false}, - {newTxCounter(1, 1), 2, false}, - {newTxCounter(9, 1), 10, false}, - {newTxCounter(1, 9), 10, false}, - {newTxCounter(10, 0), 10, false}, - {newTxCounter(0, 10), 10, false}, - {newTxCounter(0, 8, 2), 10, false}, - {newTxCounter(0, 5, 1, 1, 1, 1, 1), 10, false}, - {newTxCounter(0, 5, 1, 1, 1, 1), 9, false}, - - {newTxCounter(9, 2), 11, true}, - {newTxCounter(2, 9), 11, true}, - {newTxCounter(9, 1, 1), 11, true}, - {newTxCounter(1, 8, 1, 1), 11, true}, - {newTxCounter(11, 0), 11, true}, - {newTxCounter(0, 11), 11, true}, - {newTxCounter(0, 5, 11), 16, true}, - } - - for i, tc := range testCases { - tx := tc.tx - gInfo, result, err := app.Deliver(aminoTxEncoder(), tx) - - // check gas used and wanted - require.Equal(t, tc.gasUsed, gInfo.GasUsed, fmt.Sprintf("tc #%d; gas: %v, result: %v, err: %s", i, gInfo, result, err)) - - // check for out of gas - if !tc.fail { - require.NotNil(t, result, fmt.Sprintf("%d: %v, %v", i, tc, err)) - } else { - require.Error(t, err) - require.Nil(t, result) - - space, code, _ := sdkerrors.ABCIInfo(err, false) - require.EqualValues(t, sdkerrors.ErrOutOfGas.Codespace(), space, err) - require.EqualValues(t, sdkerrors.ErrOutOfGas.ABCICode(), code, err) - } - } -} - -// Test that transactions exceeding gas limits fail -func TestMaxBlockGasLimits(t *testing.T) { - gasGranted := uint64(10) - anteOpt := func(bapp *BaseApp) { - bapp.SetAnteHandler(func(ctx sdk.Context, tx sdk.Tx, simulate bool) (newCtx sdk.Context, err error) { - newCtx = ctx.WithGasMeter(sdk.NewGasMeter(gasGranted)) - - defer func() { - if r := recover(); r != nil { - switch rType := r.(type) { - case sdk.ErrorOutOfGas: - err = sdkerrors.Wrapf(sdkerrors.ErrOutOfGas, "out of gas in location: %v", rType.Descriptor) - default: - panic(r) - } - } - }() - - count := tx.(txTest).Counter - newCtx.GasMeter().ConsumeGas(uint64(count), "counter-ante") - - return - }) - } - - routerOpt := func(bapp *BaseApp) { - r := sdk.NewRoute(routeMsgCounter, func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) { - count := msg.(*msgCounter).Counter - ctx.GasMeter().ConsumeGas(uint64(count), "counter-handler") - return &sdk.Result{}, nil - }) - bapp.Router().AddRoute(r) - } - - app := setupBaseApp(t, anteOpt, routerOpt) - app.InitChain(abci.RequestInitChain{ - ConsensusParams: &abci.ConsensusParams{ - Block: &abci.BlockParams{ - MaxGas: 100, - }, - }, - }) - - testCases := []struct { - tx *txTest - numDelivers int - gasUsedPerDeliver uint64 - fail bool - failAfterDeliver int - }{ - {newTxCounter(0, 0), 0, 0, false, 0}, - {newTxCounter(9, 1), 2, 10, false, 0}, - {newTxCounter(10, 0), 3, 10, false, 0}, - {newTxCounter(10, 0), 
10, 10, false, 0}, - {newTxCounter(2, 7), 11, 9, false, 0}, - {newTxCounter(10, 0), 10, 10, false, 0}, // hit the limit but pass - - {newTxCounter(10, 0), 11, 10, true, 10}, - {newTxCounter(10, 0), 15, 10, true, 10}, - {newTxCounter(9, 0), 12, 9, true, 11}, // fly past the limit - } - - for i, tc := range testCases { - tx := tc.tx - - // reset the block gas - header := tmproto.Header{Height: app.LastBlockHeight() + 1} - app.BeginBlock(abci.RequestBeginBlock{Header: header}) - - // execute the transaction multiple times - for j := 0; j < tc.numDelivers; j++ { - _, result, err := app.Deliver(aminoTxEncoder(), tx) - - ctx := app.getState(runTxModeDeliver).ctx - - // check for failed transactions - if tc.fail && (j+1) > tc.failAfterDeliver { - require.Error(t, err, fmt.Sprintf("tc #%d; result: %v, err: %s", i, result, err)) - require.Nil(t, result, fmt.Sprintf("tc #%d; result: %v, err: %s", i, result, err)) - - space, code, _ := sdkerrors.ABCIInfo(err, false) - require.EqualValues(t, sdkerrors.ErrOutOfGas.Codespace(), space, err) - require.EqualValues(t, sdkerrors.ErrOutOfGas.ABCICode(), code, err) - require.True(t, ctx.BlockGasMeter().IsOutOfGas()) - } else { - // check gas used and wanted - blockGasUsed := ctx.BlockGasMeter().GasConsumed() - expBlockGasUsed := tc.gasUsedPerDeliver * uint64(j+1) - require.Equal( - t, expBlockGasUsed, blockGasUsed, - fmt.Sprintf("%d,%d: %v, %v, %v, %v", i, j, tc, expBlockGasUsed, blockGasUsed, result), - ) - - require.NotNil(t, result, fmt.Sprintf("tc #%d; currDeliver: %d, result: %v, err: %s", i, j, result, err)) - require.False(t, ctx.BlockGasMeter().IsPastLimit()) - } - } - } -} - -// Test custom panic handling within app.DeliverTx method -func TestCustomRunTxPanicHandler(t *testing.T) { - const customPanicMsg = "test panic" - anteErr := sdkerrors.Register("fakeModule", 100500, "fakeError") - - anteOpt := func(bapp *BaseApp) { - bapp.SetAnteHandler(func(ctx sdk.Context, tx sdk.Tx, simulate bool) (newCtx sdk.Context, err error) { - panic(sdkerrors.Wrap(anteErr, "anteHandler")) - }) - } - routerOpt := func(bapp *BaseApp) { - r := sdk.NewRoute(routeMsgCounter, func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) { - return &sdk.Result{}, nil - }) - bapp.Router().AddRoute(r) - } - - app := setupBaseApp(t, anteOpt, routerOpt) - - header := tmproto.Header{Height: 1} - app.BeginBlock(abci.RequestBeginBlock{Header: header}) - - app.AddRunTxRecoveryHandler(func(recoveryObj interface{}) error { - err, ok := recoveryObj.(error) - if !ok { - return nil - } - - if anteErr.Is(err) { - panic(customPanicMsg) - } else { - return nil - } - }) - - // Transaction should panic with custom handler above - { - tx := newTxCounter(0, 0) - - require.PanicsWithValue(t, customPanicMsg, func() { app.Deliver(aminoTxEncoder(), tx) }) - } -} - -func TestBaseAppAnteHandler(t *testing.T) { - anteKey := []byte("ante-key") - anteOpt := func(bapp *BaseApp) { - bapp.SetAnteHandler(anteHandlerTxTest(t, capKey1, anteKey)) - } - - deliverKey := []byte("deliver-key") - routerOpt := func(bapp *BaseApp) { - r := sdk.NewRoute(routeMsgCounter, handlerMsgCounter(t, capKey1, deliverKey)) - bapp.Router().AddRoute(r) - } - - cdc := codec.NewLegacyAmino() - app := setupBaseApp(t, anteOpt, routerOpt) - - app.InitChain(abci.RequestInitChain{}) - registerTestCodec(cdc) - - header := tmproto.Header{Height: app.LastBlockHeight() + 1} - app.BeginBlock(abci.RequestBeginBlock{Header: header}) - - // execute a tx that will fail ante handler execution - // - // NOTE: State should not be mutated here. 
This will be implicitly checked by - the next tx's ante handler execution (anteHandlerTxTest). - tx := newTxCounter(0, 0) - tx.setFailOnAnte(true) - txBytes, err := cdc.Marshal(tx) - require.NoError(t, err) - res := app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes}) - require.Empty(t, res.Events) - require.False(t, res.IsOK(), fmt.Sprintf("%v", res)) - - ctx := app.getState(runTxModeDeliver).ctx - store := ctx.KVStore(capKey1) - require.Equal(t, int64(0), getIntFromStore(store, anteKey)) - - // execute a tx that will pass the ante handler (the checkTx state should - // mutate) but will fail the message handler - tx = newTxCounter(0, 0) - tx.setFailOnHandler(true) - - txBytes, err = cdc.Marshal(tx) - require.NoError(t, err) - - res = app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes}) - // should emit ante event - require.NotEmpty(t, res.Events) - require.False(t, res.IsOK(), fmt.Sprintf("%v", res)) - - ctx = app.getState(runTxModeDeliver).ctx - store = ctx.KVStore(capKey1) - require.Equal(t, int64(1), getIntFromStore(store, anteKey)) - require.Equal(t, int64(0), getIntFromStore(store, deliverKey)) - - // execute a successful ante handler and message execution where state is - // implicitly checked by previous tx executions - tx = newTxCounter(1, 0) - - txBytes, err = cdc.Marshal(tx) - require.NoError(t, err) - - res = app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes}) - require.NotEmpty(t, res.Events) - require.True(t, res.IsOK(), fmt.Sprintf("%v", res)) - - ctx = app.getState(runTxModeDeliver).ctx - store = ctx.KVStore(capKey1) - require.Equal(t, int64(2), getIntFromStore(store, anteKey)) - require.Equal(t, int64(1), getIntFromStore(store, deliverKey)) - - // commit - app.EndBlock(abci.RequestEndBlock{}) - app.Commit() -} - -func TestGasConsumptionBadTx(t *testing.T) { - gasWanted := uint64(5) - anteOpt := func(bapp *BaseApp) { - bapp.SetAnteHandler(func(ctx sdk.Context, tx sdk.Tx, simulate bool) (newCtx sdk.Context, err error) { - newCtx = ctx.WithGasMeter(sdk.NewGasMeter(gasWanted)) - - defer func() { - if r := recover(); r != nil { - switch rType := r.(type) { - case sdk.ErrorOutOfGas: - log := fmt.Sprintf("out of gas in location: %v", rType.Descriptor) - err = sdkerrors.Wrap(sdkerrors.ErrOutOfGas, log) - default: - panic(r) - } - } - }() - - txTest := tx.(txTest) - newCtx.GasMeter().ConsumeGas(uint64(txTest.Counter), "counter-ante") - if txTest.FailOnAnte { - return newCtx, sdkerrors.Wrap(sdkerrors.ErrUnauthorized, "ante handler failure") - } - - return - }) - } - - routerOpt := func(bapp *BaseApp) { - r := sdk.NewRoute(routeMsgCounter, func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) { - count := msg.(*msgCounter).Counter - ctx.GasMeter().ConsumeGas(uint64(count), "counter-handler") - return &sdk.Result{}, nil - }) - bapp.Router().AddRoute(r) - } - - cdc := codec.NewLegacyAmino() - registerTestCodec(cdc) - - app := setupBaseApp(t, anteOpt, routerOpt) - app.InitChain(abci.RequestInitChain{ - ConsensusParams: &abci.ConsensusParams{ - Block: &abci.BlockParams{ - MaxGas: 9, - }, - }, - }) - - app.InitChain(abci.RequestInitChain{}) - - header := tmproto.Header{Height: app.LastBlockHeight() + 1} - app.BeginBlock(abci.RequestBeginBlock{Header: header}) - - tx := newTxCounter(5, 0) - tx.setFailOnAnte(true) - txBytes, err := cdc.Marshal(tx) - require.NoError(t, err) - - res := app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes}) - require.False(t, res.IsOK(), fmt.Sprintf("%v", res)) - - // require next tx to fail due to block gas limit - tx = newTxCounter(5, 0) - txBytes, err =
cdc.Marshal(tx) - require.NoError(t, err) - - res = app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes}) - require.False(t, res.IsOK(), fmt.Sprintf("%v", res)) -} - -// Test that we can only query from the latest committed state. -func TestQuery(t *testing.T) { - key, value := []byte("hello"), []byte("goodbye") - anteOpt := func(bapp *BaseApp) { - bapp.SetAnteHandler(func(ctx sdk.Context, tx sdk.Tx, simulate bool) (newCtx sdk.Context, err error) { - store := ctx.KVStore(capKey1) - store.Set(key, value) - return - }) - } - - routerOpt := func(bapp *BaseApp) { - r := sdk.NewRoute(routeMsgCounter, func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) { - store := ctx.KVStore(capKey1) - store.Set(key, value) - return &sdk.Result{}, nil - }) - bapp.Router().AddRoute(r) - } - - app := setupBaseApp(t, anteOpt, routerOpt) - - app.InitChain(abci.RequestInitChain{}) - - // NOTE: "/store/key1" tells us KVStore - // and the final "/key" says to use the data as the - // key in the given KVStore ... - query := abci.RequestQuery{ - Path: "/store/key1/key", - Data: key, - } - tx := newTxCounter(0, 0) - - // query is empty before we do anything - res := app.Query(query) - require.Equal(t, 0, len(res.Value)) - - // query is still empty after a CheckTx - _, resTx, err := app.Check(aminoTxEncoder(), tx) - require.NoError(t, err) - require.NotNil(t, resTx) - res = app.Query(query) - require.Equal(t, 0, len(res.Value)) - - // query is still empty after a DeliverTx before we commit - header := tmproto.Header{Height: app.LastBlockHeight() + 1} - app.BeginBlock(abci.RequestBeginBlock{Header: header}) - - _, resTx, err = app.Deliver(aminoTxEncoder(), tx) - require.NoError(t, err) - require.NotNil(t, resTx) - res = app.Query(query) - require.Equal(t, 0, len(res.Value)) - - // query returns correct value after Commit - app.Commit() - res = app.Query(query) - require.Equal(t, value, res.Value) -} - -func TestGRPCQuery(t *testing.T) { - grpcQueryOpt := func(bapp *BaseApp) { - testdata.RegisterQueryServer( - bapp.GRPCQueryRouter(), - testdata.QueryImpl{}, - ) - } - - app := setupBaseApp(t, grpcQueryOpt) - - app.InitChain(abci.RequestInitChain{}) - header := tmproto.Header{Height: app.LastBlockHeight() + 1} - app.BeginBlock(abci.RequestBeginBlock{Header: header}) - app.Commit() - - req := testdata.SayHelloRequest{Name: "foo"} - reqBz, err := req.Marshal() - require.NoError(t, err) - - reqQuery := abci.RequestQuery{ - Data: reqBz, - Path: "/testdata.Query/SayHello", - } - - resQuery := app.Query(reqQuery) - - require.Equal(t, abci.CodeTypeOK, resQuery.Code, resQuery) - - var res testdata.SayHelloResponse - err = res.Unmarshal(resQuery.Value) - require.NoError(t, err) - require.Equal(t, "Hello foo!", res.Greeting) -} - -// Test p2p filter queries -func TestP2PQuery(t *testing.T) { - addrPeerFilterOpt := func(bapp *BaseApp) { - bapp.SetAddrPeerFilter(func(addrport string) abci.ResponseQuery { - require.Equal(t, "1.1.1.1:8000", addrport) - return abci.ResponseQuery{Code: uint32(3)} - }) - } - - idPeerFilterOpt := func(bapp *BaseApp) { - bapp.SetIDPeerFilter(func(id string) abci.ResponseQuery { - require.Equal(t, "testid", id) - return abci.ResponseQuery{Code: uint32(4)} - }) - } - - app := setupBaseApp(t, addrPeerFilterOpt, idPeerFilterOpt) - - addrQuery := abci.RequestQuery{ - Path: "/p2p/filter/addr/1.1.1.1:8000", - } - res := app.Query(addrQuery) - require.Equal(t, uint32(3), res.Code) - - idQuery := abci.RequestQuery{ - Path: "/p2p/filter/id/testid", - } - res = app.Query(idQuery) - require.Equal(t, uint32(4), res.Code) 
-} - -func TestGetMaximumBlockGas(t *testing.T) { - app := setupBaseApp(t) - app.InitChain(abci.RequestInitChain{}) - ctx := app.NewContext(true, tmproto.Header{}) - - app.StoreConsensusParams(ctx, &abci.ConsensusParams{Block: &abci.BlockParams{MaxGas: 0}}) - require.Equal(t, uint64(0), app.getMaximumBlockGas(ctx)) - - app.StoreConsensusParams(ctx, &abci.ConsensusParams{Block: &abci.BlockParams{MaxGas: -1}}) - require.Equal(t, uint64(0), app.getMaximumBlockGas(ctx)) - - app.StoreConsensusParams(ctx, &abci.ConsensusParams{Block: &abci.BlockParams{MaxGas: 5000000}}) - require.Equal(t, uint64(5000000), app.getMaximumBlockGas(ctx)) - - app.StoreConsensusParams(ctx, &abci.ConsensusParams{Block: &abci.BlockParams{MaxGas: -5000000}}) - require.Panics(t, func() { app.getMaximumBlockGas(ctx) }) -} - -func TestListSnapshots(t *testing.T) { - app, teardown := setupBaseAppWithSnapshots(t, 5, 4) - defer teardown() - - resp := app.ListSnapshots(abci.RequestListSnapshots{}) - for _, s := range resp.Snapshots { - assert.NotEmpty(t, s.Hash) - assert.NotEmpty(t, s.Metadata) - s.Hash = nil - s.Metadata = nil - } - assert.Equal(t, abci.ResponseListSnapshots{Snapshots: []*abci.Snapshot{ - {Height: 4, Format: 1, Chunks: 2}, - {Height: 2, Format: 1, Chunks: 1}, - }}, resp) -} - -func TestLoadSnapshotChunk(t *testing.T) { - app, teardown := setupBaseAppWithSnapshots(t, 2, 5) - defer teardown() - - testcases := map[string]struct { - height uint64 - format uint32 - chunk uint32 - expectEmpty bool - }{ - "Existing snapshot": {2, 1, 1, false}, - "Missing height": {100, 1, 1, true}, - "Missing format": {2, 2, 1, true}, - "Missing chunk": {2, 1, 9, true}, - "Zero height": {0, 1, 1, true}, - "Zero format": {2, 0, 1, true}, - "Zero chunk": {2, 1, 0, false}, - } - for name, tc := range testcases { - tc := tc - t.Run(name, func(t *testing.T) { - resp := app.LoadSnapshotChunk(abci.RequestLoadSnapshotChunk{ - Height: tc.height, - Format: tc.format, - Chunk: tc.chunk, - }) - if tc.expectEmpty { - assert.Equal(t, abci.ResponseLoadSnapshotChunk{}, resp) - return - } - assert.NotEmpty(t, resp.Chunk) - }) - } -} - -func TestOfferSnapshot_Errors(t *testing.T) { - // Set up app before test cases, since it's fairly expensive. 
- app, teardown := setupBaseAppWithSnapshots(t, 0, 0) - defer teardown() - - m := snapshottypes.Metadata{ChunkHashes: [][]byte{{1}, {2}, {3}}} - metadata, err := m.Marshal() - require.NoError(t, err) - hash := []byte{1, 2, 3} - - testcases := map[string]struct { - snapshot *abci.Snapshot - result abci.ResponseOfferSnapshot_Result - }{ - "nil snapshot": {nil, abci.ResponseOfferSnapshot_REJECT}, - "invalid format": {&abci.Snapshot{ - Height: 1, Format: 9, Chunks: 3, Hash: hash, Metadata: metadata, - }, abci.ResponseOfferSnapshot_REJECT_FORMAT}, - "incorrect chunk count": {&abci.Snapshot{ - Height: 1, Format: 1, Chunks: 2, Hash: hash, Metadata: metadata, - }, abci.ResponseOfferSnapshot_REJECT}, - "no chunks": {&abci.Snapshot{ - Height: 1, Format: 1, Chunks: 0, Hash: hash, Metadata: metadata, - }, abci.ResponseOfferSnapshot_REJECT}, - "invalid metadata serialization": {&abci.Snapshot{ - Height: 1, Format: 1, Chunks: 0, Hash: hash, Metadata: []byte{3, 1, 4}, - }, abci.ResponseOfferSnapshot_REJECT}, - } - for name, tc := range testcases { - tc := tc - t.Run(name, func(t *testing.T) { - resp := app.OfferSnapshot(abci.RequestOfferSnapshot{Snapshot: tc.snapshot}) - assert.Equal(t, tc.result, resp.Result) - }) - } - - // Offering a snapshot after one has been accepted should error - resp := app.OfferSnapshot(abci.RequestOfferSnapshot{Snapshot: &abci.Snapshot{ - Height: 1, - Format: snapshottypes.CurrentFormat, - Chunks: 3, - Hash: []byte{1, 2, 3}, - Metadata: metadata, - }}) - require.Equal(t, abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ACCEPT}, resp) - - resp = app.OfferSnapshot(abci.RequestOfferSnapshot{Snapshot: &abci.Snapshot{ - Height: 2, - Format: snapshottypes.CurrentFormat, - Chunks: 3, - Hash: []byte{1, 2, 3}, - Metadata: metadata, - }}) - require.Equal(t, abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ABORT}, resp) -} - -func TestApplySnapshotChunk(t *testing.T) { - source, teardown := setupBaseAppWithSnapshots(t, 4, 10) - defer teardown() - - target, teardown := setupBaseAppWithSnapshots(t, 0, 0) - defer teardown() - - // Fetch latest snapshot to restore - respList := source.ListSnapshots(abci.RequestListSnapshots{}) - require.NotEmpty(t, respList.Snapshots) - snapshot := respList.Snapshots[0] - - // Make sure the snapshot has at least 3 chunks - require.GreaterOrEqual(t, snapshot.Chunks, uint32(3), "Not enough snapshot chunks") - - // Begin a snapshot restoration in the target - respOffer := target.OfferSnapshot(abci.RequestOfferSnapshot{Snapshot: snapshot}) - require.Equal(t, abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ACCEPT}, respOffer) - - // We should be able to pass an invalid chunk and get a verify failure, before reapplying it. 
- respApply := target.ApplySnapshotChunk(abci.RequestApplySnapshotChunk{ - Index: 0, - Chunk: []byte{9}, - Sender: "sender", - }) - require.Equal(t, abci.ResponseApplySnapshotChunk{ - Result: abci.ResponseApplySnapshotChunk_RETRY, - RefetchChunks: []uint32{0}, - RejectSenders: []string{"sender"}, - }, respApply) - - // Fetch each chunk from the source and apply it to the target - for index := uint32(0); index < snapshot.Chunks; index++ { - respChunk := source.LoadSnapshotChunk(abci.RequestLoadSnapshotChunk{ - Height: snapshot.Height, - Format: snapshot.Format, - Chunk: index, - }) - require.NotNil(t, respChunk.Chunk) - respApply := target.ApplySnapshotChunk(abci.RequestApplySnapshotChunk{ - Index: index, - Chunk: respChunk.Chunk, - }) - require.Equal(t, abci.ResponseApplySnapshotChunk{ - Result: abci.ResponseApplySnapshotChunk_ACCEPT, - }, respApply) - } - - // The target should now have the same hash as the source - assert.Equal(t, source.LastCommitID(), target.LastCommitID()) -} - -// NOTE: represents a new custom router for testing purposes of WithRouter() -type testCustomRouter struct { - routes sync.Map -} - -func (rtr *testCustomRouter) AddRoute(route sdk.Route) sdk.Router { - rtr.routes.Store(route.Path(), route.Handler()) - return rtr -} - -func (rtr *testCustomRouter) Route(ctx sdk.Context, path string) sdk.Handler { - if v, ok := rtr.routes.Load(path); ok { - if h, ok := v.(sdk.Handler); ok { - return h - } - } - return nil -} - -func TestWithRouter(t *testing.T) { - // test increments in the ante - anteKey := []byte("ante-key") - anteOpt := func(bapp *BaseApp) { bapp.SetAnteHandler(anteHandlerTxTest(t, capKey1, anteKey)) } - - // test increments in the handler - deliverKey := []byte("deliver-key") - routerOpt := func(bapp *BaseApp) { - bapp.SetRouter(&testCustomRouter{routes: sync.Map{}}) - r := sdk.NewRoute(routeMsgCounter, handlerMsgCounter(t, capKey1, deliverKey)) - bapp.Router().AddRoute(r) - } - - app := setupBaseApp(t, anteOpt, routerOpt) - app.InitChain(abci.RequestInitChain{}) - - // Create same codec used in txDecoder - codec := codec.NewLegacyAmino() - registerTestCodec(codec) - - nBlocks := 3 - txPerHeight := 5 - - for blockN := 0; blockN < nBlocks; blockN++ { - header := tmproto.Header{Height: int64(blockN) + 1} - app.BeginBlock(abci.RequestBeginBlock{Header: header}) - - for i := 0; i < txPerHeight; i++ { - counter := int64(blockN*txPerHeight + i) - tx := newTxCounter(counter, counter) - - txBytes, err := codec.Marshal(tx) - require.NoError(t, err) - - res := app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes}) - require.True(t, res.IsOK(), fmt.Sprintf("%v", res)) - } - - app.EndBlock(abci.RequestEndBlock{}) - app.Commit() - } -} - -func TestBaseApp_EndBlock(t *testing.T) { - db := dbm.NewMemDB() - name := t.Name() - logger := defaultLogger() - - cp := &abci.ConsensusParams{ - Block: &abci.BlockParams{ - MaxGas: 5000000, - }, - } - - app := NewBaseApp(name, logger, db, nil) - app.SetParamStore(¶mStore{db: dbm.NewMemDB()}) - app.InitChain(abci.RequestInitChain{ - ConsensusParams: cp, - }) - - app.SetEndBlocker(func(ctx sdk.Context, req abci.RequestEndBlock) abci.ResponseEndBlock { - return abci.ResponseEndBlock{ - ValidatorUpdates: []abci.ValidatorUpdate{ - {Power: 100}, - }, - } - }) - app.Seal() - - res := app.EndBlock(abci.RequestEndBlock{}) - require.Len(t, res.GetValidatorUpdates(), 1) - require.Equal(t, int64(100), res.GetValidatorUpdates()[0].Power) - require.Equal(t, cp.Block.MaxGas, res.ConsensusParamUpdates.Block.MaxGas) + assert.Equal(t, expected, resp) } 
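Editorial aside: the ante handlers in the removed tests above (and in their moved copies below) all repeat one subtle idiom that is easy to get wrong. Because the gas meter is installed by the ante handler itself, the handler must recover the meter's out-of-gas panic and turn it into an ordinary error, or BaseApp cannot report how much gas was actually consumed; that is what the "AnteHandlers must have their own defer/recover" comment in TestTxGasLimits warns about. A minimal, self-contained sketch of the idiom follows; the helper name consumeWithRecover and the main driver are ours for illustration, not part of this diff:

```
package main

import (
	"fmt"

	sdk "github.com/cosmos/cosmos-sdk/types"
	sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
)

// consumeWithRecover converts the gas meter's out-of-gas panic into an
// ordinary error, mirroring the defer/recover blocks in the ante handlers
// above. Any other panic is re-raised untouched.
func consumeWithRecover(meter sdk.GasMeter, amount uint64) (err error) {
	defer func() {
		if r := recover(); r != nil {
			switch rType := r.(type) {
			case sdk.ErrorOutOfGas:
				err = sdkerrors.Wrapf(sdkerrors.ErrOutOfGas, "out of gas in location: %v", rType.Descriptor)
			default:
				panic(r)
			}
		}
	}()

	meter.ConsumeGas(amount, "example")
	return nil
}

func main() {
	meter := sdk.NewGasMeter(10)
	fmt.Println(consumeWithRecover(meter, 5))  // <nil>: within the limit
	fmt.Println(consumeWithRecover(meter, 50)) // wrapped ErrOutOfGas, not a panic
	fmt.Println(meter.GasConsumed())           // consumption is still recorded on the meter
}
```

Without the recover, the panic would unwind past the ante handler before a context is returned, and runTx's own recovery would have no updated gas meter to read.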
diff --git a/baseapp/deliver_tx_test.go b/baseapp/deliver_tx_test.go new file mode 100644 index 0000000000..c0126f9964 --- /dev/null +++ b/baseapp/deliver_tx_test.go @@ -0,0 +1,1840 @@ +package baseapp + +import ( + "bytes" + "encoding/binary" + "fmt" + "math/rand" + "os" + "strings" + "sync" + "testing" + "time" + + "github.com/gogo/protobuf/jsonpb" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/libs/log" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" + dbm "github.com/tendermint/tm-db" + + "github.com/cosmos/cosmos-sdk/codec" + "github.com/cosmos/cosmos-sdk/snapshots" + snapshottypes "github.com/cosmos/cosmos-sdk/snapshots/types" + "github.com/cosmos/cosmos-sdk/store/rootmulti" + store "github.com/cosmos/cosmos-sdk/store/types" + "github.com/cosmos/cosmos-sdk/testutil/testdata" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +func TestLoadSnapshotChunk(t *testing.T) { + app, teardown := setupBaseAppWithSnapshots(t, 2, 5) + defer teardown() + + testcases := map[string]struct { + height uint64 + format uint32 + chunk uint32 + expectEmpty bool + }{ + "Existing snapshot": {2, 1, 1, false}, + "Missing height": {100, 1, 1, true}, + "Missing format": {2, 2, 1, true}, + "Missing chunk": {2, 1, 9, true}, + "Zero height": {0, 1, 1, true}, + "Zero format": {2, 0, 1, true}, + "Zero chunk": {2, 1, 0, false}, + } + for name, tc := range testcases { + tc := tc + t.Run(name, func(t *testing.T) { + resp := app.LoadSnapshotChunk(abci.RequestLoadSnapshotChunk{ + Height: tc.height, + Format: tc.format, + Chunk: tc.chunk, + }) + if tc.expectEmpty { + assert.Equal(t, abci.ResponseLoadSnapshotChunk{}, resp) + return + } + assert.NotEmpty(t, resp.Chunk) + }) + } +} + +func TestOfferSnapshot_Errors(t *testing.T) { + // Set up app before test cases, since it's fairly expensive. 
+ app, teardown := setupBaseAppWithSnapshots(t, 0, 0) + defer teardown() + + m := snapshottypes.Metadata{ChunkHashes: [][]byte{{1}, {2}, {3}}} + metadata, err := m.Marshal() + require.NoError(t, err) + hash := []byte{1, 2, 3} + + testcases := map[string]struct { + snapshot *abci.Snapshot + result abci.ResponseOfferSnapshot_Result + }{ + "nil snapshot": {nil, abci.ResponseOfferSnapshot_REJECT}, + "invalid format": {&abci.Snapshot{ + Height: 1, Format: 9, Chunks: 3, Hash: hash, Metadata: metadata, + }, abci.ResponseOfferSnapshot_REJECT_FORMAT}, + "incorrect chunk count": {&abci.Snapshot{ + Height: 1, Format: 1, Chunks: 2, Hash: hash, Metadata: metadata, + }, abci.ResponseOfferSnapshot_REJECT}, + "no chunks": {&abci.Snapshot{ + Height: 1, Format: 1, Chunks: 0, Hash: hash, Metadata: metadata, + }, abci.ResponseOfferSnapshot_REJECT}, + "invalid metadata serialization": {&abci.Snapshot{ + Height: 1, Format: 1, Chunks: 0, Hash: hash, Metadata: []byte{3, 1, 4}, + }, abci.ResponseOfferSnapshot_REJECT}, + } + for name, tc := range testcases { + tc := tc + t.Run(name, func(t *testing.T) { + resp := app.OfferSnapshot(abci.RequestOfferSnapshot{Snapshot: tc.snapshot}) + assert.Equal(t, tc.result, resp.Result) + }) + } + + // Offering a snapshot after one has been accepted should error + resp := app.OfferSnapshot(abci.RequestOfferSnapshot{Snapshot: &abci.Snapshot{ + Height: 1, + Format: snapshottypes.CurrentFormat, + Chunks: 3, + Hash: []byte{1, 2, 3}, + Metadata: metadata, + }}) + require.Equal(t, abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ACCEPT}, resp) + + resp = app.OfferSnapshot(abci.RequestOfferSnapshot{Snapshot: &abci.Snapshot{ + Height: 2, + Format: snapshottypes.CurrentFormat, + Chunks: 3, + Hash: []byte{1, 2, 3}, + Metadata: metadata, + }}) + require.Equal(t, abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ABORT}, resp) +} + +func TestApplySnapshotChunk(t *testing.T) { + source, teardown := setupBaseAppWithSnapshots(t, 4, 10) + defer teardown() + + target, teardown := setupBaseAppWithSnapshots(t, 0, 0) + defer teardown() + + // Fetch latest snapshot to restore + respList := source.ListSnapshots(abci.RequestListSnapshots{}) + require.NotEmpty(t, respList.Snapshots) + snapshot := respList.Snapshots[0] + + // Make sure the snapshot has at least 3 chunks + require.GreaterOrEqual(t, snapshot.Chunks, uint32(3), "Not enough snapshot chunks") + + // Begin a snapshot restoration in the target + respOffer := target.OfferSnapshot(abci.RequestOfferSnapshot{Snapshot: snapshot}) + require.Equal(t, abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ACCEPT}, respOffer) + + // We should be able to pass an invalid chunk and get a verify failure, before reapplying it. 
+ respApply := target.ApplySnapshotChunk(abci.RequestApplySnapshotChunk{ + Index: 0, + Chunk: []byte{9}, + Sender: "sender", + }) + require.Equal(t, abci.ResponseApplySnapshotChunk{ + Result: abci.ResponseApplySnapshotChunk_RETRY, + RefetchChunks: []uint32{0}, + RejectSenders: []string{"sender"}, + }, respApply) + + // Fetch each chunk from the source and apply it to the target + for index := uint32(0); index < snapshot.Chunks; index++ { + respChunk := source.LoadSnapshotChunk(abci.RequestLoadSnapshotChunk{ + Height: snapshot.Height, + Format: snapshot.Format, + Chunk: index, + }) + require.NotNil(t, respChunk.Chunk) + respApply := target.ApplySnapshotChunk(abci.RequestApplySnapshotChunk{ + Index: index, + Chunk: respChunk.Chunk, + }) + require.Equal(t, abci.ResponseApplySnapshotChunk{ + Result: abci.ResponseApplySnapshotChunk_ACCEPT, + }, respApply) + } + + // The target should now have the same hash as the source + assert.Equal(t, source.LastCommitID(), target.LastCommitID()) +} + +// NOTE: represents a new custom router for testing purposes of WithRouter() +type testCustomRouter struct { + routes sync.Map +} + +func (rtr *testCustomRouter) AddRoute(route sdk.Route) sdk.Router { + rtr.routes.Store(route.Path(), route.Handler()) + return rtr +} + +func (rtr *testCustomRouter) Route(ctx sdk.Context, path string) sdk.Handler { + if v, ok := rtr.routes.Load(path); ok { + if h, ok := v.(sdk.Handler); ok { + return h + } + } + return nil +} + +func TestWithRouter(t *testing.T) { + // test increments in the ante + anteKey := []byte("ante-key") + anteOpt := func(bapp *BaseApp) { bapp.SetAnteHandler(anteHandlerTxTest(t, capKey1, anteKey)) } + + // test increments in the handler + deliverKey := []byte("deliver-key") + routerOpt := func(bapp *BaseApp) { + bapp.SetRouter(&testCustomRouter{routes: sync.Map{}}) + r := sdk.NewRoute(routeMsgCounter, handlerMsgCounter(t, capKey1, deliverKey)) + bapp.Router().AddRoute(r) + } + + app := setupBaseApp(t, anteOpt, routerOpt) + app.InitChain(abci.RequestInitChain{}) + + // Create same codec used in txDecoder + codec := codec.NewLegacyAmino() + registerTestCodec(codec) + + nBlocks := 3 + txPerHeight := 5 + + for blockN := 0; blockN < nBlocks; blockN++ { + header := tmproto.Header{Height: int64(blockN) + 1} + app.BeginBlock(abci.RequestBeginBlock{Header: header}) + + for i := 0; i < txPerHeight; i++ { + counter := int64(blockN*txPerHeight + i) + tx := newTxCounter(counter, counter) + + txBytes, err := codec.Marshal(tx) + require.NoError(t, err) + + res := app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes}) + require.True(t, res.IsOK(), fmt.Sprintf("%v", res)) + } + + app.EndBlock(abci.RequestEndBlock{}) + app.Commit() + } +} + +func TestBaseApp_EndBlock(t *testing.T) { + db := dbm.NewMemDB() + name := t.Name() + logger := defaultLogger() + + cp := &abci.ConsensusParams{ + Block: &abci.BlockParams{ + MaxGas: 5000000, + }, + } + + app := NewBaseApp(name, logger, db, nil) + app.SetParamStore(¶mStore{db: dbm.NewMemDB()}) + app.InitChain(abci.RequestInitChain{ + ConsensusParams: cp, + }) + + app.SetEndBlocker(func(ctx sdk.Context, req abci.RequestEndBlock) abci.ResponseEndBlock { + return abci.ResponseEndBlock{ + ValidatorUpdates: []abci.ValidatorUpdate{ + {Power: 100}, + }, + } + }) + app.Seal() + + res := app.EndBlock(abci.RequestEndBlock{}) + require.Len(t, res.GetValidatorUpdates(), 1) + require.Equal(t, int64(100), res.GetValidatorUpdates()[0].Power) + require.Equal(t, cp.Block.MaxGas, res.ConsensusParamUpdates.Block.MaxGas) +} + +// Test that we can only query 
from the latest committed state. +func TestQuery(t *testing.T) { + key, value := []byte("hello"), []byte("goodbye") + anteOpt := func(bapp *BaseApp) { + bapp.SetAnteHandler(func(ctx sdk.Context, tx sdk.Tx, simulate bool) (newCtx sdk.Context, err error) { + store := ctx.KVStore(capKey1) + store.Set(key, value) + return + }) + } + + routerOpt := func(bapp *BaseApp) { + r := sdk.NewRoute(routeMsgCounter, func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) { + store := ctx.KVStore(capKey1) + store.Set(key, value) + return &sdk.Result{}, nil + }) + bapp.Router().AddRoute(r) + } + + app := setupBaseApp(t, anteOpt, routerOpt) + + app.InitChain(abci.RequestInitChain{}) + + // NOTE: "/store/key1" tells us KVStore + // and the final "/key" says to use the data as the + // key in the given KVStore ... + query := abci.RequestQuery{ + Path: "/store/key1/key", + Data: key, + } + tx := newTxCounter(0, 0) + + // query is empty before we do anything + res := app.Query(query) + require.Equal(t, 0, len(res.Value)) + + // query is still empty after a CheckTx + _, resTx, err := app.Check(aminoTxEncoder(), tx) + require.NoError(t, err) + require.NotNil(t, resTx) + res = app.Query(query) + require.Equal(t, 0, len(res.Value)) + + // query is still empty after a DeliverTx before we commit + header := tmproto.Header{Height: app.LastBlockHeight() + 1} + app.BeginBlock(abci.RequestBeginBlock{Header: header}) + + _, resTx, err = app.Deliver(aminoTxEncoder(), tx) + require.NoError(t, err) + require.NotNil(t, resTx) + res = app.Query(query) + require.Equal(t, 0, len(res.Value)) + + // query returns correct value after Commit + app.Commit() + res = app.Query(query) + require.Equal(t, value, res.Value) +} + +func TestGRPCQuery(t *testing.T) { + grpcQueryOpt := func(bapp *BaseApp) { + testdata.RegisterQueryServer( + bapp.GRPCQueryRouter(), + testdata.QueryImpl{}, + ) + } + + app := setupBaseApp(t, grpcQueryOpt) + + app.InitChain(abci.RequestInitChain{}) + header := tmproto.Header{Height: app.LastBlockHeight() + 1} + app.BeginBlock(abci.RequestBeginBlock{Header: header}) + app.Commit() + + req := testdata.SayHelloRequest{Name: "foo"} + reqBz, err := req.Marshal() + require.NoError(t, err) + + reqQuery := abci.RequestQuery{ + Data: reqBz, + Path: "/testdata.Query/SayHello", + } + + resQuery := app.Query(reqQuery) + + require.Equal(t, abci.CodeTypeOK, resQuery.Code, resQuery) + + var res testdata.SayHelloResponse + err = res.Unmarshal(resQuery.Value) + require.NoError(t, err) + require.Equal(t, "Hello foo!", res.Greeting) +} + +// Test p2p filter queries +func TestP2PQuery(t *testing.T) { + addrPeerFilterOpt := func(bapp *BaseApp) { + bapp.SetAddrPeerFilter(func(addrport string) abci.ResponseQuery { + require.Equal(t, "1.1.1.1:8000", addrport) + return abci.ResponseQuery{Code: uint32(3)} + }) + } + + idPeerFilterOpt := func(bapp *BaseApp) { + bapp.SetIDPeerFilter(func(id string) abci.ResponseQuery { + require.Equal(t, "testid", id) + return abci.ResponseQuery{Code: uint32(4)} + }) + } + + app := setupBaseApp(t, addrPeerFilterOpt, idPeerFilterOpt) + + addrQuery := abci.RequestQuery{ + Path: "/p2p/filter/addr/1.1.1.1:8000", + } + res := app.Query(addrQuery) + require.Equal(t, uint32(3), res.Code) + + idQuery := abci.RequestQuery{ + Path: "/p2p/filter/id/testid", + } + res = app.Query(idQuery) + require.Equal(t, uint32(4), res.Code) +} + +// One call to DeliverTx should process all the messages, in order. 
+func TestMultiMsgDeliverTx(t *testing.T) { + // increment the tx counter + anteKey := []byte("ante-key") + anteOpt := func(bapp *BaseApp) { bapp.SetAnteHandler(anteHandlerTxTest(t, capKey1, anteKey)) } + + // increment the msg counter + deliverKey := []byte("deliver-key") + deliverKey2 := []byte("deliver-key2") + routerOpt := func(bapp *BaseApp) { + r1 := sdk.NewRoute(routeMsgCounter, handlerMsgCounter(t, capKey1, deliverKey)) + r2 := sdk.NewRoute(routeMsgCounter2, handlerMsgCounter(t, capKey1, deliverKey2)) + bapp.Router().AddRoute(r1) + bapp.Router().AddRoute(r2) + } + + app := setupBaseApp(t, anteOpt, routerOpt) + + // Create same codec used in txDecoder + codec := codec.NewLegacyAmino() + registerTestCodec(codec) + + // run a multi-msg tx + // with all msgs the same route + + header := tmproto.Header{Height: 1} + app.BeginBlock(abci.RequestBeginBlock{Header: header}) + tx := newTxCounter(0, 0, 1, 2) + txBytes, err := codec.Marshal(tx) + require.NoError(t, err) + res := app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes}) + require.True(t, res.IsOK(), fmt.Sprintf("%v", res)) + + store := app.deliverState.ctx.KVStore(capKey1) + + // tx counter only incremented once + txCounter := getIntFromStore(store, anteKey) + require.Equal(t, int64(1), txCounter) + + // msg counter incremented three times + msgCounter := getIntFromStore(store, deliverKey) + require.Equal(t, int64(3), msgCounter) + + // replace the second message with a msgCounter2 + + tx = newTxCounter(1, 3) + tx.Msgs = append(tx.Msgs, msgCounter2{0}) + tx.Msgs = append(tx.Msgs, msgCounter2{1}) + txBytes, err = codec.Marshal(tx) + require.NoError(t, err) + res = app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes}) + require.True(t, res.IsOK(), fmt.Sprintf("%v", res)) + + store = app.deliverState.ctx.KVStore(capKey1) + + // tx counter only incremented once + txCounter = getIntFromStore(store, anteKey) + require.Equal(t, int64(2), txCounter) + + // original counter increments by one + // new counter increments by two + msgCounter = getIntFromStore(store, deliverKey) + require.Equal(t, int64(4), msgCounter) + msgCounter2 := getIntFromStore(store, deliverKey2) + require.Equal(t, int64(2), msgCounter2) +} + +// Interleave calls to Check and Deliver and ensure +// that there is no cross-talk. Check sees results of the previous Check calls +// and Deliver sees those of the previous Deliver calls, but they don't see each other. +func TestConcurrentCheckDeliver(t *testing.T) { + // TODO +} + +// Simulate a transaction that uses gas to compute the gas. +// Simulate() and Query("/app/simulate", txBytes) should give +// the same results.
+func TestSimulateTx(t *testing.T) { + gasConsumed := uint64(5) + + anteOpt := func(bapp *BaseApp) { + bapp.SetAnteHandler(func(ctx sdk.Context, tx sdk.Tx, simulate bool) (newCtx sdk.Context, err error) { + newCtx = ctx.WithGasMeter(sdk.NewGasMeter(gasConsumed)) + return + }) + } + + routerOpt := func(bapp *BaseApp) { + r := sdk.NewRoute(routeMsgCounter, func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) { + ctx.GasMeter().ConsumeGas(gasConsumed, "test") + return &sdk.Result{}, nil + }) + bapp.Router().AddRoute(r) + } + + app := setupBaseApp(t, anteOpt, routerOpt) + + app.InitChain(abci.RequestInitChain{}) + + // Create same codec used in txDecoder + cdc := codec.NewLegacyAmino() + registerTestCodec(cdc) + + nBlocks := 3 + for blockN := 0; blockN < nBlocks; blockN++ { + count := int64(blockN + 1) + header := tmproto.Header{Height: count} + app.BeginBlock(abci.RequestBeginBlock{Header: header}) + + tx := newTxCounter(count, count) + txBytes, err := cdc.Marshal(tx) + require.Nil(t, err) + + // simulate a message, check gas reported + gInfo, result, err := app.Simulate(txBytes) + require.NoError(t, err) + require.NotNil(t, result) + require.Equal(t, gasConsumed, gInfo.GasUsed) + + // simulate again, same result + gInfo, result, err = app.Simulate(txBytes) + require.NoError(t, err) + require.NotNil(t, result) + require.Equal(t, gasConsumed, gInfo.GasUsed) + + // simulate by calling Query with encoded tx + query := abci.RequestQuery{ + Path: "/app/simulate", + Data: txBytes, + } + queryResult := app.Query(query) + require.True(t, queryResult.IsOK(), queryResult.Log) + + var simRes sdk.SimulationResponse + require.NoError(t, jsonpb.Unmarshal(strings.NewReader(string(queryResult.Value)), &simRes)) + + require.Equal(t, gInfo, simRes.GasInfo) + require.Equal(t, result.Log, simRes.Result.Log) + require.Equal(t, result.Events, simRes.Result.Events) + require.True(t, bytes.Equal(result.Data, simRes.Result.Data)) + + app.EndBlock(abci.RequestEndBlock{}) + app.Commit() + } +} + +func TestRunInvalidTransaction(t *testing.T) { + anteOpt := func(bapp *BaseApp) { + bapp.SetAnteHandler(func(ctx sdk.Context, tx sdk.Tx, simulate bool) (newCtx sdk.Context, err error) { + return + }) + } + routerOpt := func(bapp *BaseApp) { + r := sdk.NewRoute(routeMsgCounter, func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) { + return &sdk.Result{}, nil + }) + bapp.Router().AddRoute(r) + } + + app := setupBaseApp(t, anteOpt, routerOpt) + + header := tmproto.Header{Height: 1} + app.BeginBlock(abci.RequestBeginBlock{Header: header}) + + // transaction with no messages + { + emptyTx := &txTest{} + _, result, err := app.Deliver(aminoTxEncoder(), emptyTx) + require.Error(t, err) + require.Nil(t, result) + + space, code, _ := sdkerrors.ABCIInfo(err, false) + require.EqualValues(t, sdkerrors.ErrInvalidRequest.Codespace(), space, err) + require.EqualValues(t, sdkerrors.ErrInvalidRequest.ABCICode(), code, err) + } + + // transaction where ValidateBasic fails + { + testCases := []struct { + tx *txTest + fail bool + }{ + {newTxCounter(0, 0), false}, + {newTxCounter(-1, 0), false}, + {newTxCounter(100, 100), false}, + {newTxCounter(100, 5, 4, 3, 2, 1), false}, + + {newTxCounter(0, -1), true}, + {newTxCounter(0, 1, -2), true}, + {newTxCounter(0, 1, 2, -10, 5), true}, + } + + for _, testCase := range testCases { + tx := testCase.tx + _, result, err := app.Deliver(aminoTxEncoder(), tx) + + if testCase.fail { + require.Error(t, err) + + space, code, _ := sdkerrors.ABCIInfo(err, false) + require.EqualValues(t, 
sdkerrors.ErrInvalidSequence.Codespace(), space, err) + require.EqualValues(t, sdkerrors.ErrInvalidSequence.ABCICode(), code, err) + } else { + require.NotNil(t, result) + } + } + } + + // transaction with no known route + { + unknownRouteTx := txTest{[]sdk.Msg{msgNoRoute{}}, 0, false} + _, result, err := app.Deliver(aminoTxEncoder(), unknownRouteTx) + require.Error(t, err) + require.Nil(t, result) + + space, code, _ := sdkerrors.ABCIInfo(err, false) + require.EqualValues(t, sdkerrors.ErrUnknownRequest.Codespace(), space, err) + require.EqualValues(t, sdkerrors.ErrUnknownRequest.ABCICode(), code, err) + + unknownRouteTx = txTest{[]sdk.Msg{msgCounter{}, msgNoRoute{}}, 0, false} + _, result, err = app.Deliver(aminoTxEncoder(), unknownRouteTx) + require.Error(t, err) + require.Nil(t, result) + + space, code, _ = sdkerrors.ABCIInfo(err, false) + require.EqualValues(t, sdkerrors.ErrUnknownRequest.Codespace(), space, err) + require.EqualValues(t, sdkerrors.ErrUnknownRequest.ABCICode(), code, err) + } + + // Transaction with an unregistered message + { + tx := newTxCounter(0, 0) + tx.Msgs = append(tx.Msgs, msgNoDecode{}) + + // new codec so we can encode the tx, but we shouldn't be able to decode + newCdc := codec.NewLegacyAmino() + registerTestCodec(newCdc) + newCdc.RegisterConcrete(&msgNoDecode{}, "cosmos-sdk/baseapp/msgNoDecode", nil) + + txBytes, err := newCdc.Marshal(tx) + require.NoError(t, err) + + res := app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes}) + require.EqualValues(t, sdkerrors.ErrTxDecode.ABCICode(), res.Code) + require.EqualValues(t, sdkerrors.ErrTxDecode.Codespace(), res.Codespace) + } +} + +// Test that transactions exceeding gas limits fail +func TestTxGasLimits(t *testing.T) { + gasGranted := uint64(10) + anteOpt := func(bapp *BaseApp) { + bapp.SetAnteHandler(func(ctx sdk.Context, tx sdk.Tx, simulate bool) (newCtx sdk.Context, err error) { + newCtx = ctx.WithGasMeter(sdk.NewGasMeter(gasGranted)) + + // AnteHandlers must have their own defer/recover in order for the BaseApp + // to know how much gas was used! This is because the GasMeter is created in + // the AnteHandler, but if it panics the context won't be set properly in + // runTx's recover call. 
+ defer func() { + if r := recover(); r != nil { + switch rType := r.(type) { + case sdk.ErrorOutOfGas: + err = sdkerrors.Wrapf(sdkerrors.ErrOutOfGas, "out of gas in location: %v", rType.Descriptor) + default: + panic(r) + } + } + }() + + count := tx.(txTest).Counter + newCtx.GasMeter().ConsumeGas(uint64(count), "counter-ante") + + return newCtx, nil + }) + } + + routerOpt := func(bapp *BaseApp) { + r := sdk.NewRoute(routeMsgCounter, func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) { + count := msg.(*msgCounter).Counter + ctx.GasMeter().ConsumeGas(uint64(count), "counter-handler") + return &sdk.Result{}, nil + }) + bapp.Router().AddRoute(r) + } + + app := setupBaseApp(t, anteOpt, routerOpt) + + header := tmproto.Header{Height: 1} + app.BeginBlock(abci.RequestBeginBlock{Header: header}) + + testCases := []struct { + tx *txTest + gasUsed uint64 + fail bool + }{ + {newTxCounter(0, 0), 0, false}, + {newTxCounter(1, 1), 2, false}, + {newTxCounter(9, 1), 10, false}, + {newTxCounter(1, 9), 10, false}, + {newTxCounter(10, 0), 10, false}, + {newTxCounter(0, 10), 10, false}, + {newTxCounter(0, 8, 2), 10, false}, + {newTxCounter(0, 5, 1, 1, 1, 1, 1), 10, false}, + {newTxCounter(0, 5, 1, 1, 1, 1), 9, false}, + + {newTxCounter(9, 2), 11, true}, + {newTxCounter(2, 9), 11, true}, + {newTxCounter(9, 1, 1), 11, true}, + {newTxCounter(1, 8, 1, 1), 11, true}, + {newTxCounter(11, 0), 11, true}, + {newTxCounter(0, 11), 11, true}, + {newTxCounter(0, 5, 11), 16, true}, + } + + for i, tc := range testCases { + tx := tc.tx + gInfo, result, err := app.Deliver(aminoTxEncoder(), tx) + + // check gas used and wanted + require.Equal(t, tc.gasUsed, gInfo.GasUsed, fmt.Sprintf("tc #%d; gas: %v, result: %v, err: %s", i, gInfo, result, err)) + + // check for out of gas + if !tc.fail { + require.NotNil(t, result, fmt.Sprintf("%d: %v, %v", i, tc, err)) + } else { + require.Error(t, err) + require.Nil(t, result) + + space, code, _ := sdkerrors.ABCIInfo(err, false) + require.EqualValues(t, sdkerrors.ErrOutOfGas.Codespace(), space, err) + require.EqualValues(t, sdkerrors.ErrOutOfGas.ABCICode(), code, err) + } + } +} + +// Test that transactions exceeding gas limits fail +func TestMaxBlockGasLimits(t *testing.T) { + gasGranted := uint64(10) + anteOpt := func(bapp *BaseApp) { + bapp.SetAnteHandler(func(ctx sdk.Context, tx sdk.Tx, simulate bool) (newCtx sdk.Context, err error) { + newCtx = ctx.WithGasMeter(sdk.NewGasMeter(gasGranted)) + + defer func() { + if r := recover(); r != nil { + switch rType := r.(type) { + case sdk.ErrorOutOfGas: + err = sdkerrors.Wrapf(sdkerrors.ErrOutOfGas, "out of gas in location: %v", rType.Descriptor) + default: + panic(r) + } + } + }() + + count := tx.(txTest).Counter + newCtx.GasMeter().ConsumeGas(uint64(count), "counter-ante") + + return + }) + } + + routerOpt := func(bapp *BaseApp) { + r := sdk.NewRoute(routeMsgCounter, func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) { + count := msg.(*msgCounter).Counter + ctx.GasMeter().ConsumeGas(uint64(count), "counter-handler") + return &sdk.Result{}, nil + }) + bapp.Router().AddRoute(r) + } + + app := setupBaseApp(t, anteOpt, routerOpt) + app.InitChain(abci.RequestInitChain{ + ConsensusParams: &abci.ConsensusParams{ + Block: &abci.BlockParams{ + MaxGas: 100, + }, + }, + }) + + testCases := []struct { + tx *txTest + numDelivers int + gasUsedPerDeliver uint64 + fail bool + failAfterDeliver int + }{ + {newTxCounter(0, 0), 0, 0, false, 0}, + {newTxCounter(9, 1), 2, 10, false, 0}, + {newTxCounter(10, 0), 3, 10, false, 0}, + {newTxCounter(10, 0), 
10, 10, false, 0}, + {newTxCounter(2, 7), 11, 9, false, 0}, + {newTxCounter(10, 0), 10, 10, false, 0}, // hit the limit but pass + + {newTxCounter(10, 0), 11, 10, true, 10}, + {newTxCounter(10, 0), 15, 10, true, 10}, + {newTxCounter(9, 0), 12, 9, true, 11}, // fly past the limit + } + + for i, tc := range testCases { + tx := tc.tx + + // reset the block gas + header := tmproto.Header{Height: app.LastBlockHeight() + 1} + app.BeginBlock(abci.RequestBeginBlock{Header: header}) + + // execute the transaction multiple times + for j := 0; j < tc.numDelivers; j++ { + _, result, err := app.Deliver(aminoTxEncoder(), tx) + + ctx := app.getState(runTxModeDeliver).ctx + + // check for failed transactions + if tc.fail && (j+1) > tc.failAfterDeliver { + require.Error(t, err, fmt.Sprintf("tc #%d; result: %v, err: %s", i, result, err)) + require.Nil(t, result, fmt.Sprintf("tc #%d; result: %v, err: %s", i, result, err)) + + space, code, _ := sdkerrors.ABCIInfo(err, false) + require.EqualValues(t, sdkerrors.ErrOutOfGas.Codespace(), space, err) + require.EqualValues(t, sdkerrors.ErrOutOfGas.ABCICode(), code, err) + require.True(t, ctx.BlockGasMeter().IsOutOfGas()) + } else { + // check gas used and wanted + blockGasUsed := ctx.BlockGasMeter().GasConsumed() + expBlockGasUsed := tc.gasUsedPerDeliver * uint64(j+1) + require.Equal( + t, expBlockGasUsed, blockGasUsed, + fmt.Sprintf("%d,%d: %v, %v, %v, %v", i, j, tc, expBlockGasUsed, blockGasUsed, result), + ) + + require.NotNil(t, result, fmt.Sprintf("tc #%d; currDeliver: %d, result: %v, err: %s", i, j, result, err)) + require.False(t, ctx.BlockGasMeter().IsPastLimit()) + } + } + } +} + +// Test custom panic handling within app.DeliverTx method +func TestCustomRunTxPanicHandler(t *testing.T) { + const customPanicMsg = "test panic" + anteErr := sdkerrors.Register("fakeModule", 100500, "fakeError") + + anteOpt := func(bapp *BaseApp) { + bapp.SetAnteHandler(func(ctx sdk.Context, tx sdk.Tx, simulate bool) (newCtx sdk.Context, err error) { + panic(sdkerrors.Wrap(anteErr, "anteHandler")) + }) + } + routerOpt := func(bapp *BaseApp) { + r := sdk.NewRoute(routeMsgCounter, func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) { + return &sdk.Result{}, nil + }) + bapp.Router().AddRoute(r) + } + + app := setupBaseApp(t, anteOpt, routerOpt) + + header := tmproto.Header{Height: 1} + app.BeginBlock(abci.RequestBeginBlock{Header: header}) + + app.AddRunTxRecoveryHandler(func(recoveryObj interface{}) error { + err, ok := recoveryObj.(error) + if !ok { + return nil + } + + if anteErr.Is(err) { + panic(customPanicMsg) + } else { + return nil + } + }) + + // Transaction should panic with custom handler above + { + tx := newTxCounter(0, 0) + + require.PanicsWithValue(t, customPanicMsg, func() { app.Deliver(aminoTxEncoder(), tx) }) + } +} + +func TestBaseAppAnteHandler(t *testing.T) { + anteKey := []byte("ante-key") + anteOpt := func(bapp *BaseApp) { + bapp.SetAnteHandler(anteHandlerTxTest(t, capKey1, anteKey)) + } + + deliverKey := []byte("deliver-key") + routerOpt := func(bapp *BaseApp) { + r := sdk.NewRoute(routeMsgCounter, handlerMsgCounter(t, capKey1, deliverKey)) + bapp.Router().AddRoute(r) + } + + cdc := codec.NewLegacyAmino() + app := setupBaseApp(t, anteOpt, routerOpt) + + app.InitChain(abci.RequestInitChain{}) + registerTestCodec(cdc) + + header := tmproto.Header{Height: app.LastBlockHeight() + 1} + app.BeginBlock(abci.RequestBeginBlock{Header: header}) + + // execute a tx that will fail ante handler execution + // + // NOTE: State should not be mutated here. 
This will be implicitly checked by + the next tx's ante handler execution (anteHandlerTxTest). + tx := newTxCounter(0, 0) + tx.setFailOnAnte(true) + txBytes, err := cdc.Marshal(tx) + require.NoError(t, err) + res := app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes}) + require.Empty(t, res.Events) + require.False(t, res.IsOK(), fmt.Sprintf("%v", res)) + + ctx := app.getState(runTxModeDeliver).ctx + store := ctx.KVStore(capKey1) + require.Equal(t, int64(0), getIntFromStore(store, anteKey)) + + // execute a tx that will pass the ante handler (the checkTx state should + // mutate) but will fail the message handler + tx = newTxCounter(0, 0) + tx.setFailOnHandler(true) + + txBytes, err = cdc.Marshal(tx) + require.NoError(t, err) + + res = app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes}) + // should emit ante event + require.NotEmpty(t, res.Events) + require.False(t, res.IsOK(), fmt.Sprintf("%v", res)) + + ctx = app.getState(runTxModeDeliver).ctx + store = ctx.KVStore(capKey1) + require.Equal(t, int64(1), getIntFromStore(store, anteKey)) + require.Equal(t, int64(0), getIntFromStore(store, deliverKey)) + + // execute a successful ante handler and message execution where state is + // implicitly checked by previous tx executions + tx = newTxCounter(1, 0) + + txBytes, err = cdc.Marshal(tx) + require.NoError(t, err) + + res = app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes}) + require.NotEmpty(t, res.Events) + require.True(t, res.IsOK(), fmt.Sprintf("%v", res)) + + ctx = app.getState(runTxModeDeliver).ctx + store = ctx.KVStore(capKey1) + require.Equal(t, int64(2), getIntFromStore(store, anteKey)) + require.Equal(t, int64(1), getIntFromStore(store, deliverKey)) + + // commit + app.EndBlock(abci.RequestEndBlock{}) + app.Commit() +} + +func TestGasConsumptionBadTx(t *testing.T) { + gasWanted := uint64(5) + anteOpt := func(bapp *BaseApp) { + bapp.SetAnteHandler(func(ctx sdk.Context, tx sdk.Tx, simulate bool) (newCtx sdk.Context, err error) { + newCtx = ctx.WithGasMeter(sdk.NewGasMeter(gasWanted)) + + defer func() { + if r := recover(); r != nil { + switch rType := r.(type) { + case sdk.ErrorOutOfGas: + log := fmt.Sprintf("out of gas in location: %v", rType.Descriptor) + err = sdkerrors.Wrap(sdkerrors.ErrOutOfGas, log) + default: + panic(r) + } + } + }() + + txTest := tx.(txTest) + newCtx.GasMeter().ConsumeGas(uint64(txTest.Counter), "counter-ante") + if txTest.FailOnAnte { + return newCtx, sdkerrors.Wrap(sdkerrors.ErrUnauthorized, "ante handler failure") + } + + return + }) + } + + routerOpt := func(bapp *BaseApp) { + r := sdk.NewRoute(routeMsgCounter, func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) { + count := msg.(*msgCounter).Counter + ctx.GasMeter().ConsumeGas(uint64(count), "counter-handler") + return &sdk.Result{}, nil + }) + bapp.Router().AddRoute(r) + } + + cdc := codec.NewLegacyAmino() + registerTestCodec(cdc) + + app := setupBaseApp(t, anteOpt, routerOpt) + app.InitChain(abci.RequestInitChain{ + ConsensusParams: &abci.ConsensusParams{ + Block: &abci.BlockParams{ + MaxGas: 9, + }, + }, + }) + + app.InitChain(abci.RequestInitChain{}) + + header := tmproto.Header{Height: app.LastBlockHeight() + 1} + app.BeginBlock(abci.RequestBeginBlock{Header: header}) + + tx := newTxCounter(5, 0) + tx.setFailOnAnte(true) + txBytes, err := cdc.Marshal(tx) + require.NoError(t, err) + + res := app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes}) + require.False(t, res.IsOK(), fmt.Sprintf("%v", res)) + + // require next tx to fail due to block gas limit + tx = newTxCounter(5, 0) + txBytes, err =
cdc.Marshal(tx) + require.NoError(t, err) + + res = app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes}) + require.False(t, res.IsOK(), fmt.Sprintf("%v", res)) +} + +func TestInitChainer(t *testing.T) { + name := t.Name() + // keep the db and logger ourselves so + // we can reload the same app later + db := dbm.NewMemDB() + logger := defaultLogger() + app := NewBaseApp(name, logger, db, nil) + capKey := sdk.NewKVStoreKey("main") + capKey2 := sdk.NewKVStoreKey("key2") + app.MountStores(capKey, capKey2) + + // set a value in the store on init chain + key, value := []byte("hello"), []byte("goodbye") + var initChainer sdk.InitChainer = func(ctx sdk.Context, req abci.RequestInitChain) abci.ResponseInitChain { + store := ctx.KVStore(capKey) + store.Set(key, value) + return abci.ResponseInitChain{} + } + + query := abci.RequestQuery{ + Path: "/store/main/key", + Data: key, + } + + // initChainer is nil - nothing happens + app.InitChain(abci.RequestInitChain{}) + res := app.Query(query) + require.Equal(t, 0, len(res.Value)) + + // set initChainer and try again - should see the value + app.SetInitChainer(initChainer) + + // stores are mounted and private members are set - sealing baseapp + err := app.LoadLatestVersion() // needed to make stores non-nil + require.Nil(t, err) + require.Equal(t, int64(0), app.LastBlockHeight()) + + initChainRes := app.InitChain(abci.RequestInitChain{AppStateBytes: []byte("{}"), ChainId: "test-chain-id"}) // must have valid JSON genesis file, even if empty + + // The AppHash returned by a new chain is the sha256 hash of "". + // $ echo -n '' | sha256sum + // e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 + require.Equal( + t, + []byte{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}, + initChainRes.AppHash, + ) + + // assert that chainID is set correctly in InitChain + chainID := app.deliverState.ctx.ChainID() + require.Equal(t, "test-chain-id", chainID, "ChainID in deliverState not set correctly in InitChain") + + chainID = app.checkState.ctx.ChainID() + require.Equal(t, "test-chain-id", chainID, "ChainID in checkState not set correctly in InitChain") + + app.Commit() + res = app.Query(query) + require.Equal(t, int64(1), app.LastBlockHeight()) + require.Equal(t, value, res.Value) + + // reload app + app = NewBaseApp(name, logger, db, nil) + app.SetInitChainer(initChainer) + app.MountStores(capKey, capKey2) + err = app.LoadLatestVersion() // needed to make stores non-nil + require.Nil(t, err) + require.Equal(t, int64(1), app.LastBlockHeight()) + + // ensure we can still query after reloading + res = app.Query(query) + require.Equal(t, value, res.Value) + + // commit and ensure we can still query + header := tmproto.Header{Height: app.LastBlockHeight() + 1} + app.BeginBlock(abci.RequestBeginBlock{Header: header}) + app.Commit() + + res = app.Query(query) + require.Equal(t, value, res.Value) +} + +func TestInitChain_WithInitialHeight(t *testing.T) { + name := t.Name() + db := dbm.NewMemDB() + logger := defaultLogger() + app := NewBaseApp(name, logger, db, nil) + + app.InitChain( + abci.RequestInitChain{ + InitialHeight: 3, + }, + ) + app.Commit() + + require.Equal(t, int64(3), app.LastBlockHeight()) +} + +func TestBeginBlock_WithInitialHeight(t *testing.T) { + name := t.Name() + db := dbm.NewMemDB() + logger := defaultLogger() + app := NewBaseApp(name, logger, db, nil) + + app.InitChain( + abci.RequestInitChain{ + 
InitialHeight: 3, + }, + ) + + require.PanicsWithError(t, "invalid height: 4; expected: 3", func() { + app.BeginBlock(abci.RequestBeginBlock{ + Header: tmproto.Header{ + Height: 4, + }, + }) + }) + + app.BeginBlock(abci.RequestBeginBlock{ + Header: tmproto.Header{ + Height: 3, + }, + }) + app.Commit() + + require.Equal(t, int64(3), app.LastBlockHeight()) +} + +// Simple tx with a list of Msgs. +type txTest struct { + Msgs []sdk.Msg + Counter int64 + FailOnAnte bool +} + +func (tx *txTest) setFailOnAnte(fail bool) { + tx.FailOnAnte = fail +} + +func (tx *txTest) setFailOnHandler(fail bool) { + for i, msg := range tx.Msgs { + tx.Msgs[i] = msgCounter{msg.(msgCounter).Counter, fail} + } +} + +// Implements Tx +func (tx txTest) GetMsgs() []sdk.Msg { return tx.Msgs } +func (tx txTest) ValidateBasic() error { return nil } + +const ( + routeMsgCounter = "msgCounter" + routeMsgCounter2 = "msgCounter2" + routeMsgKeyValue = "msgKeyValue" +) + +// ValidateBasic() fails on negative counters. +// Otherwise it's up to the handlers. +type msgCounter struct { + Counter int64 + FailOnHandler bool +} + +// dummy implementation of proto.Message +func (msg msgCounter) Reset() {} +func (msg msgCounter) String() string { return "TODO" } +func (msg msgCounter) ProtoMessage() {} + +// Implements Msg +func (msg msgCounter) Route() string { return routeMsgCounter } +func (msg msgCounter) Type() string { return "counter1" } +func (msg msgCounter) GetSignBytes() []byte { return nil } +func (msg msgCounter) GetSigners() []sdk.AccAddress { return nil } +func (msg msgCounter) ValidateBasic() error { + if msg.Counter >= 0 { + return nil + } + return sdkerrors.Wrap(sdkerrors.ErrInvalidSequence, "counter should be a non-negative integer") +} + +func newTxCounter(counter int64, msgCounters ...int64) *txTest { + msgs := make([]sdk.Msg, 0, len(msgCounters)) + for _, c := range msgCounters { + msgs = append(msgs, msgCounter{c, false}) + } + + return &txTest{msgs, counter, false} +} + +// a msg we don't know how to route +type msgNoRoute struct { + msgCounter +} + +func (tx msgNoRoute) Route() string { return "noroute" } + +// a msg we don't know how to decode +type msgNoDecode struct { + msgCounter +} + +func (tx msgNoDecode) Route() string { return routeMsgCounter } + +// Another counter msg. Duplicate of msgCounter. +type msgCounter2 struct { + Counter int64 +} + +// dummy implementation of proto.Message +func (msg msgCounter2) Reset() {} +func (msg msgCounter2) String() string { return "TODO" } +func (msg msgCounter2) ProtoMessage() {} + +// Implements Msg +func (msg msgCounter2) Route() string { return routeMsgCounter2 } +func (msg msgCounter2) Type() string { return "counter2" } +func (msg msgCounter2) GetSignBytes() []byte { return nil } +func (msg msgCounter2) GetSigners() []sdk.AccAddress { return nil } +func (msg msgCounter2) ValidateBasic() error { + if msg.Counter >= 0 { + return nil + } + return sdkerrors.Wrap(sdkerrors.ErrInvalidSequence, "counter should be a non-negative integer") +}
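These test doubles satisfy the legacy `sdk.Msg` contract by hand; the only real logic is the stateless `ValidateBasic` check. A self-contained sketch of that split outside the SDK (plain `errors` stands in for `sdkerrors`; the names are illustrative, not SDK API):

```go
package main

import (
	"errors"
	"fmt"
)

// msg mirrors the shape of the test messages: ValidateBasic performs only
// stateless checks, and anything stateful is deferred to a handler.
type msg struct{ Counter int64 }

// ValidateBasic rejects negative counters, as msgCounter does above.
func (m msg) ValidateBasic() error {
	if m.Counter < 0 {
		return errors.New("counter should be a non-negative integer")
	}
	return nil
}

func main() {
	for _, m := range []msg{{Counter: 3}, {Counter: -1}} {
		fmt.Println(m.Counter, m.ValidateBasic()) // 3 <nil>, then -1 with an error
	}
}
```

+// A msg that sets a key/value pair.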
+type msgKeyValue struct { + Key []byte + Value []byte +} + +func (msg msgKeyValue) Reset() {} +func (msg msgKeyValue) String() string { return "TODO" } +func (msg msgKeyValue) ProtoMessage() {} +func (msg msgKeyValue) Route() string { return routeMsgKeyValue } +func (msg msgKeyValue) Type() string { return "keyValue" } +func (msg msgKeyValue) GetSignBytes() []byte { return nil } +func (msg msgKeyValue) GetSigners() []sdk.AccAddress { return nil } +func (msg msgKeyValue) ValidateBasic() error { + if msg.Key == nil { + return sdkerrors.Wrap(sdkerrors.ErrInvalidRequest, "key cannot be nil") + } + if msg.Value == nil { + return sdkerrors.Wrap(sdkerrors.ErrInvalidRequest, "value cannot be nil") + } + return nil +} + +// amino decode +func testTxDecoder(cdc *codec.LegacyAmino) sdk.TxDecoder { + return func(txBytes []byte) (sdk.Tx, error) { + var tx txTest + if len(txBytes) == 0 { + return nil, sdkerrors.Wrap(sdkerrors.ErrTxDecode, "tx bytes are empty") + } + + err := cdc.Unmarshal(txBytes, &tx) + if err != nil { + return nil, sdkerrors.ErrTxDecode + } + + return tx, nil + } +} + +func anteHandlerTxTest(t *testing.T, capKey sdk.StoreKey, storeKey []byte) sdk.AnteHandler { + return func(ctx sdk.Context, tx sdk.Tx, simulate bool) (sdk.Context, error) { + store := ctx.KVStore(capKey) + txTest := tx.(txTest) + + if txTest.FailOnAnte { + return ctx, sdkerrors.Wrap(sdkerrors.ErrUnauthorized, "ante handler failure") + } + + _, err := incrementingCounter(t, store, storeKey, txTest.Counter) + if err != nil { + return ctx, err + } + + ctx.EventManager().EmitEvents( + counterEvent("ante_handler", txTest.Counter), + ) + + return ctx, nil + } +} + +func counterEvent(evType string, msgCount int64) sdk.Events { + return sdk.Events{ + sdk.NewEvent( + evType, + sdk.NewAttribute("update_counter", fmt.Sprintf("%d", msgCount)), + ), + } +} + +func handlerMsgCounter(t *testing.T, capKey sdk.StoreKey, deliverKey []byte) sdk.Handler { + return func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) { + ctx = ctx.WithEventManager(sdk.NewEventManager()) + store := ctx.KVStore(capKey) + var msgCount int64 + + switch m := msg.(type) { + case *msgCounter: + if m.FailOnHandler { + return nil, sdkerrors.Wrap(sdkerrors.ErrInvalidRequest, "message handler failure") + } + + msgCount = m.Counter + case *msgCounter2: + msgCount = m.Counter + } + + ctx.EventManager().EmitEvents( + counterEvent(sdk.EventTypeMessage, msgCount), + ) + + res, err := incrementingCounter(t, store, deliverKey, msgCount) + if err != nil { + return nil, err + } + + res.Events = ctx.EventManager().Events().ToABCIEvents() + return res, nil + } +} + +func getIntFromStore(store sdk.KVStore, key []byte) int64 { + bz := store.Get(key) + if len(bz) == 0 { + return 0 + } + i, err := binary.ReadVarint(bytes.NewBuffer(bz)) + if err != nil { + panic(err) + } + return i +} + +func setIntOnStore(store sdk.KVStore, key []byte, i int64) { + bz := make([]byte, 8) + n := binary.PutVarint(bz, i) + store.Set(key, bz[:n]) +} + +// check counter matches what's in store. +// increment and store +func incrementingCounter(t *testing.T, store sdk.KVStore, counterKey []byte, counter int64) (*sdk.Result, error) { + storedCounter := getIntFromStore(store, counterKey) + require.Equal(t, storedCounter, counter) + setIntOnStore(store, counterKey, counter+1) + return &sdk.Result{}, nil +} + +//--------------------------------------------------------------------- +// Tx processing - CheckTx, DeliverTx, SimulateTx. 
+// These tests use the serialized tx as input, while most others will use the +// Check(), Deliver(), Simulate() methods directly. +// Ensure that Check/Deliver/Simulate work as expected with the store. + +// Test that successive CheckTx can see each other's effects +// on the store within a block, and that the CheckTx state +// gets reset to the latest committed state during Commit +func TestCheckTx(t *testing.T) { + // This ante handler reads the key and checks that the value matches the current counter. + // This ensures changes to the kvstore persist across successive CheckTx. + counterKey := []byte("counter-key") + + anteOpt := func(bapp *BaseApp) { bapp.SetAnteHandler(anteHandlerTxTest(t, capKey1, counterKey)) } + routerOpt := func(bapp *BaseApp) { + // TODO: can remove this once CheckTx doesn't process msgs. + bapp.Router().AddRoute(sdk.NewRoute(routeMsgCounter, func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) { + return &sdk.Result{}, nil + })) + } + + app := setupBaseApp(t, anteOpt, routerOpt) + + nTxs := int64(5) + app.InitChain(abci.RequestInitChain{}) + + // Create same codec used in txDecoder + codec := codec.NewLegacyAmino() + registerTestCodec(codec) + + for i := int64(0); i < nTxs; i++ { + tx := newTxCounter(i, 0) // one msg with counter 0 + txBytes, err := codec.Marshal(tx) + require.NoError(t, err) + r := app.CheckTx(abci.RequestCheckTx{Tx: txBytes}) + require.Empty(t, r.GetEvents()) + require.True(t, r.IsOK(), fmt.Sprintf("%v", r)) + } + + checkStateStore := app.checkState.ctx.KVStore(capKey1) + storedCounter := getIntFromStore(checkStateStore, counterKey) + + // Ensure AnteHandler ran + require.Equal(t, nTxs, storedCounter) + + // If a block is committed, CheckTx state should be reset. + header := tmproto.Header{Height: 1} + app.BeginBlock(abci.RequestBeginBlock{Header: header, Hash: []byte("hash")}) + + require.NotNil(t, app.checkState.ctx.BlockGasMeter(), "block gas meter should have been set to checkState") + require.NotEmpty(t, app.checkState.ctx.HeaderHash()) + + app.EndBlock(abci.RequestEndBlock{}) + app.Commit() + + checkStateStore = app.checkState.ctx.KVStore(capKey1) + storedBytes := checkStateStore.Get(counterKey) + require.Nil(t, storedBytes) +}
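TestCheckTx's counter assertions lean on the `setIntOnStore`/`getIntFromStore` helpers defined earlier, which persist counters as varints. A self-contained sketch of that round-trip, with a plain map standing in for the KVStore:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// setInt stores i as a varint, matching setIntOnStore above.
func setInt(store map[string][]byte, key string, i int64) {
	bz := make([]byte, binary.MaxVarintLen64)
	n := binary.PutVarint(bz, i)
	store[key] = bz[:n]
}

// getInt decodes the varint; a missing key reads as zero, as in the tests.
func getInt(store map[string][]byte, key string) int64 {
	bz := store[key]
	if len(bz) == 0 {
		return 0
	}
	i, err := binary.ReadVarint(bytes.NewBuffer(bz))
	if err != nil {
		panic(err)
	}
	return i
}

func main() {
	store := map[string][]byte{}
	setInt(store, "counter-key", 5)
	fmt.Println(getInt(store, "counter-key")) // 5
}
```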
+// Test that successive DeliverTx can see each other's effects +// on the store, both within and across blocks. +func TestDeliverTx(t *testing.T) { + // test increments in the ante + anteKey := []byte("ante-key") + anteOpt := func(bapp *BaseApp) { bapp.SetAnteHandler(anteHandlerTxTest(t, capKey1, anteKey)) } + + // test increments in the handler + deliverKey := []byte("deliver-key") + routerOpt := func(bapp *BaseApp) { + r := sdk.NewRoute(routeMsgCounter, handlerMsgCounter(t, capKey1, deliverKey)) + bapp.Router().AddRoute(r) + } + + app := setupBaseApp(t, anteOpt, routerOpt) + app.InitChain(abci.RequestInitChain{}) + + // Create same codec used in txDecoder + codec := codec.NewLegacyAmino() + registerTestCodec(codec) + + nBlocks := 3 + txPerHeight := 5 + + for blockN := 0; blockN < nBlocks; blockN++ { + header := tmproto.Header{Height: int64(blockN) + 1} + app.BeginBlock(abci.RequestBeginBlock{Header: header}) + + for i := 0; i < txPerHeight; i++ { + counter := int64(blockN*txPerHeight + i) + tx := newTxCounter(counter, counter) + + txBytes, err := codec.Marshal(tx) + require.NoError(t, err) + + res := app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes}) + require.True(t, res.IsOK(), fmt.Sprintf("%v", res)) + events := res.GetEvents() + require.Len(t, events, 3, "should contain ante handler, message type and counter events respectively") + require.Equal(t, sdk.MarkEventsToIndex(counterEvent("ante_handler", counter).ToABCIEvents(), map[string]struct{}{})[0], events[0], "ante handler event") + require.Equal(t, sdk.MarkEventsToIndex(counterEvent(sdk.EventTypeMessage, counter).ToABCIEvents(), map[string]struct{}{})[0], events[2], "msg handler update counter event") + } + + app.EndBlock(abci.RequestEndBlock{}) + app.Commit() + } +} + +func TestOptionFunction(t *testing.T) { + logger := defaultLogger() + db := dbm.NewMemDB() + bap := NewBaseApp("starting name", logger, db, nil, testChangeNameHelper("new name")) + require.Equal(t, bap.name, "new name", "BaseApp should have had name changed via option function") +} + +func testChangeNameHelper(name string) func(*BaseApp) { + return func(bap *BaseApp) { + bap.name = name + } +} + +// Test that txs can be unmarshaled and read, and that the +// correct error codes are returned when they cannot be +func TestTxDecoder(t *testing.T) { + codec := codec.NewLegacyAmino() + registerTestCodec(codec) + + app := newBaseApp(t.Name()) + tx := newTxCounter(1, 0) + txBytes := codec.MustMarshal(tx) + + dTx, err := app.txDecoder(txBytes) + require.NoError(t, err) + + cTx := dTx.(txTest) + require.Equal(t, tx.Counter, cTx.Counter) +}
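TestOptionFunction above exercises the functional-option pattern NewBaseApp uses: an option is just a `func(*BaseApp)` applied in order during construction. A dependency-free sketch of the same pattern (`App`, `WithName`, and `NewApp` are illustrative stand-ins, not SDK API):

```go
package main

import "fmt"

// App stands in for BaseApp; options mutate it during construction.
type App struct{ name string }

// WithName mirrors testChangeNameHelper: it returns an option closure.
func WithName(name string) func(*App) {
	return func(a *App) { a.name = name }
}

// NewApp applies each option in order, as NewBaseApp does.
func NewApp(opts ...func(*App)) *App {
	app := &App{name: "starting name"}
	for _, opt := range opts {
		opt(app)
	}
	return app
}

func main() {
	fmt.Println(NewApp(WithName("new name")).name) // "new name"
}
```

+// Test that Info returns the latest committed state.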
+func TestInfo(t *testing.T) { + app := newBaseApp(t.Name()) + + // ----- test an empty response ------- + reqInfo := abci.RequestInfo{} + res := app.Info(reqInfo) + + // should be empty + assert.Equal(t, "", res.Version) + assert.Equal(t, t.Name(), res.GetData()) + assert.Equal(t, int64(0), res.LastBlockHeight) + require.Equal(t, []uint8(nil), res.LastBlockAppHash) + require.Equal(t, app.AppVersion(), res.AppVersion) + // ----- test a proper response ------- + // TODO +} + +func TestBaseAppOptionSeal(t *testing.T) { + app := setupBaseApp(t) + + require.Panics(t, func() { + app.SetName("") + }) + require.Panics(t, func() { + app.SetVersion("") + }) + require.Panics(t, func() { + app.SetDB(nil) + }) + require.Panics(t, func() { + app.SetCMS(nil) + }) + require.Panics(t, func() { + app.SetInitChainer(nil) + }) + require.Panics(t, func() { + app.SetBeginBlocker(nil) + }) + require.Panics(t, func() { + app.SetEndBlocker(nil) + }) + require.Panics(t, func() { + app.SetAnteHandler(nil) + }) + require.Panics(t, func() { + app.SetAddrPeerFilter(nil) + }) + require.Panics(t, func() { + app.SetIDPeerFilter(nil) + }) + require.Panics(t, func() { + app.SetFauxMerkleMode() + }) + require.Panics(t, func() { + app.SetRouter(NewRouter()) + }) +} + +func TestVersionSetterGetter(t *testing.T) { + logger := defaultLogger() + pruningOpt := SetPruning(store.PruneDefault) + db := dbm.NewMemDB() + name := t.Name() + app := NewBaseApp(name, logger, db, nil, pruningOpt) + + require.Equal(t, "", app.Version()) + res := app.Query(abci.RequestQuery{Path: "app/version"}) + require.True(t, res.IsOK()) + require.Equal(t, "", string(res.Value)) + + versionString := "1.0.0" + app.SetVersion(versionString) + require.Equal(t, versionString, app.Version()) + res = app.Query(abci.RequestQuery{Path: "app/version"}) + require.True(t, res.IsOK()) + require.Equal(t, versionString, string(res.Value)) +} + +func TestLoadVersionInvalid(t *testing.T) { + logger := log.NewNopLogger() + pruningOpt := SetPruning(store.PruneNothing) + db := dbm.NewMemDB() + name := t.Name() + app := NewBaseApp(name, logger, db, nil, pruningOpt) + + err := app.LoadLatestVersion() + require.Nil(t, err) + + // require error when loading an invalid version + err = app.LoadVersion(-1) + require.Error(t, err) + + header := tmproto.Header{Height: 1} + app.BeginBlock(abci.RequestBeginBlock{Header: header}) + res := app.Commit() + commitID1 := sdk.CommitID{Version: 1, Hash: res.Data} + + // create a new app with the stores mounted under the same cap key + app = NewBaseApp(name, logger, db, nil, pruningOpt) + + // require we can load the latest version + err = app.LoadVersion(1) + require.Nil(t, err) + testLoadVersionHelper(t, app, int64(1), commitID1) + + // require error when loading an invalid version + err = app.LoadVersion(2) + require.Error(t, err) +} + +// simple one store baseapp with data and snapshots. Each tx is 1 MB in size (uncompressed). 
+func setupBaseAppWithSnapshots(t *testing.T, blocks uint, blockTxs int, options ...func(*BaseApp)) (*BaseApp, func()) { + codec := codec.NewLegacyAmino() + registerTestCodec(codec) + routerOpt := func(bapp *BaseApp) { + bapp.Router().AddRoute(sdk.NewRoute(routeMsgKeyValue, func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) { + kv := msg.(*msgKeyValue) + bapp.cms.GetCommitKVStore(capKey2).Set(kv.Key, kv.Value) + return &sdk.Result{}, nil + })) + } + + snapshotInterval := uint64(2) + snapshotTimeout := 1 * time.Minute + snapshotDir, err := os.MkdirTemp("", "baseapp") + require.NoError(t, err) + snapshotStore, err := snapshots.NewStore(dbm.NewMemDB(), snapshotDir) + require.NoError(t, err) + teardown := func() { + os.RemoveAll(snapshotDir) + } + + app := setupBaseApp(t, append(options, + SetSnapshotStore(snapshotStore), + SetSnapshotInterval(snapshotInterval), + SetPruning(sdk.PruningOptions{KeepEvery: 1}), + routerOpt)...) + + app.InitChain(abci.RequestInitChain{}) + + r := rand.New(rand.NewSource(3920758213583)) + keyCounter := 0 + for height := int64(1); height <= int64(blocks); height++ { + app.BeginBlock(abci.RequestBeginBlock{Header: tmproto.Header{Height: height}}) + for txNum := 0; txNum < blockTxs; txNum++ { + tx := txTest{Msgs: []sdk.Msg{}} + for msgNum := 0; msgNum < 100; msgNum++ { + key := []byte(fmt.Sprintf("%v", keyCounter)) + value := make([]byte, 10000) + _, err := r.Read(value) + require.NoError(t, err) + tx.Msgs = append(tx.Msgs, msgKeyValue{Key: key, Value: value}) + keyCounter++ + } + txBytes, err := codec.Marshal(tx) + require.NoError(t, err) + resp := app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes}) + require.True(t, resp.IsOK(), "%v", resp.String()) + } + app.EndBlock(abci.RequestEndBlock{Height: height}) + app.Commit() + + // Wait for snapshot to be taken, since it happens asynchronously. + if uint64(height)%snapshotInterval == 0 { + start := time.Now() + for { + if time.Since(start) > snapshotTimeout { + t.Errorf("timed out waiting for snapshot after %v", snapshotTimeout) + } + snapshot, err := snapshotStore.Get(uint64(height), snapshottypes.CurrentFormat) + require.NoError(t, err) + if snapshot != nil { + break + } + time.Sleep(100 * time.Millisecond) + } + } + } + + return app, teardown +} + +func TestMountStores(t *testing.T) { + app := setupBaseApp(t) + + // check both stores + store1 := app.cms.GetCommitKVStore(capKey1) + require.NotNil(t, store1) + store2 := app.cms.GetCommitKVStore(capKey2) + require.NotNil(t, store2) +}
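The snapshot wait in setupBaseAppWithSnapshots above is a generic poll-until-timeout loop; factored out, it might look like the following (`waitFor` is a hypothetical helper, not SDK API):

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// waitFor polls cond at the given interval until it reports true or the
// timeout elapses, mirroring the snapshot wait loop above.
func waitFor(cond func() (bool, error), interval, timeout time.Duration) error {
	start := time.Now()
	for {
		ok, err := cond()
		if err != nil {
			return err
		}
		if ok {
			return nil
		}
		if time.Since(start) > timeout {
			return errors.New("timed out")
		}
		time.Sleep(interval)
	}
}

func main() {
	deadline := time.Now().Add(300 * time.Millisecond)
	err := waitFor(func() (bool, error) {
		// Stands in for "the snapshot store has the snapshot".
		return time.Now().After(deadline), nil
	}, 100*time.Millisecond, time.Minute)
	fmt.Println(err) // <nil>
}
```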
+// Test that we can make commits and then reload old versions. +// Test that LoadLatestVersion actually loads the latest version. +func TestLoadVersion(t *testing.T) { + logger := defaultLogger() + pruningOpt := SetPruning(store.PruneNothing) + db := dbm.NewMemDB() + name := t.Name() + app := NewBaseApp(name, logger, db, nil, pruningOpt) + + // load the latest version (no stores are mounted yet) + err := app.LoadLatestVersion() + require.Nil(t, err) + + emptyCommitID := sdk.CommitID{} + + // fresh store has zero/empty last commit + lastHeight := app.LastBlockHeight() + lastID := app.LastCommitID() + require.Equal(t, int64(0), lastHeight) + require.Equal(t, emptyCommitID, lastID) + + // execute a block, collect commit ID + header := tmproto.Header{Height: 1} + app.BeginBlock(abci.RequestBeginBlock{Header: header}) + res := app.Commit() + commitID1 := sdk.CommitID{Version: 1, Hash: res.Data} + + // execute a block, collect commit ID + header = tmproto.Header{Height: 2} + app.BeginBlock(abci.RequestBeginBlock{Header: header}) + res = app.Commit() + commitID2 := sdk.CommitID{Version: 2, Hash: res.Data} + + // reload with LoadLatestVersion + app = NewBaseApp(name, logger, db, nil, pruningOpt) + app.MountStores() + err = app.LoadLatestVersion() + require.Nil(t, err) + testLoadVersionHelper(t, app, int64(2), commitID2) + + // reload with LoadVersion, see if you can commit the same block and get + // the same result + app = NewBaseApp(name, logger, db, nil, pruningOpt) + err = app.LoadVersion(1) + require.Nil(t, err) + testLoadVersionHelper(t, app, int64(1), commitID1) + app.BeginBlock(abci.RequestBeginBlock{Header: header}) + app.Commit() + testLoadVersionHelper(t, app, int64(2), commitID2) +} + +func useDefaultLoader(app *BaseApp) { + app.SetStoreLoader(DefaultStoreLoader) +} + +func initStore(t *testing.T, db dbm.DB, storeKey string, k, v []byte) { + rs := rootmulti.NewStore(db, log.NewNopLogger()) + rs.SetPruning(store.PruneNothing) + key := sdk.NewKVStoreKey(storeKey) + rs.MountStoreWithDB(key, store.StoreTypeIAVL, nil) + err := rs.LoadLatestVersion() + require.Nil(t, err) + require.Equal(t, int64(0), rs.LastCommitID().Version) + + // write some data in substore + kv, _ := rs.GetStore(key).(store.KVStore) + require.NotNil(t, kv) + kv.Set(k, v) + commitID := rs.Commit() + require.Equal(t, int64(1), commitID.Version) +} + +func checkStore(t *testing.T, db dbm.DB, ver int64, storeKey string, k, v []byte) { + rs := rootmulti.NewStore(db, log.NewNopLogger()) + rs.SetPruning(store.PruneDefault) + key := sdk.NewKVStoreKey(storeKey) + rs.MountStoreWithDB(key, store.StoreTypeIAVL, nil) + err := rs.LoadLatestVersion() + require.Nil(t, err) + require.Equal(t, ver, rs.LastCommitID().Version) + + // query data in substore + kv, _ := rs.GetStore(key).(store.KVStore) + require.NotNil(t, kv) + require.Equal(t, v, kv.Get(k)) +}
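useDefaultLoader above installs DefaultStoreLoader; a store loader is a `StoreLoader` hook (`func(sdk.CommitMultiStore) error`) run when the app loads its stores. A hedged sketch of a custom loader that runs a caller-supplied migration before falling back to the default (`upgradeLoader` and `migrate` are hypothetical, not part of the SDK):

```go
package baseapp

import sdk "github.com/cosmos/cosmos-sdk/types"

// upgradeLoader wraps DefaultStoreLoader with a pre-load migration step.
// `migrate` is a hypothetical callback supplied by the caller.
func upgradeLoader(migrate func(sdk.CommitMultiStore) error) StoreLoader {
	return func(ms sdk.CommitMultiStore) error {
		if err := migrate(ms); err != nil {
			return err
		}
		return DefaultStoreLoader(ms)
	}
}
```

+// Test that a custom store loader can be installed, and that setting the default loader behaves the same as setting none.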
+func TestSetLoader(t *testing.T) { + cases := map[string]struct { + setLoader func(*BaseApp) + origStoreKey string + loadStoreKey string + }{ + "don't set loader": { + origStoreKey: "foo", + loadStoreKey: "foo", + }, + "default loader": { + setLoader: useDefaultLoader, + origStoreKey: "foo", + loadStoreKey: "foo", + }, + } + + k := []byte("key") + v := []byte("value") + + for name, tc := range cases { + tc := tc + t.Run(name, func(t *testing.T) { + // prepare a db with some data + db := dbm.NewMemDB() + initStore(t, db, tc.origStoreKey, k, v) + + // load the app with the existing db + opts := []func(*BaseApp){SetPruning(store.PruneNothing)} + if tc.setLoader != nil { + opts = append(opts, tc.setLoader) + } + app := NewBaseApp(t.Name(), defaultLogger(), db, nil, opts...) + app.MountStores(sdk.NewKVStoreKey(tc.loadStoreKey)) + err := app.LoadLatestVersion() + require.Nil(t, err) + + // "execute" one block + app.BeginBlock(abci.RequestBeginBlock{Header: tmproto.Header{Height: 2}}) + res := app.Commit() + require.NotNil(t, res.Data) + + // check db is properly updated + checkStore(t, db, 2, tc.loadStoreKey, k, v) + checkStore(t, db, 2, tc.loadStoreKey, []byte("foo"), nil) + }) + } +} diff --git a/baseapp/msg_service_router.go b/baseapp/msg_service_router.go index 1b7f8f89bf..a8c997cf68 100644 --- a/baseapp/msg_service_router.go +++ b/baseapp/msg_service_router.go @@ -46,9 +46,9 @@ func (msr *MsgServiceRouter) HandlerByTypeURL(typeURL string) MsgServiceHandler // service description, handler is an object which implements that gRPC service. // // This function PANICs: -// - if it is called before the service `Msg`s have been registered using -// RegisterInterfaces, -// - or if a service is being registered twice. +// - if it is called before the service `Msg`s have been registered using +// RegisterInterfaces, +// - or if a service is being registered twice. func (msr *MsgServiceRouter) RegisterService(sd *grpc.ServiceDesc, handler interface{}) { // Adds a top-level query handler based on the gRPC service name. for _, method := range sd.Methods { @@ -112,6 +112,15 @@ func (msr *MsgServiceRouter) RegisterService(sd *grpc.ServiceDesc, handler inter goCtx = context.WithValue(goCtx, sdk.SdkContextKey, ctx) return handler(goCtx, req) } + // Surface ValidateBasic failures, except for messages that report a zero + // Coin amount via the getter1 interface below, which are let through. + if err := req.ValidateBasic(); err != nil { + if mm, ok := req.(getter1); ok { + if !mm.GetAmount().Amount.IsZero() { + return nil, err + } + } else { + return nil, err + } + } // Call the method handler from the service description with the handler object. // We don't do any decoding here because the decoding was already done. res, err := methodHandler(handler, sdk.WrapSDKContext(ctx), noopDecoder, interceptor) @@ -138,3 +147,7 @@ func noopDecoder(_ interface{}) error { return nil } func noopInterceptor(_ context.Context, _ interface{}, _ *grpc.UnaryServerInfo, _ grpc.UnaryHandler) (interface{}, error) { return nil, nil } + +// getter1 exposes a message's single Coin amount, used above to special-case zero-amount messages. +type getter1 interface { + GetAmount() sdk.Coin +} diff --git a/baseapp/options.go b/baseapp/options.go index 5d336cb1bf..53d4a4b11c 100644 --- a/baseapp/options.go +++ b/baseapp/options.go @@ -62,6 +62,11 @@ func SetIAVLCacheSize(size int) func(*BaseApp) { return func(bapp *BaseApp) { bapp.cms.SetIAVLCacheSize(size) } } +// SetIAVLDisableFastNode disables (true) or enables (false) fast node usage in the IAVL store. +func SetIAVLDisableFastNode(disable bool) func(*BaseApp) { + return func(bapp *BaseApp) { bapp.cms.SetIAVLDisableFastNode(disable) } +} + // SetInterBlockCache provides a BaseApp option function that sets the // inter-block cache.
func SetInterBlockCache(cache sdk.MultiStorePersistentCache) func(*BaseApp) { @@ -242,3 +247,14 @@ func (app *BaseApp) SetInterfaceRegistry(registry types.InterfaceRegistry) { app.grpcQueryRouter.SetInterfaceRegistry(registry) app.msgServiceRouter.SetInterfaceRegistry(registry) } + +// SetStreamingService is used to set a streaming service into the BaseApp hooks and load the listeners into the multistore +func (app *BaseApp) SetStreamingService(s StreamingService) { + // add the listeners for each StoreKey + for key, lis := range s.Listeners() { + app.cms.AddListeners(key, lis) + } + // register the StreamingService within the BaseApp; + // BaseApp will pass BeginBlock, DeliverTx, and EndBlock requests and responses to the streaming services to update their ABCI context + app.abciListeners = append(app.abciListeners, s) +} diff --git a/baseapp/p2p.go b/baseapp/p2p.go new file mode 100644 index 0000000000..4c339dfad0 --- /dev/null +++ b/baseapp/p2p.go @@ -0,0 +1,66 @@ +package baseapp + +// This file exists because Tendermint allows the application to control which peers it connects to. +// This is for an interesting idea -- allow the application to control the peer layer / topology! +// It would be really exciting to mix web of trust and expander-graph style primitives +// for how information gets disseminated. +// However, the API surface for this to make sense isn't really well exposed / thought through, +// so this file mostly acts as confusing boilerplate. + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + abci "github.com/tendermint/tendermint/abci/types" +) + +type peerFilters struct { + addrPeerFilter sdk.PeerFilter // filter peers by address and port + idPeerFilter sdk.PeerFilter // filter peers by node ID +} + +// FilterPeerByAddrPort filters peers by address/port. +func (app *BaseApp) FilterPeerByAddrPort(info string) abci.ResponseQuery { + if app.addrPeerFilter != nil { + return app.addrPeerFilter(info) + } + + return abci.ResponseQuery{} +} + +// FilterPeerByID filters peers by node ID. +func (app *BaseApp) FilterPeerByID(info string) abci.ResponseQuery { + if app.idPeerFilter != nil { + return app.idPeerFilter(info) + } + + return abci.ResponseQuery{} +} + +func handleQueryP2P(app *BaseApp, path []string) abci.ResponseQuery { + // "/p2p" prefix for p2p queries + if len(path) < 4 { + return sdkerrors.QueryResultWithDebug( + sdkerrors.Wrap( + sdkerrors.ErrUnknownRequest, "path should be p2p filter <addr|id> <parameter>", + ), app.trace) + } + + var resp abci.ResponseQuery + + cmd, typ, arg := path[1], path[2], path[3] + switch cmd { + case "filter": + switch typ { + case "addr": + resp = app.FilterPeerByAddrPort(arg) + + case "id": + resp = app.FilterPeerByID(arg) + } + + default: + resp = sdkerrors.QueryResultWithDebug(sdkerrors.Wrap(sdkerrors.ErrUnknownRequest, "expected second parameter to be 'filter'"), app.trace) + } + + return resp +}
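For reference, the filters installed via SetAddrPeerFilter/SetIDPeerFilter answer ABCI queries of the form `/p2p/filter/addr/<arg>` and `/p2p/filter/id/<arg>`, as routed by handleQueryP2P above; a non-OK response tells Tendermint to refuse the peer. A hedged sketch of installing an address filter (the rejection rule here is purely illustrative):

```go
package main

import (
	"strings"

	abci "github.com/tendermint/tendermint/abci/types"

	"github.com/cosmos/cosmos-sdk/baseapp"
)

// rejectLocalPeers is an illustrative sdk.PeerFilter: a non-zero Code
// rejects the peer whose address/port is given in info.
func rejectLocalPeers(info string) abci.ResponseQuery {
	if strings.HasPrefix(info, "127.0.0.1:") {
		return abci.ResponseQuery{Code: 1, Log: "local peers rejected"}
	}
	return abci.ResponseQuery{}
}

// installFilter wires the filter into an unsealed BaseApp; it then
// answers /p2p/filter/addr/<addr:port> queries.
func installFilter(app *baseapp.BaseApp) {
	app.SetAddrPeerFilter(rejectLocalPeers)
}
```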
diff --git a/baseapp/queryrouter.go b/baseapp/queryrouter.go index 1727b2ab2d..13bef6ad08 100644 --- a/baseapp/queryrouter.go +++ b/baseapp/queryrouter.go @@ -2,6 +2,7 @@ package baseapp import ( "fmt" + "strings" sdk "github.com/cosmos/cosmos-sdk/types" ) @@ -21,16 +22,22 @@ // AddRoute adds a query path to the router with a given Querier. It will panic // if a duplicate route is given. The route must be alphanumeric. -func (qrt *QueryRouter) AddRoute(path string, q sdk.Querier) sdk.QueryRouter { - if !sdk.IsAlphaNumeric(path) { +func (qrt *QueryRouter) AddRoute(route string, q sdk.Querier) sdk.QueryRouter { + if !sdk.IsAlphaNumeric(route) { panic("route expressions can only contain alphanumeric characters") } - if qrt.routes[path] != nil { - panic(fmt.Sprintf("route %s has already been initialized", path)) + // paths are only the final extensions! + // Needed to ensure erroneous queries don't get into the state machine. + if strings.Contains(route, "/") { + panic("routes cannot contain '/'") } - qrt.routes[path] = q + if qrt.routes[route] != nil { + panic(fmt.Sprintf("route %s has already been initialized", route)) + } + + qrt.routes[route] = q return qrt } diff --git a/baseapp/streaming.go b/baseapp/streaming.go new file mode 100644 index 0000000000..39e0f1ca6e --- /dev/null +++ b/baseapp/streaming.go @@ -0,0 +1,33 @@ +package baseapp + +import ( + "io" + "sync" + + abci "github.com/tendermint/tendermint/abci/types" + + store "github.com/cosmos/cosmos-sdk/store/types" + "github.com/cosmos/cosmos-sdk/types" +) + +// ABCIListener interface used to hook into the ABCI message processing of the BaseApp +type ABCIListener interface { + // ListenBeginBlock updates the streaming service with the latest BeginBlock messages + ListenBeginBlock(ctx types.Context, req abci.RequestBeginBlock, res abci.ResponseBeginBlock) error + // ListenEndBlock updates the streaming service with the latest EndBlock messages + ListenEndBlock(ctx types.Context, req abci.RequestEndBlock, res abci.ResponseEndBlock) error + // ListenDeliverTx updates the streaming service with the latest DeliverTx messages + ListenDeliverTx(ctx types.Context, req abci.RequestDeliverTx, res abci.ResponseDeliverTx) error +} + +// StreamingService interface for registering WriteListeners with the BaseApp and updating the service with the ABCI messages using the hooks +type StreamingService interface { + // Stream is the streaming service loop, awaits kv pairs and writes them to some destination stream or file + Stream(wg *sync.WaitGroup) error + // Listeners returns the streaming service's listeners for the BaseApp to register + Listeners() map[store.StoreKey][]store.WriteListener + // ABCIListener interface for hooking into the ABCI messages from inside the BaseApp + ABCIListener + // Closer interface + io.Closer +} diff --git a/baseapp/testutil/buf.gen.yaml b/baseapp/testutil/buf.gen.yaml new file mode 100644 index 0000000000..d7d17bbb26 --- /dev/null +++ b/baseapp/testutil/buf.gen.yaml @@ -0,0 +1,5 @@ +version: v1 +plugins: + - name: gocosmos + out: ../.. + opt: plugins=grpc,Mgoogle/protobuf/any.proto=github.com/cosmos/cosmos-sdk/codec/types
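The StreamingService added above (the ADR-038 file streaming backported in #13326, per the changelog) can be satisfied by a small in-process implementation. A hedged, do-nothing sketch against just these interfaces (`noopStreamingService` is a hypothetical name, not SDK API):

```go
package baseapp

import (
	"sync"

	abci "github.com/tendermint/tendermint/abci/types"

	store "github.com/cosmos/cosmos-sdk/store/types"
	"github.com/cosmos/cosmos-sdk/types"
)

// noopStreamingService registers no write listeners and ignores every
// ABCI message; a real implementation would persist them somewhere.
type noopStreamingService struct{}

func (noopStreamingService) ListenBeginBlock(types.Context, abci.RequestBeginBlock, abci.ResponseBeginBlock) error {
	return nil
}

func (noopStreamingService) ListenEndBlock(types.Context, abci.RequestEndBlock, abci.ResponseEndBlock) error {
	return nil
}

func (noopStreamingService) ListenDeliverTx(types.Context, abci.RequestDeliverTx, abci.ResponseDeliverTx) error {
	return nil
}

func (noopStreamingService) Listeners() map[store.StoreKey][]store.WriteListener {
	return nil
}

func (noopStreamingService) Stream(*sync.WaitGroup) error { return nil }
func (noopStreamingService) Close() error                 { return nil }

// Wiring (before the app is sealed): app.SetStreamingService(noopStreamingService{})
```

diff --git a/baseapp/testutil/buf.lock b/baseapp/testutil/buf.lock new file mode 100644 index 0000000000..c6f890bd4b --- /dev/null +++ b/baseapp/testutil/buf.lock @@ -0,0 +1,17 @@ +# Generated by buf. DO NOT EDIT.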
+version: v1 +deps: + - remote: buf.build + owner: cosmos + repository: cosmos-proto + branch: main + commit: 9e9a53f8db0d493f8b8c66d458c767c1 + digest: b1-6w7Hozd_Oo_yZ1Sku8Nhz9qou-4licLr6VmEyeI9jO4= + create_time: 2021-12-02T20:41:47.795828Z + - remote: buf.build + owner: cosmos + repository: gogo-proto + branch: main + commit: bee5511075b7499da6178d9e4aaa628b + digest: b1-rrBIustouD-S80cVoZ_rM0qJsmei9AgbXy9GPQu6vxg= + create_time: 2021-12-02T20:01:17.069307Z diff --git a/baseapp/testutil/buf.yaml b/baseapp/testutil/buf.yaml new file mode 100644 index 0000000000..e6f82c0cdc --- /dev/null +++ b/baseapp/testutil/buf.yaml @@ -0,0 +1,4 @@ +version: v1 +deps: + - buf.build/cosmos/gogo-proto + - buf.build/cosmos/cosmos-proto diff --git a/baseapp/testutil/messages.go b/baseapp/testutil/messages.go new file mode 100644 index 0000000000..f0950eedc6 --- /dev/null +++ b/baseapp/testutil/messages.go @@ -0,0 +1,60 @@ +package testutil + +import ( + "github.com/cosmos/cosmos-sdk/codec/types" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + "github.com/cosmos/cosmos-sdk/types/msgservice" +) + +func RegisterInterfaces(registry types.InterfaceRegistry) { + registry.RegisterImplementations( + (*sdk.Msg)(nil), + &MsgCounter{}, + &MsgCounter2{}, + &MsgKeyValue{}, + ) + msgservice.RegisterMsgServiceDesc(registry, &_Counter_serviceDesc) + msgservice.RegisterMsgServiceDesc(registry, &_Counter2_serviceDesc) + msgservice.RegisterMsgServiceDesc(registry, &_KeyValue_serviceDesc) +} + +var _ sdk.Msg = &MsgCounter{} + +func (msg *MsgCounter) GetSigners() []sdk.AccAddress { return []sdk.AccAddress{} } +func (msg *MsgCounter) ValidateBasic() error { + if msg.Counter >= 0 { + return nil + } + return sdkerrors.Wrap(sdkerrors.ErrInvalidSequence, "counter should be a non-negative integer") +} + +var _ sdk.Msg = &MsgCounter2{} + +func (msg *MsgCounter2) GetSigners() []sdk.AccAddress { return []sdk.AccAddress{} } +func (msg *MsgCounter2) ValidateBasic() error { + if msg.Counter >= 0 { + return nil + } + return sdkerrors.Wrap(sdkerrors.ErrInvalidSequence, "counter should be a non-negative integer") +} + +var _ sdk.Msg = &MsgKeyValue{} + +func (msg *MsgKeyValue) GetSigners() []sdk.AccAddress { + if msg.Signer == "" { + return []sdk.AccAddress{} + } + + return []sdk.AccAddress{sdk.MustAccAddressFromBech32(msg.Signer)} +} + +func (msg *MsgKeyValue) ValidateBasic() error { + if msg.Key == nil { + return sdkerrors.Wrap(sdkerrors.ErrInvalidRequest, "key cannot be nil") + } + if msg.Value == nil { + return sdkerrors.Wrap(sdkerrors.ErrInvalidRequest, "value cannot be nil") + } + return nil +} diff --git a/baseapp/testutil/messages.pb.go b/baseapp/testutil/messages.pb.go new file mode 100644 index 0000000000..9a19f22013 --- /dev/null +++ b/baseapp/testutil/messages.pb.go @@ -0,0 +1,1293 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: messages.proto + +package testutil + +import ( + context "context" + fmt "fmt" + _ "github.com/cosmos/cosmos-sdk/codec/types" + _ "github.com/gogo/protobuf/gogoproto" + grpc1 "github.com/gogo/protobuf/grpc" + proto "github.com/gogo/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type MsgCounter struct { + Counter int64 `protobuf:"varint,1,opt,name=counter,proto3" json:"counter,omitempty"` + FailOnHandler bool `protobuf:"varint,2,opt,name=fail_on_handler,json=failOnHandler,proto3" json:"fail_on_handler,omitempty"` +} + +func (m *MsgCounter) Reset() { *m = MsgCounter{} } +func (m *MsgCounter) String() string { return proto.CompactTextString(m) } +func (*MsgCounter) ProtoMessage() {} +func (*MsgCounter) Descriptor() ([]byte, []int) { + return fileDescriptor_4dc296cbfe5ffcd5, []int{0} +} +func (m *MsgCounter) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgCounter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgCounter.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgCounter) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgCounter.Merge(m, src) +} +func (m *MsgCounter) XXX_Size() int { + return m.Size() +} +func (m *MsgCounter) XXX_DiscardUnknown() { + xxx_messageInfo_MsgCounter.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgCounter proto.InternalMessageInfo + +func (m *MsgCounter) GetCounter() int64 { + if m != nil { + return m.Counter + } + return 0 +} + +func (m *MsgCounter) GetFailOnHandler() bool { + if m != nil { + return m.FailOnHandler + } + return false +} + +type MsgCounter2 struct { + Counter int64 `protobuf:"varint,1,opt,name=counter,proto3" json:"counter,omitempty"` + FailOnHandler bool `protobuf:"varint,2,opt,name=fail_on_handler,json=failOnHandler,proto3" json:"fail_on_handler,omitempty"` +} + +func (m *MsgCounter2) Reset() { *m = MsgCounter2{} } +func (m *MsgCounter2) String() string { return proto.CompactTextString(m) } +func (*MsgCounter2) ProtoMessage() {} +func (*MsgCounter2) Descriptor() ([]byte, []int) { + return fileDescriptor_4dc296cbfe5ffcd5, []int{1} +} +func (m *MsgCounter2) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgCounter2) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgCounter2.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgCounter2) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgCounter2.Merge(m, src) +} +func (m *MsgCounter2) XXX_Size() int { + return m.Size() +} +func (m *MsgCounter2) XXX_DiscardUnknown() { + xxx_messageInfo_MsgCounter2.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgCounter2 proto.InternalMessageInfo + +func (m *MsgCounter2) GetCounter() int64 { + if m != nil { + return m.Counter + } + return 0 +} + +func (m *MsgCounter2) GetFailOnHandler() bool { + if m != nil { + return m.FailOnHandler + } + return false +} + +type MsgCreateCounterResponse struct { +} + +func (m *MsgCreateCounterResponse) Reset() { *m = MsgCreateCounterResponse{} } +func (m *MsgCreateCounterResponse) String() string { return proto.CompactTextString(m) } +func (*MsgCreateCounterResponse) ProtoMessage() {} +func 
(*MsgCreateCounterResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_4dc296cbfe5ffcd5, []int{2} +} +func (m *MsgCreateCounterResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgCreateCounterResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgCreateCounterResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgCreateCounterResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgCreateCounterResponse.Merge(m, src) +} +func (m *MsgCreateCounterResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgCreateCounterResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgCreateCounterResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgCreateCounterResponse proto.InternalMessageInfo + +type MsgKeyValue struct { + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + Signer string `protobuf:"bytes,3,opt,name=signer,proto3" json:"signer,omitempty"` +} + +func (m *MsgKeyValue) Reset() { *m = MsgKeyValue{} } +func (m *MsgKeyValue) String() string { return proto.CompactTextString(m) } +func (*MsgKeyValue) ProtoMessage() {} +func (*MsgKeyValue) Descriptor() ([]byte, []int) { + return fileDescriptor_4dc296cbfe5ffcd5, []int{3} +} +func (m *MsgKeyValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgKeyValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgKeyValue.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgKeyValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgKeyValue.Merge(m, src) +} +func (m *MsgKeyValue) XXX_Size() int { + return m.Size() +} +func (m *MsgKeyValue) XXX_DiscardUnknown() { + xxx_messageInfo_MsgKeyValue.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgKeyValue proto.InternalMessageInfo + +func (m *MsgKeyValue) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *MsgKeyValue) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (m *MsgKeyValue) GetSigner() string { + if m != nil { + return m.Signer + } + return "" +} + +type MsgCreateKeyValueResponse struct { +} + +func (m *MsgCreateKeyValueResponse) Reset() { *m = MsgCreateKeyValueResponse{} } +func (m *MsgCreateKeyValueResponse) String() string { return proto.CompactTextString(m) } +func (*MsgCreateKeyValueResponse) ProtoMessage() {} +func (*MsgCreateKeyValueResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_4dc296cbfe5ffcd5, []int{4} +} +func (m *MsgCreateKeyValueResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgCreateKeyValueResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgCreateKeyValueResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgCreateKeyValueResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgCreateKeyValueResponse.Merge(m, src) +} +func (m *MsgCreateKeyValueResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgCreateKeyValueResponse) 
XXX_DiscardUnknown() { + xxx_messageInfo_MsgCreateKeyValueResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgCreateKeyValueResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*MsgCounter)(nil), "testdata.MsgCounter") + proto.RegisterType((*MsgCounter2)(nil), "testdata.MsgCounter2") + proto.RegisterType((*MsgCreateCounterResponse)(nil), "testdata.MsgCreateCounterResponse") + proto.RegisterType((*MsgKeyValue)(nil), "testdata.MsgKeyValue") + proto.RegisterType((*MsgCreateKeyValueResponse)(nil), "testdata.MsgCreateKeyValueResponse") +} + +func init() { proto.RegisterFile("messages.proto", fileDescriptor_4dc296cbfe5ffcd5) } + +var fileDescriptor_4dc296cbfe5ffcd5 = []byte{ + // 378 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x92, 0xc1, 0x8a, 0x9b, 0x50, + 0x14, 0x86, 0x63, 0xa5, 0x89, 0x3d, 0x4d, 0xdb, 0x20, 0x69, 0x31, 0x16, 0x24, 0x58, 0x28, 0xd9, + 0x44, 0xc1, 0x3e, 0x41, 0xdb, 0x45, 0x5b, 0x5a, 0x1b, 0xb0, 0xd0, 0x61, 0x66, 0x13, 0xae, 0xe6, + 0xe4, 0x46, 0xa2, 0xf7, 0x8a, 0xf7, 0x3a, 0x90, 0xb7, 0x98, 0xc7, 0x9a, 0x65, 0x96, 0xb3, 0x1c, + 0x92, 0x17, 0x19, 0xd4, 0x98, 0x30, 0xc1, 0xc5, 0x2c, 0x66, 0xe5, 0x39, 0xff, 0x0f, 0xdf, 0xcf, + 0xf9, 0xbd, 0xf0, 0x36, 0x45, 0x21, 0x08, 0x45, 0xe1, 0x64, 0x39, 0x97, 0x5c, 0xd7, 0x24, 0x0a, + 0xb9, 0x20, 0x92, 0x98, 0x43, 0xca, 0x29, 0xaf, 0x44, 0xb7, 0x9c, 0x6a, 0xdf, 0x1c, 0x51, 0xce, + 0x69, 0x82, 0x6e, 0xb5, 0x85, 0xc5, 0xd2, 0x25, 0x6c, 0x53, 0x5b, 0xf6, 0x5f, 0x00, 0x5f, 0xd0, + 0xef, 0xbc, 0x60, 0x12, 0x73, 0xdd, 0x80, 0x5e, 0x54, 0x8f, 0x86, 0x32, 0x56, 0x26, 0x6a, 0xd0, + 0xac, 0xfa, 0x67, 0x78, 0xb7, 0x24, 0x71, 0x32, 0xe7, 0x6c, 0xbe, 0x22, 0x6c, 0x91, 0x60, 0x6e, + 0xbc, 0x18, 0x2b, 0x13, 0x2d, 0x78, 0x53, 0xca, 0x33, 0xf6, 0xb3, 0x16, 0xed, 0x19, 0xbc, 0x3e, + 0xf1, 0xbc, 0x67, 0x00, 0x9a, 0x60, 0x94, 0xc0, 0x1c, 0x89, 0xc4, 0x03, 0x36, 0x40, 0x91, 0x71, + 0x26, 0xd0, 0xf6, 0xab, 0xb0, 0xdf, 0xb8, 0xf9, 0x4f, 0x92, 0x02, 0xf5, 0x01, 0xa8, 0x6b, 0xdc, + 0x54, 0x41, 0xfd, 0xa0, 0x1c, 0xf5, 0x21, 0xbc, 0xbc, 0x2e, 0xad, 0x0a, 0xdd, 0x0f, 0xea, 0x45, + 0xff, 0x00, 0x5d, 0x11, 0x53, 0x86, 0xb9, 0xa1, 0x8e, 0x95, 0xc9, 0xab, 0xe0, 0xb0, 0xd9, 0x1f, + 0x61, 0x74, 0x8c, 0x6a, 0xa0, 0x4d, 0x96, 0x77, 0x01, 0xbd, 0xa6, 0xa5, 0x3f, 0x30, 0xf8, 0xc5, + 0xa2, 0x1c, 0x53, 0x64, 0xb2, 0xd1, 0x86, 0x4e, 0xf3, 0x0f, 0x9c, 0xd3, 0xfd, 0xa6, 0xfd, 0x58, + 0x6d, 0x3b, 0xc2, 0xbb, 0x04, 0xed, 0x58, 0x97, 0xdf, 0x42, 0x7e, 0xdf, 0x46, 0xf6, 0x9e, 0x84, + 0xf6, 0x41, 0x3b, 0x96, 0xf3, 0x15, 0xd4, 0x7f, 0x28, 0xcf, 0x68, 0x8d, 0x6b, 0x7e, 0x6a, 0xa1, + 0x9d, 0x57, 0xf0, 0xed, 0xc7, 0xed, 0xce, 0x52, 0xb6, 0x3b, 0x4b, 0xb9, 0xdf, 0x59, 0xca, 0xcd, + 0xde, 0xea, 0x6c, 0xf7, 0x56, 0xe7, 0x6e, 0x6f, 0x75, 0xae, 0xa6, 0x34, 0x96, 0xab, 0x22, 0x74, + 0x22, 0x9e, 0xba, 0x11, 0x17, 0x29, 0x17, 0x87, 0xcf, 0x54, 0x2c, 0xd6, 0x6e, 0x48, 0x04, 0x92, + 0x2c, 0x73, 0xcb, 0x88, 0x42, 0xc6, 0x49, 0xd8, 0xad, 0xde, 0xde, 0x97, 0x87, 0x00, 0x00, 0x00, + 0xff, 0xff, 0x63, 0x31, 0xab, 0xcc, 0xc8, 0x02, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// CounterClient is the client API for Counter service. 
+// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type CounterClient interface { + IncrementCounter(ctx context.Context, in *MsgCounter, opts ...grpc.CallOption) (*MsgCreateCounterResponse, error) +} + +type counterClient struct { + cc grpc1.ClientConn +} + +func NewCounterClient(cc grpc1.ClientConn) CounterClient { + return &counterClient{cc} +} + +func (c *counterClient) IncrementCounter(ctx context.Context, in *MsgCounter, opts ...grpc.CallOption) (*MsgCreateCounterResponse, error) { + out := new(MsgCreateCounterResponse) + err := c.cc.Invoke(ctx, "/testdata.Counter/IncrementCounter", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// CounterServer is the server API for Counter service. +type CounterServer interface { + IncrementCounter(context.Context, *MsgCounter) (*MsgCreateCounterResponse, error) +} + +// UnimplementedCounterServer can be embedded to have forward compatible implementations. +type UnimplementedCounterServer struct { +} + +func (*UnimplementedCounterServer) IncrementCounter(ctx context.Context, req *MsgCounter) (*MsgCreateCounterResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method IncrementCounter not implemented") +} + +func RegisterCounterServer(s grpc1.Server, srv CounterServer) { + s.RegisterService(&_Counter_serviceDesc, srv) +} + +func _Counter_IncrementCounter_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgCounter) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CounterServer).IncrementCounter(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/testdata.Counter/IncrementCounter", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CounterServer).IncrementCounter(ctx, req.(*MsgCounter)) + } + return interceptor(ctx, in, info, handler) +} + +var _Counter_serviceDesc = grpc.ServiceDesc{ + ServiceName: "testdata.Counter", + HandlerType: (*CounterServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "IncrementCounter", + Handler: _Counter_IncrementCounter_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "messages.proto", +} + +// Counter2Client is the client API for Counter2 service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type Counter2Client interface { + IncrementCounter(ctx context.Context, in *MsgCounter2, opts ...grpc.CallOption) (*MsgCreateCounterResponse, error) +} + +type counter2Client struct { + cc grpc1.ClientConn +} + +func NewCounter2Client(cc grpc1.ClientConn) Counter2Client { + return &counter2Client{cc} +} + +func (c *counter2Client) IncrementCounter(ctx context.Context, in *MsgCounter2, opts ...grpc.CallOption) (*MsgCreateCounterResponse, error) { + out := new(MsgCreateCounterResponse) + err := c.cc.Invoke(ctx, "/testdata.Counter2/IncrementCounter", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Counter2Server is the server API for Counter2 service. +type Counter2Server interface { + IncrementCounter(context.Context, *MsgCounter2) (*MsgCreateCounterResponse, error) +} + +// UnimplementedCounter2Server can be embedded to have forward compatible implementations. 
+type UnimplementedCounter2Server struct { +} + +func (*UnimplementedCounter2Server) IncrementCounter(ctx context.Context, req *MsgCounter2) (*MsgCreateCounterResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method IncrementCounter not implemented") +} + +func RegisterCounter2Server(s grpc1.Server, srv Counter2Server) { + s.RegisterService(&_Counter2_serviceDesc, srv) +} + +func _Counter2_IncrementCounter_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgCounter2) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(Counter2Server).IncrementCounter(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/testdata.Counter2/IncrementCounter", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(Counter2Server).IncrementCounter(ctx, req.(*MsgCounter2)) + } + return interceptor(ctx, in, info, handler) +} + +var _Counter2_serviceDesc = grpc.ServiceDesc{ + ServiceName: "testdata.Counter2", + HandlerType: (*Counter2Server)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "IncrementCounter", + Handler: _Counter2_IncrementCounter_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "messages.proto", +} + +// KeyValueClient is the client API for KeyValue service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type KeyValueClient interface { + Set(ctx context.Context, in *MsgKeyValue, opts ...grpc.CallOption) (*MsgCreateKeyValueResponse, error) +} + +type keyValueClient struct { + cc grpc1.ClientConn +} + +func NewKeyValueClient(cc grpc1.ClientConn) KeyValueClient { + return &keyValueClient{cc} +} + +func (c *keyValueClient) Set(ctx context.Context, in *MsgKeyValue, opts ...grpc.CallOption) (*MsgCreateKeyValueResponse, error) { + out := new(MsgCreateKeyValueResponse) + err := c.cc.Invoke(ctx, "/testdata.KeyValue/Set", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// KeyValueServer is the server API for KeyValue service. +type KeyValueServer interface { + Set(context.Context, *MsgKeyValue) (*MsgCreateKeyValueResponse, error) +} + +// UnimplementedKeyValueServer can be embedded to have forward compatible implementations. 
+type UnimplementedKeyValueServer struct { +} + +func (*UnimplementedKeyValueServer) Set(ctx context.Context, req *MsgKeyValue) (*MsgCreateKeyValueResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Set not implemented") +} + +func RegisterKeyValueServer(s grpc1.Server, srv KeyValueServer) { + s.RegisterService(&_KeyValue_serviceDesc, srv) +} + +func _KeyValue_Set_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgKeyValue) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyValueServer).Set(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/testdata.KeyValue/Set", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyValueServer).Set(ctx, req.(*MsgKeyValue)) + } + return interceptor(ctx, in, info, handler) +} + +var _KeyValue_serviceDesc = grpc.ServiceDesc{ + ServiceName: "testdata.KeyValue", + HandlerType: (*KeyValueServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Set", + Handler: _KeyValue_Set_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "messages.proto", +} + +func (m *MsgCounter) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgCounter) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgCounter) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.FailOnHandler { + i-- + if m.FailOnHandler { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.Counter != 0 { + i = encodeVarintMessages(dAtA, i, uint64(m.Counter)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *MsgCounter2) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgCounter2) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgCounter2) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.FailOnHandler { + i-- + if m.FailOnHandler { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.Counter != 0 { + i = encodeVarintMessages(dAtA, i, uint64(m.Counter)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *MsgCreateCounterResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgCreateCounterResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgCreateCounterResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgKeyValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgKeyValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() 
+ return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgKeyValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Signer) > 0 { + i -= len(m.Signer) + copy(dAtA[i:], m.Signer) + i = encodeVarintMessages(dAtA, i, uint64(len(m.Signer))) + i-- + dAtA[i] = 0x1a + } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintMessages(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintMessages(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgCreateKeyValueResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgCreateKeyValueResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgCreateKeyValueResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func encodeVarintMessages(dAtA []byte, offset int, v uint64) int { + offset -= sovMessages(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *MsgCounter) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Counter != 0 { + n += 1 + sovMessages(uint64(m.Counter)) + } + if m.FailOnHandler { + n += 2 + } + return n +} + +func (m *MsgCounter2) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Counter != 0 { + n += 1 + sovMessages(uint64(m.Counter)) + } + if m.FailOnHandler { + n += 2 + } + return n +} + +func (m *MsgCreateCounterResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgKeyValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovMessages(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovMessages(uint64(l)) + } + l = len(m.Signer) + if l > 0 { + n += 1 + l + sovMessages(uint64(l)) + } + return n +} + +func (m *MsgCreateKeyValueResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func sovMessages(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozMessages(x uint64) (n int) { + return sovMessages(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *MsgCounter) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessages + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgCounter: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgCounter: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Counter", wireType) + } + m.Counter = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessages + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Counter |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FailOnHandler", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessages + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.FailOnHandler = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipMessages(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMessages + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgCounter2) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessages + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgCounter2: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgCounter2: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Counter", wireType) + } + m.Counter = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessages + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Counter |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FailOnHandler", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessages + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.FailOnHandler = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipMessages(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMessages + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgCreateCounterResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessages + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgCreateCounterResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgCreateCounterResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipMessages(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return 
ErrInvalidLengthMessages + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgKeyValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessages + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgKeyValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgKeyValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessages + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthMessages + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthMessages + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessages + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthMessages + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthMessages + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Signer", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessages + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMessages + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMessages + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Signer = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMessages(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMessages + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgCreateKeyValueResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessages + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgCreateKeyValueResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgCreateKeyValueResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipMessages(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMessages + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipMessages(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMessages + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMessages + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMessages + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthMessages + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupMessages + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthMessages + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + 
ErrInvalidLengthMessages = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowMessages = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupMessages = fmt.Errorf("proto: unexpected end of group") +) diff --git a/baseapp/testutil/messages.proto b/baseapp/testutil/messages.proto new file mode 100644 index 0000000000..866e336669 --- /dev/null +++ b/baseapp/testutil/messages.proto @@ -0,0 +1,39 @@ +syntax = "proto3"; +package testdata; + +import "gogoproto/gogo.proto"; +import "google/protobuf/any.proto"; + +option go_package = "github.com/cosmos/cosmos-sdk/baseapp/testutil"; + +message MsgCounter { + int64 counter = 1; + bool fail_on_handler = 2; +} + +message MsgCounter2 { + int64 counter = 1; + bool fail_on_handler = 2; +} + +message MsgCreateCounterResponse {} + +message MsgKeyValue { + bytes key = 1; + bytes value = 2; + string signer = 3; +} + +message MsgCreateKeyValueResponse {} + +service Counter { + rpc IncrementCounter(MsgCounter) returns (MsgCreateCounterResponse); +} + +service Counter2 { + rpc IncrementCounter(MsgCounter2) returns (MsgCreateCounterResponse); +} + +service KeyValue { + rpc Set(MsgKeyValue) returns (MsgCreateKeyValueResponse); +} \ No newline at end of file diff --git a/client/config/config_test.go b/client/config/config_test.go index c058edf830..a7a7232cb8 100644 --- a/client/config/config_test.go +++ b/client/config/config_test.go @@ -3,7 +3,7 @@ package config_test import ( "bytes" "fmt" - "io/ioutil" + "io" "os" "testing" @@ -58,7 +58,7 @@ func TestConfigCmd(t *testing.T) { cmd.SetOut(b) cmd.SetArgs([]string{"node"}) cmd.Execute() - out, err := ioutil.ReadAll(b) + out, err := io.ReadAll(b) require.NoError(t, err) require.Equal(t, string(out), testNode1+"\n") } diff --git a/client/config/toml.go b/client/config/toml.go index e5357eaf94..97c8b00b42 100644 --- a/client/config/toml.go +++ b/client/config/toml.go @@ -2,7 +2,6 @@ package config import ( "bytes" - "io/ioutil" "os" "text/template" @@ -43,7 +42,7 @@ func writeConfigToFile(configFilePath string, config *ClientConfig) error { return err } - return ioutil.WriteFile(configFilePath, buffer.Bytes(), 0o600) + return os.WriteFile(configFilePath, buffer.Bytes(), 0o600) } // ensureConfigPath creates a directory configPath if it does not exist diff --git a/client/keys/add.go b/client/keys/add.go index d9713fd2d5..11a6326ab7 100644 --- a/client/keys/add.go +++ b/client/keys/add.go @@ -93,12 +93,13 @@ func runAddCmdPrepare(cmd *cobra.Command, args []string) error { /* input - - bip39 mnemonic - - bip39 passphrase - - bip44 path - - local encryption password + - bip39 mnemonic + - bip39 passphrase + - bip44 path + - local encryption password + output - - armor encrypted private key (saved to file) + - armor encrypted private key (saved to file) */ func runAddCmd(ctx client.Context, cmd *cobra.Command, args []string, inBuf *bufio.Reader) error { var err error diff --git a/client/keys/add_ledger_test.go b/client/keys/add_ledger_test.go index cfa63eed8d..0a3206bfea 100644 --- a/client/keys/add_ledger_test.go +++ b/client/keys/add_ledger_test.go @@ -7,7 +7,7 @@ import ( "bytes" "context" "fmt" - "io/ioutil" + "io" "testing" "github.com/stretchr/testify/require" @@ -179,7 +179,7 @@ func Test_runAddCmdLedgerDryRun(t *testing.T) { _, err = kb.Key("testkey") require.NoError(t, err) - out, err := ioutil.ReadAll(b) + out, err := io.ReadAll(b) require.NoError(t, err) require.Contains(t, string(out), "name: testkey") } else { diff --git a/client/keys/add_test.go b/client/keys/add_test.go 
index f59848a8d1..88168bcdf9 100644 --- a/client/keys/add_test.go +++ b/client/keys/add_test.go @@ -4,7 +4,7 @@ import ( "bytes" "context" "fmt" - "io/ioutil" + "io" "testing" "github.com/stretchr/testify/require" @@ -218,7 +218,7 @@ func Test_runAddCmdDryRun(t *testing.T) { _, err = kb.Key("testkey") require.NoError(t, err) - out, err := ioutil.ReadAll(b) + out, err := io.ReadAll(b) require.NoError(t, err) require.Contains(t, string(out), "name: testkey") } else { diff --git a/client/keys/import.go b/client/keys/import.go index 36662a8dd2..a9d5c185ac 100644 --- a/client/keys/import.go +++ b/client/keys/import.go @@ -2,7 +2,7 @@ package keys import ( "bufio" - "io/ioutil" + "os" "github.com/spf13/cobra" @@ -24,7 +24,7 @@ func ImportKeyCommand() *cobra.Command { } buf := bufio.NewReader(clientCtx.Input) - bz, err := ioutil.ReadFile(args[1]) + bz, err := os.ReadFile(args[1]) if err != nil { return err } diff --git a/client/keys/import_test.go b/client/keys/import_test.go index 00d4de05a8..91e9379086 100644 --- a/client/keys/import_test.go +++ b/client/keys/import_test.go @@ -3,7 +3,6 @@ package keys import ( "context" "fmt" - "io/ioutil" "os" "path/filepath" "testing" @@ -95,7 +94,7 @@ HbP+c6JmeJy9JXe2rbbF1QtCX1gLqGcDQPBXiCtFvP7/8wTZtVOPj8vREzhZ9ElO keyfile := filepath.Join(kbHome, "key.asc") - require.NoError(t, ioutil.WriteFile(keyfile, []byte(armoredKey), 0o644)) + require.NoError(t, os.WriteFile(keyfile, []byte(armoredKey), 0o644)) defer func() { _ = os.RemoveAll(kbHome) diff --git a/client/keys/migrate.go b/client/keys/migrate.go index 321750d9e8..10f9a33934 100644 --- a/client/keys/migrate.go +++ b/client/keys/migrate.go @@ -3,7 +3,6 @@ package keys import ( "bufio" "fmt" - "io/ioutil" "os" "github.com/pkg/errors" @@ -68,7 +67,7 @@ func runMigrateCmd(cmd *cobra.Command, args []string) error { ) if dryRun, _ := cmd.Flags().GetBool(flags.FlagDryRun); dryRun { - tmpDir, err = ioutil.TempDir("", "migrator-migrate-dryrun") + tmpDir, err = os.MkdirTemp("", "migrator-migrate-dryrun") if err != nil { return errors.Wrap(err, "failed to create temporary directory for dryrun migration") } diff --git a/client/pruning/main.go b/client/pruning/main.go new file mode 100644 index 0000000000..dfaf45f7f9 --- /dev/null +++ b/client/pruning/main.go @@ -0,0 +1,121 @@ +package pruning + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/spf13/cobra" + "github.com/spf13/viper" + + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/cosmos/cosmos-sdk/server" + servertypes "github.com/cosmos/cosmos-sdk/server/types" + "github.com/cosmos/cosmos-sdk/store/rootmulti" + storetypes "github.com/cosmos/cosmos-sdk/store/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/tendermint/tendermint/libs/log" + dbm "github.com/tendermint/tm-db" +) + +const FlagAppDBBackend = "app-db-backend" + +// PruningCmd prunes the sdk root multi store history versions based on the pruning options +// specified by command flags. +func PruningCmd(appCreator servertypes.AppCreator) *cobra.Command { + cmd := &cobra.Command{ + Use: "prune", + Short: "Prune app history states by keeping the recent heights and deleting old heights", + Long: `Prune app history states by keeping the recent heights and deleting old heights. + The pruning option is provided via the '--pruning' flag or alternatively with '--pruning-keep-recent' + + For '--pruning' the options are as follows: + + default: the last 362880 states are kept + nothing: all historic states will be saved, nothing will be deleted (i.e. 
archiving node) + everything: 2 latest states will be kept + custom: allow pruning options to be manually specified through 'pruning-keep-recent'. + besides pruning options, database home directory and database backend type should also be specified via flags + '--home' and '--app-db-backend'. + valid app-db-backend type includes 'goleveldb', 'cleveldb', 'rocksdb', 'boltdb', and 'badgerdb'. + `, + Example: `prune --home './' --app-db-backend 'goleveldb' --pruning 'custom' --pruning-keep-recent 100 --pruning-keep-every 10 --pruning-interval 10`, + RunE: func(cmd *cobra.Command, _ []string) error { + vp := viper.New() + + // Bind flags to the Context's Viper so we can get pruning options. + if err := vp.BindPFlags(cmd.Flags()); err != nil { + return err + } + pruningOptions, err := server.GetPruningOptionsFromFlags(vp) + if err != nil { + return err + } + fmt.Printf("get pruning options from command flags, keep-recent: %v\n", + pruningOptions.KeepRecent, + ) + + home := vp.GetString(flags.FlagHome) + db, err := openDB(home) + if err != nil { + return err + } + + logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout)) + app := appCreator(logger, db, nil, vp) + cms := app.CommitMultiStore() + + rootMultiStore, ok := cms.(*rootmulti.Store) + if !ok { + return fmt.Errorf("currently only the pruning of rootmulti.Store type is supported") + } + latestHeight := rootmulti.GetLatestVersion(db) + // valid heights should be greater than 0. + if latestHeight <= 0 { + return fmt.Errorf("the database has no valid heights to prune, the latest height: %v", latestHeight) + } + + var pruningHeights []int64 + for height := int64(1); height < latestHeight; height++ { + if height < latestHeight-int64(pruningOptions.KeepRecent) { + pruningHeights = append(pruningHeights, height) + } + } + if len(pruningHeights) == 0 { + fmt.Printf("no heights to prune\n") + return nil + } + fmt.Printf( + "pruning heights start from %v, end at %v\n", + pruningHeights[0], + pruningHeights[len(pruningHeights)-1], + ) + + rootMultiStore.PruneStores(false, pruningHeights) + fmt.Printf("successfully pruned the application root multi stores\n") + return nil + }, + } + + cmd.Flags().String(flags.FlagHome, "", "The database home directory") + cmd.Flags().String(FlagAppDBBackend, "", "The type of database for application and snapshots databases") + cmd.Flags().String(server.FlagPruning, storetypes.PruningOptionDefault, "Pruning strategy (default|nothing|everything|custom)") + cmd.Flags().Uint64(server.FlagPruningKeepRecent, 0, "Number of recent heights to keep on disk (ignored if pruning is not 'custom')") + cmd.Flags().Uint64(server.FlagPruningKeepEvery, 0, + `Offset heights to keep on disk after 'keep-every' (ignored if pruning is not 'custom'), + this is not used by this command but kept for compatibility with the complete pruning options`) + cmd.Flags().Uint64(server.FlagPruningInterval, 10, + `Height interval at which pruned heights are removed from disk (ignored if pruning is not 'custom'), + this is not used by this command but kept for compatibility with the complete pruning options`) + + return cmd +} + +func openDB(rootDir string) (dbm.DB, error) { + dataDir := filepath.Join(rootDir, "data") + return sdk.NewLevelDB("application", dataDir) +} diff --git a/client/rest/rest.go b/client/rest/rest.go index 035b0f5dbb..ac05891e04 100644 --- a/client/rest/rest.go +++ b/client/rest/rest.go @@ -25,7 +25,6 @@ func addHTTPDeprecationHeaders(h http.Handler) http.Handler { // WithHTTPDeprecationHeaders returns
a new *mux.Router, identical to its input // but with the addition of HTTP Deprecation headers. This is used to mark legacy // amino REST endpoints as deprecated in the REST API. -// nolint: gocritic func WithHTTPDeprecationHeaders(r *mux.Router) *mux.Router { subRouter := r.NewRoute().Subrouter() subRouter.Use(addHTTPDeprecationHeaders) diff --git a/codec/amino_codec.go b/codec/amino_codec.go index 69f4dc133d..77969d22df 100644 --- a/codec/amino_codec.go +++ b/codec/amino_codec.go @@ -96,8 +96,9 @@ func (ac *AminoCodec) MarshalInterface(i proto.Message) ([]byte, error) { // NOTE: to unmarshal a concrete type, you should use Unmarshal instead // // Example: -// var x MyInterface -// err := cdc.UnmarshalInterface(bz, &x) +// +// var x MyInterface +// err := cdc.UnmarshalInterface(bz, &x) func (ac *AminoCodec) UnmarshalInterface(bz []byte, ptr interface{}) error { return ac.LegacyAmino.Unmarshal(bz, ptr) } @@ -117,8 +118,9 @@ func (ac *AminoCodec) MarshalInterfaceJSON(i proto.Message) ([]byte, error) { // NOTE: to unmarshal a concrete type, you should use UnmarshalJSON instead // // Example: -// var x MyInterface -// err := cdc.UnmarshalInterfaceJSON(bz, &x) +// +// var x MyInterface +// err := cdc.UnmarshalInterfaceJSON(bz, &x) func (ac *AminoCodec) UnmarshalInterfaceJSON(bz []byte, ptr interface{}) error { return ac.LegacyAmino.UnmarshalJSON(bz, ptr) } diff --git a/codec/proto_codec.go b/codec/proto_codec.go index efeb7e9b2c..458fd65052 100644 --- a/codec/proto_codec.go +++ b/codec/proto_codec.go @@ -200,8 +200,9 @@ func (pc *ProtoCodec) MarshalInterface(i proto.Message) ([]byte, error) { // NOTE: to unmarshal a concrete type, you should use Unmarshal instead // // Example: -// var x MyInterface -// err := cdc.UnmarshalInterface(bz, &x) +// +// var x MyInterface +// err := cdc.UnmarshalInterface(bz, &x) func (pc *ProtoCodec) UnmarshalInterface(bz []byte, ptr interface{}) error { any := &types.Any{} err := pc.Unmarshal(bz, any) @@ -229,8 +230,9 @@ func (pc *ProtoCodec) MarshalInterfaceJSON(x proto.Message) ([]byte, error) { // NOTE: to unmarshal a concrete type, you should use UnmarshalJSON instead // // Example: -// var x MyInterface // must implement proto.Message -// err := cdc.UnmarshalInterfaceJSON(&x, bz) +// +// var x MyInterface // must implement proto.Message +// err := cdc.UnmarshalInterfaceJSON(&x, bz) func (pc *ProtoCodec) UnmarshalInterfaceJSON(bz []byte, iface interface{}) error { any := &types.Any{} err := pc.UnmarshalJSON(bz, any) diff --git a/codec/types/any.go b/codec/types/any.go index 07c8de98c8..b8d66e0175 100644 --- a/codec/types/any.go +++ b/codec/types/any.go @@ -37,18 +37,14 @@ type Any struct { // Schemes other than `http`, `https` (or the empty scheme) might be // used with implementation specific semantics. - // nolint TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` // Must be a valid serialized protocol buffer of the above specified type. 
Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - // nolint XXX_NoUnkeyedLiteral struct{} `json:"-"` - // nolint XXX_unrecognized []byte `json:"-"` - // nolint XXX_sizecache int32 `json:"-"` cachedValue interface{} diff --git a/codec/unknownproto/doc.go b/codec/unknownproto/doc.go index 0e0a463422..cef3f8f253 100644 --- a/codec/unknownproto/doc.go +++ b/codec/unknownproto/doc.go @@ -8,17 +8,17 @@ b) Mismatched wire types for a field -- this is indicative of mismatched service Its API signature is similar to proto.Unmarshal([]byte, proto.Message) in the strict case - if err := RejectUnknownFieldsStrict(protoBlob, protoMessage, false); err != nil { - // Handle the error. - } + if err := RejectUnknownFieldsStrict(protoBlob, protoMessage, false); err != nil { + // Handle the error. + } and ideally should be added before invoking proto.Unmarshal, if you'd like to enforce the features mentioned above. By default, for security we report every single field that's unknown, whether a non-critical field or not. To customize this behavior, please set the boolean parameter allowUnknownNonCriticals to true to RejectUnknownFields: - if err := RejectUnknownFields(protoBlob, protoMessage, true); err != nil { - // Handle the error. - } + if err := RejectUnknownFields(protoBlob, protoMessage, true); err != nil { + // Handle the error. + } */ package unknownproto diff --git a/codec/unknownproto/unknown_fields.go b/codec/unknownproto/unknown_fields.go index 17f0e508e4..3af40ffed1 100644 --- a/codec/unknownproto/unknown_fields.go +++ b/codec/unknownproto/unknown_fields.go @@ -5,7 +5,7 @@ import ( "compress/gzip" "errors" "fmt" - "io/ioutil" + "io" "reflect" "strings" "sync" @@ -360,7 +360,7 @@ func extractFileDescMessageDesc(desc descriptorIface) (*descriptor.FileDescripto if err != nil { return nil, nil, err } - protoBlob, err := ioutil.ReadAll(gzr) + protoBlob, err := io.ReadAll(gzr) if err != nil { return nil, nil, err } diff --git a/contrib/images/simd-env/Dockerfile b/contrib/images/simd-env/Dockerfile index b83c3b831b..eb3f2d29aa 100644 --- a/contrib/images/simd-env/Dockerfile +++ b/contrib/images/simd-env/Dockerfile @@ -2,6 +2,7 @@ FROM golang:1.18-alpine AS build RUN apk add build-base git linux-headers WORKDIR /work COPY go.mod go.sum /work/ +COPY ./ics23/go.mod /work/ics23/go.mod RUN go mod download COPY ./ /work diff --git a/cosmovisor/scanner.go b/cosmovisor/scanner.go index 81bca0717a..fb5fa22e1e 100644 --- a/cosmovisor/scanner.go +++ b/cosmovisor/scanner.go @@ -7,13 +7,15 @@ import ( // Trim off whitespace around the info - match least greedy, grab as much space on both sides // Defined here: https://github.com/cosmos/cosmos-sdk/blob/release/v0.38.2/x/upgrade/abci.go#L38 -// fmt.Sprintf("UPGRADE \"%s\" NEEDED at %s: %s", plan.Name, plan.DueAt(), plan.Info) +// +// fmt.Sprintf("UPGRADE \"%s\" NEEDED at %s: %s", plan.Name, plan.DueAt(), plan.Info) +// // DueAt defined here: https://github.com/cosmos/cosmos-sdk/blob/release/v0.38.2/x/upgrade/internal/types/plan.go#L73-L78 // -// if !p.Time.IsZero() { -// return fmt.Sprintf("time: %s", p.Time.UTC().Format(time.RFC3339)) -// } -// return fmt.Sprintf("height: %d", p.Height) +// if !p.Time.IsZero() { +// return fmt.Sprintf("time: %s", p.Time.UTC().Format(time.RFC3339)) +// } +// return fmt.Sprintf("height: %d", p.Height) var upgradeRegex = regexp.MustCompile(`UPGRADE "(.*)" NEEDED at ((height): (\d+)|(time): (\S+)):\s+(\S*)`) // UpgradeInfo is the details from the regexp diff --git a/cosmovisor/upgrade.go 
b/cosmovisor/upgrade.go index 88d949e2be..beba31c653 100644 --- a/cosmovisor/upgrade.go +++ b/cosmovisor/upgrade.go @@ -4,7 +4,6 @@ import ( "encoding/json" "errors" "fmt" - "io/ioutil" "net/url" "os" "path/filepath" @@ -106,7 +105,7 @@ func GetDownloadURL(info *UpgradeInfo) (string, error) { doc := strings.TrimSpace(info.Info) // if this is a url, then we download that and try to get a new doc with the real info if _, err := url.Parse(doc); err == nil { - tmpDir, err := ioutil.TempDir("", "upgrade-manager-reference") + tmpDir, err := os.MkdirTemp("", "upgrade-manager-reference") if err != nil { return "", fmt.Errorf("create tempdir for reference file: %w", err) } @@ -117,7 +116,7 @@ func GetDownloadURL(info *UpgradeInfo) (string, error) { return "", fmt.Errorf("downloading reference link %s: %w", doc, err) } - refBytes, err := ioutil.ReadFile(refPath) + refBytes, err := os.ReadFile(refPath) if err != nil { return "", fmt.Errorf("reading downloaded reference: %w", err) } diff --git a/crypto/hd/doc.go b/crypto/hd/doc.go index 38d65213c1..85b6369e75 100644 --- a/crypto/hd/doc.go +++ b/crypto/hd/doc.go @@ -1,8 +1,9 @@ // Package hd provides support for hierarchical deterministic wallets generation and derivation. // // The user must understand the overall concept of the BIP 32 and the BIP 44 specs: -// https://github.com/bitcoin/bips/blob/master/bip-0044.mediawiki -// https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki +// +// https://github.com/bitcoin/bips/blob/master/bip-0044.mediawiki +// https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki // // In combination with the bip39 package in go-crypto this package provides the functionality for // deriving keys using a BIP 44 HD path, or, more general, by passing a BIP 32 path. diff --git a/crypto/hd/fundraiser_test.go b/crypto/hd/fundraiser_test.go index 4afbfc5a9c..b3252fba03 100644 --- a/crypto/hd/fundraiser_test.go +++ b/crypto/hd/fundraiser_test.go @@ -4,7 +4,7 @@ import ( "encoding/hex" "encoding/json" "fmt" - "io/ioutil" + "os" "testing" "github.com/stretchr/testify/require" @@ -35,7 +35,7 @@ func initFundraiserTestVectors(t *testing.T) []addrData { // var hdPath string = "m/44'/118'/0'/0/0" var hdToAddrTable []addrData - b, err := ioutil.ReadFile("testdata/test.json") + b, err := os.ReadFile("testdata/test.json") if err != nil { t.Fatalf("could not read fundraiser test vector file (testdata/test.json): %s", err) } diff --git a/crypto/hd/hdpath.go b/crypto/hd/hdpath.go index 058ddcc456..a4683a97c6 100644 --- a/crypto/hd/hdpath.go +++ b/crypto/hd/hdpath.go @@ -227,7 +227,7 @@ func DerivePrivateKeyForPath(privKeyBytes, chainCode [32]byte, path string) ([]b // If harden is true, the derivation is 'hardened'. // It returns the new private key and new chain code. // For more information on hardened keys see: -// - https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki +// - https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki func derivePrivateKey(privKeyBytes [32]byte, chainCode [32]byte, index uint32, harden bool) ([32]byte, [32]byte) { var data []byte diff --git a/crypto/keyring/doc.go b/crypto/keyring/doc.go index 04a8b98cdf..a8aad2b215 100644 --- a/crypto/keyring/doc.go +++ b/crypto/keyring/doc.go @@ -1,25 +1,24 @@ // Package keys provides common key management API. // -// -// The Keybase interface +// # The Keybase interface // // The Keybase interface defines the methods that a type needs to implement to be used // as key storage backend. This package provides few implementations out-of-the-box. 
// -// NewLegacy +// # NewLegacy // // The NewLegacy constructor returns an on-disk implementation backed by LevelDB storage that has been // the default implementation used by the SDK until v0.38.0. Due to security concerns, it is // recommended to drop it in favor of the NewKeyring constructor as it will be removed in future releases. // -// NewInMemory +// # NewInMemory // // The NewInMemory constructor returns an implementation backed by an in-memory, goroutine-safe // map that has historically been used for testing purposes or on-the-fly key generation as the // generated keys are discarded when the process terminates or the type instance is garbage // collected. // -// New +// # New // // The New constructor returns an implementation backed by a keyring library // (https://github.com/99designs/keyring), whose aim is to provide a common abstraction and uniform @@ -27,20 +26,21 @@ // as well as operating system-agnostic encrypted file-based backends. // // The backends: -// os The instance returned by this constructor uses the operating system's default -// credentials store to handle keys storage operations securely. It should be noted -// that the keyring keyring may be kept unlocked for the whole duration of the user -// session. -// file This backend more closely resembles the previous keyring storage used prior to -// v0.38.1. It stores the keyring encrypted within the app's configuration directory. -// This keyring will request a password each time it is accessed, which may occur -// multiple times in a single command resulting in repeated password prompts. -// kwallet This backend uses KDE Wallet Manager as a credentials management application: -// https://github.com/KDE/kwallet -// pass This backend uses the pass command line utility to store and retrieve keys: -// https://www.passwordstore.org/ -// test This backend stores keys insecurely to disk. It does not prompt for a password to -// be unlocked and it should be use only for testing purposes. -// memory Same instance as returned by NewInMemory. This backend uses a transient storage. Keys -// are discarded when the process terminates or the type instance is garbage collected. +// +// os The instance returned by this constructor uses the operating system's default +// credentials store to handle keys storage operations securely. It should be noted +// that the keyring may be kept unlocked for the whole duration of the user +// session. +// file This backend more closely resembles the previous keyring storage used prior to +// v0.38.1. It stores the keyring encrypted within the app's configuration directory. +// This keyring will request a password each time it is accessed, which may occur +// multiple times in a single command resulting in repeated password prompts. +// kwallet This backend uses KDE Wallet Manager as a credentials management application: +// https://github.com/KDE/kwallet +// pass This backend uses the pass command line utility to store and retrieve keys: +// https://www.passwordstore.org/ +// test This backend stores keys insecurely to disk. It does not prompt for a password to +// be unlocked and it should be used only for testing purposes. +// memory Same instance as returned by NewInMemory. This backend uses a transient storage. Keys +// are discarded when the process terminates or the type instance is garbage collected.
package keyring diff --git a/crypto/keyring/keyring.go b/crypto/keyring/keyring.go index 183a9fe990..d41152978a 100644 --- a/crypto/keyring/keyring.go +++ b/crypto/keyring/keyring.go @@ -5,7 +5,6 @@ import ( "encoding/hex" "fmt" "io" - "io/ioutil" "os" "path/filepath" "sort" @@ -468,29 +467,48 @@ func wrapKeyNotFound(err error, msg string) error { } func (ks keystore) List() ([]Info, error) { - var res []Info + res := []Info{} keys, err := ks.db.Keys() if err != nil { return nil, err } - sort.Strings(keys) + if len(keys) == 0 { + return res, nil + } + sort.Strings(keys) for _, key := range keys { if strings.HasSuffix(key, infoSuffix) { rawInfo, err := ks.db.Get(key) if err != nil { - return nil, err + fmt.Printf("err for key %s: %q\n", key, err) + + // add the name of the key in case the user wants to retrieve it + // afterwards + info := newOfflineInfo(key, nil, hd.PubKeyType("")) + res = append(res, info) + continue } if len(rawInfo.Data) == 0 { - return nil, sdkerrors.Wrap(sdkerrors.ErrKeyNotFound, key) + fmt.Println(sdkerrors.Wrap(sdkerrors.ErrKeyNotFound, key)) + + // add the name of the key in case the user wants to retrieve it + // afterwards + info := newOfflineInfo(key, nil, hd.PubKeyType("")) + res = append(res, info) + continue } info, err := unmarshalInfo(rawInfo.Data) if err != nil { - return nil, err + fmt.Printf("err for key %s: %q\n", key, err) + + // add the name of the key in case the user wants to retrieve it + // afterwards + info = newOfflineInfo(key, nil, hd.PubKeyType("")) } res = append(res, info) @@ -670,7 +688,7 @@ func newRealPrompt(dir string, buf io.Reader) func(string) (string, error) { switch { case err == nil: - keyhash, err = ioutil.ReadFile(keyhashFilePath) + keyhash, err = os.ReadFile(keyhashFilePath) if err != nil { return "", fmt.Errorf("failed to read %s: %v", keyhashFilePath, err) } @@ -734,7 +752,7 @@ func newRealPrompt(dir string, buf io.Reader) func(string) (string, error) { continue } - if err := ioutil.WriteFile(dir+"/keyhash", passwordHash, 0o555); err != nil { + if err := os.WriteFile(dir+"/keyhash", passwordHash, 0o555); err != nil { return "", err } diff --git a/crypto/keyring/output.go b/crypto/keyring/output.go index 91588e3137..3ebfbbc016 100644 --- a/crypto/keyring/output.go +++ b/crypto/keyring/output.go @@ -21,7 +21,7 @@ type KeyOutput struct { } // NewKeyOutput creates a default KeyOutput instance without Mnemonic, Threshold and PubKeys -func NewKeyOutput(name string, keyType KeyType, a sdk.Address, pk cryptotypes.PubKey) (KeyOutput, error) { // nolint:interfacer +func NewKeyOutput(name string, keyType KeyType, a sdk.Address, pk cryptotypes.PubKey) (KeyOutput, error) { //nolint:interfacer apk, err := codectypes.NewAnyWithValue(pk) if err != nil { return KeyOutput{}, err diff --git a/crypto/keys/multisig/codec.go b/crypto/keys/multisig/codec.go index d501e1b427..9f85ce5523 100644 --- a/crypto/keys/multisig/codec.go +++ b/crypto/keys/multisig/codec.go @@ -15,7 +15,7 @@ const ( PubKeyAminoRoute = "tendermint/PubKeyMultisigThreshold" ) -//nolint +// nolint // Deprecated: Amino is being deprecated in the SDK. But even if you need to // use Amino for some reason, please use `codec/legacy.Cdc` instead. 
var AminoCdc = codec.NewLegacyAmino() diff --git a/crypto/keys/secp256k1/internal/secp256k1/curve.go b/crypto/keys/secp256k1/internal/secp256k1/curve.go index b5c80b23ad..795d9b31ee 100644 --- a/crypto/keys/secp256k1/internal/secp256k1/curve.go +++ b/crypto/keys/secp256k1/internal/secp256k1/curve.go @@ -30,6 +30,7 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +//nolint package secp256k1 import ( @@ -100,7 +101,7 @@ func (BitCurve *BitCurve) IsOnCurve(x, y *big.Int) bool { x3.Mul(x3, x) // x³ x3.Add(x3, BitCurve.B) // x³+B - x3.Mod(x3, BitCurve.P) //(x³+B)%P + x3.Mod(x3, BitCurve.P) // (x³+B)%P return x3.Cmp(y2) == 0 } @@ -222,9 +223,9 @@ func (BitCurve *BitCurve) doubleJacobian(x, y, z *big.Int) (*big.Int, *big.Int, c := new(big.Int).Mul(b, b) // B² d := new(big.Int).Add(x, b) // X1+B - d.Mul(d, d) //(X1+B)² - d.Sub(d, a) //(X1+B)²-A - d.Sub(d, c) //(X1+B)²-A-C + d.Mul(d, d) // (X1+B)² + d.Sub(d, a) // (X1+B)²-A + d.Sub(d, c) // (X1+B)²-A-C d.Mul(d, big.NewInt(2)) // 2*((X1+B)²-A-C) e := new(big.Int).Mul(big.NewInt(3), a) // 3*A diff --git a/crypto/keys/secp256k1/internal/secp256k1/scalar_mult_cgo.go b/crypto/keys/secp256k1/internal/secp256k1/scalar_mult_cgo.go index 8afa9d023b..e2c10db4b9 100644 --- a/crypto/keys/secp256k1/internal/secp256k1/scalar_mult_cgo.go +++ b/crypto/keys/secp256k1/internal/secp256k1/scalar_mult_cgo.go @@ -5,6 +5,8 @@ //go:build !gofuzz && cgo // +build !gofuzz,cgo +// +//nolint:gocritic package secp256k1 import ( diff --git a/crypto/keys/secp256k1/secp256k1.go b/crypto/keys/secp256k1/secp256k1.go index 51034275cd..9ec713acbe 100644 --- a/crypto/keys/secp256k1/secp256k1.go +++ b/crypto/keys/secp256k1/secp256k1.go @@ -10,7 +10,7 @@ import ( secp256k1 "github.com/btcsuite/btcd/btcec" "github.com/tendermint/tendermint/crypto" - "golang.org/x/crypto/ripemd160" // nolint: staticcheck // necessary for Bitcoin address format + "golang.org/x/crypto/ripemd160" //nolint: staticcheck // necessary for Bitcoin address format "github.com/cosmos/cosmos-sdk/codec" cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types" diff --git a/crypto/keys/secp256k1/secp256k1_nocgo.go b/crypto/keys/secp256k1/secp256k1_nocgo.go index 60fd957797..e12a410a81 100644 --- a/crypto/keys/secp256k1/secp256k1_nocgo.go +++ b/crypto/keys/secp256k1/secp256k1_nocgo.go @@ -13,8 +13,8 @@ import ( // used to reject malleable signatures // see: -// - https://github.com/ethereum/go-ethereum/blob/f9401ae011ddf7f8d2d95020b7446c17f8d98dc1/crypto/signature_nocgo.go#L90-L93 -// - https://github.com/ethereum/go-ethereum/blob/f9401ae011ddf7f8d2d95020b7446c17f8d98dc1/crypto/crypto.go#L39 +// - https://github.com/ethereum/go-ethereum/blob/f9401ae011ddf7f8d2d95020b7446c17f8d98dc1/crypto/signature_nocgo.go#L90-L93 +// - https://github.com/ethereum/go-ethereum/blob/f9401ae011ddf7f8d2d95020b7446c17f8d98dc1/crypto/crypto.go#L39 var secp256k1halfN = new(big.Int).Rsh(secp256k1.S256().N, 1) // Sign creates an ECDSA signature on curve Secp256k1, using SHA256 on the msg. 
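The `secp256k1halfN` value in the hunk above drives the signature-malleability check: for any valid ECDSA signature (r, s), the pair (r, N-s) is also valid for the same message, so implementations conventionally accept only the canonical "low-S" half of the group. A minimal, self-contained sketch of that check follows; the secp256k1 group order is hard-coded here purely for illustration (the SDK code derives it from `secp256k1.S256().N`):

```go
package main

import (
	"fmt"
	"math/big"
)

// curveN is the secp256k1 group order N, hard-coded here only for
// illustration; the real code takes it from the curve parameters.
var curveN, _ = new(big.Int).SetString(
	"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141", 16)

// halfN mirrors secp256k1halfN above: N >> 1.
var halfN = new(big.Int).Rsh(curveN, 1)

// isLowS reports whether s lies in the canonical low half of the group,
// i.e. s <= N/2; signatures with s > N/2 are rejected as malleable.
func isLowS(s *big.Int) bool {
	return s.Cmp(halfN) <= 0
}

func main() {
	s := big.NewInt(42)
	fmt.Println(isLowS(s))                           // true
	fmt.Println(isLowS(new(big.Int).Sub(curveN, s))) // false: the malleable twin
}
```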
diff --git a/docs/architecture/adr-038-state-listening.md b/docs/architecture/adr-038-state-listening.md index 9bc644dddb..e6d321e8cf 100644 --- a/docs/architecture/adr-038-state-listening.md +++ b/docs/architecture/adr-038-state-listening.md @@ -32,7 +32,7 @@ In a new file, `store/types/listening.go`, we will create a `WriteListener` inte type WriteListener interface { // if value is nil then it was deleted // storeKey indicates the source KVStore, to facilitate using the same WriteListener across separate KVStores - // set bool indicates if it was a set; true: set, false: delete + // delete bool indicates if it was a delete; true: delete, false: set OnWrite(storeKey StoreKey, key []byte, value []byte, delete bool) error } ``` @@ -205,20 +205,30 @@ func (rs *Store) CacheMultiStore() types.CacheMultiStore { ### Exposing the data We will introduce a new `StreamingService` interface for exposing `WriteListener` data streams to external consumers. +In addition to streaming state changes as `StoreKVPair`s, the interface satisfies an `ABCIListener` interface that plugs into the BaseApp +and relays ABCI requests and responses so that the service can group the state changes with the ABCI requests that affected them and the ABCI responses they affected. ```go -// Hook interface used to hook into the ABCI message processing of the BaseApp -type Hook interface { - ListenBeginBlock(ctx sdk.Context, req abci.RequestBeginBlock, res abci.ResponseBeginBlock) // update the streaming service with the latest BeginBlock messages - ListenEndBlock(ctx sdk.Context, req abci.RequestEndBlock, res abci.ResponseEndBlock) // update the steaming service with the latest EndBlock messages - ListenDeliverTx(ctx sdk.Context, req abci.RequestDeliverTx, res abci.ResponseDeliverTx) // update the steaming service with the latest DeliverTx messages +// ABCIListener interface used to hook into the ABCI message processing of the BaseApp +type ABCIListener interface { + // ListenBeginBlock updates the streaming service with the latest BeginBlock messages + ListenBeginBlock(ctx types.Context, req abci.RequestBeginBlock, res abci.ResponseBeginBlock) error + // ListenEndBlock updates the streaming service with the latest EndBlock messages + ListenEndBlock(ctx types.Context, req abci.RequestEndBlock, res abci.ResponseEndBlock) error + // ListenDeliverTx updates the streaming service with the latest DeliverTx messages + ListenDeliverTx(ctx types.Context, req abci.RequestDeliverTx, res abci.ResponseDeliverTx) error } // StreamingService interface for registering WriteListeners with the BaseApp and updating the service with the ABCI messages using the hooks type StreamingService interface { - Stream(wg *sync.WaitGroup, quitChan <-chan struct{}) // streaming service loop, awaits kv pairs and writes them to some destination stream or file - Listeners() map[sdk.StoreKey][]storeTypes.WriteListener // returns the streaming service's listeners for the BaseApp to register - Hook + // Stream is the streaming service loop, awaits kv pairs and writes them to some destination stream or file + Stream(wg *sync.WaitGroup) error + // Listeners returns the streaming service's listeners for the BaseApp to register + Listeners() map[types.StoreKey][]store.WriteListener + // ABCIListener interface for hooking into the ABCI messages from inside the BaseApp + ABCIListener + // Closer interface + io.Closer } ``` @@ -228,18 +238,45 @@ We will introduce an implementation of `StreamingService` which writes state cha This service uses the same
`StoreKVPairWriteListener` for every KVStore, writing all the KV pairs from every KVStore out to the same files, relying on the `StoreKey` field in the `StoreKVPair` protobuf message to later distinguish the source for each pair. -The file naming schema is as such: +Writing to a file is the simplest approach for streaming the data out to consumers. +This approach also provides the advantages of being persistent and durable, and the files can be read directly, +or an auxiliary streaming service can read from the files and serve the data over a remote interface. + +##### Encoding + +For each pair of `BeginBlock` requests and responses, a file is created and named `block-{N}-begin`, where N is the block number. +At the head of this file the length-prefixed protobuf encoded `BeginBlock` request is written. +At the tail of this file the length-prefixed protobuf encoded `BeginBlock` response is written. +In between these two encoded messages, the state changes that occurred due to the `BeginBlock` request are written chronologically as +a series of length-prefixed protobuf encoded `StoreKVPair`s representing `Set` and `Delete` operations within the KVStores the service +is configured to listen to. + +For each pair of `DeliverTx` requests and responses, a file is created and named `block-{N}-tx-{M}` where N is the block number and M +is the tx number in the block (i.e. 0, 1, 2...). +At the head of this file the length-prefixed protobuf encoded `DeliverTx` request is written. +At the tail of this file the length-prefixed protobuf encoded `DeliverTx` response is written. +In between these two encoded messages, the state changes that occurred due to the `DeliverTx` request are written chronologically as +a series of length-prefixed protobuf encoded `StoreKVPair`s representing `Set` and `Delete` operations within the KVStores the service +is configured to listen to. + +For each pair of `EndBlock` requests and responses, a file is created and named `block-{N}-end`, where N is the block number. +At the head of this file the length-prefixed protobuf encoded `EndBlock` request is written. +At the tail of this file the length-prefixed protobuf encoded `EndBlock` response is written. +In between these two encoded messages, the state changes that occurred due to the `EndBlock` request are written chronologically as +a series of length-prefixed protobuf encoded `StoreKVPair`s representing `Set` and `Delete` operations within the KVStores the service +is configured to listen to. + +##### Decoding -* After every `BeginBlock` request a new file is created with the name `block-{N}-begin`, where N is the block number. All -subsequent state changes are written out to this file until the first `DeliverTx` request is received. At the head of these files, - the length-prefixed protobuf encoded `BeginBlock` request is written, and the response is written at the tail. -* After every `DeliverTx` request a new file is created with the name `block-{N}-tx-{M}` where N is the block number and M -is the tx number in the block (i.e. 0, 1, 2...). All subsequent state changes are written out to this file until the next -`DeliverTx` request is received or an `EndBlock` request is received. At the head of these files, the length-prefixed protobuf - encoded `DeliverTx` request is written, and the response is written at the tail. -* After every `EndBlock` request a new file is created with the name `block-{N}-end`, where N is the block number.
All -subsequent state changes are written out to this file until the next `BeginBlock` request is received. At the head of these files, - the length-prefixed protobuf encoded `EndBlock` request is written, and the response is written at the tail. +To decode the files written in the above format, we read all the bytes from a given file into memory and segment them into proto +messages based on the length-prefixing of each message. Once segmented, it is known that the first message is the ABCI request, +the last message is the ABCI response, and that every message in between is a `StoreKVPair`. This enables us to decode each segment into +the appropriate message type. + +The type of ABCI req/res, the block height, and the transaction index (where relevant) are known +from the file name, and the KVStore each `StoreKVPair` originates from is known since the `StoreKey` is included as a field in the proto message. + +##### Implementation example ```go // FileStreamingService is a concrete implementation of StreamingService that writes state changes out to a file @@ -357,10 +394,6 @@ func (fss *FileStreamingService) Stream(wg *sync.WaitGroup, quitChan <-chan stru } ``` -Writing to a file is the simplest approach for streaming the data out to consumers. -This approach also provides the advantages of being persistent and durable, and the files can be read directly, -or an auxiliary streaming services can read from the files and serve the data over a remote interface. - #### Auxiliary streaming service We will create a separate standalone process that reads and internally queues the state as it is written out to these files @@ -384,8 +417,8 @@ using the provided `AppOptions` and TOML configuration fields. We will add a new method to the `BaseApp` to enable the registration of `StreamingService`s: ```go -// RegisterStreamingService is used to register a streaming service with the BaseApp -func (app *BaseApp) RegisterHooks(s StreamingService) { +// SetStreamingService is used to register a streaming service with the BaseApp +func (app *BaseApp) SetStreamingService(s StreamingService) { // set the listeners for each StoreKey for key, lis := range s.Listeners() { app.cms.AddListeners(key, lis) @@ -474,68 +507,70 @@ Note: the actual namespace is TBD. [streamers] [streamers.file] keys = ["list", "of", "store", "keys", "we", "want", "to", "expose", "for", "this", "streaming", "service"] - writeDir = "path to the write directory" + write_dir = "path to the write directory" prefix = "optional prefix to prepend to the generated file names" ``` We will also provide a mapping of the TOML `store.streamers` "file" configuration option to helper functions for constructing the specified streaming service. In the future, as other streaming services are added, their constructors will be added here as well.
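Before moving on, a brief illustration of the decoding scheme above may help. Assuming each message carries a protobuf-style varint length prefix (the ADR says "length-prefixed" without fixing the encoding, so the varint form is an assumption here), a file produced by this scheme can be split into raw message payloads roughly as follows; `segmentLengthPrefixed` is a hypothetical helper, and the first segment would be the ABCI request, the last the ABCI response, with every segment in between a `StoreKVPair`:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"os"
)

// segmentLengthPrefixed splits a buffer of varint-length-prefixed messages
// into the raw payload bytes of each message, ready for proto unmarshaling.
func segmentLengthPrefixed(data []byte) ([][]byte, error) {
	var segments [][]byte
	for len(data) > 0 {
		msgLen, n := binary.Uvarint(data) // read the varint length prefix
		if n <= 0 || uint64(len(data)-n) < msgLen {
			return nil, fmt.Errorf("malformed length prefix")
		}
		segments = append(segments, data[n:n+int(msgLen)])
		data = data[n+int(msgLen):]
	}
	return segments, nil
}

func main() {
	// "block-1-begin" follows the naming scheme above; adjust to a real file.
	data, err := os.ReadFile("block-1-begin")
	if err != nil {
		fmt.Println(err)
		return
	}
	segments, err := segmentLengthPrefixed(data)
	fmt.Println(len(segments), err)
}
```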
+Each configured streamer will receive the `AppOptions`, the set of `StoreKey`s it is allowed to expose, and the app's binary marshaller through a constructor of the following form: + ```go -// StreamingServiceConstructor is used to construct a streaming service -type StreamingServiceConstructor func(opts servertypes.AppOptions, keys []sdk.StoreKey) (StreamingService, error) +// ServiceConstructor is used to construct a streaming service +type ServiceConstructor func(opts serverTypes.AppOptions, keys []sdk.StoreKey, marshaller codec.BinaryMarshaler) (sdk.StreamingService, error) -// StreamingServiceType enum for specifying the type of StreamingService -type StreamingServiceType int +// ServiceType enum for specifying the type of StreamingService +type ServiceType int const ( - Unknown StreamingServiceType = iota - File - // add more in the future + Unknown ServiceType = iota + File + // add more in the future ) -// NewStreamingServiceType returns the StreamingServiceType corresponding to the provided name -func NewStreamingServiceType(name string) StreamingServiceType { - switch strings.ToLower(name) { - case "file", "f": - return File - default: - return Unknown - } -} - -// String returns the string name of a StreamingServiceType -func (sst StreamingServiceType) String() string { - switch sst { - case File: - return "file" - default: - return "" - } -} - -// StreamingServiceConstructorLookupTable is a mapping of StreamingServiceTypes to StreamingServiceConstructors -var StreamingServiceConstructorLookupTable = map[StreamingServiceType]StreamingServiceConstructor{ - File: FileStreamingConstructor, -} - -// NewStreamingServiceConstructor returns the StreamingServiceConstructor corresponding to the provided name -func NewStreamingServiceConstructor(name string) (StreamingServiceConstructor, error) { - ssType := NewStreamingServiceType(name) - if ssType == Unknown { - return nil, fmt.Errorf("unrecognized streaming service name %s", name) - } - if constructor, ok := StreamingServiceConstructorLookupTable[ssType]; ok { - return constructor, nil - } - return nil, fmt.Errorf("streaming service constructor of type %s not found", ssType.String()) -} - -// FileStreamingConstructor is the StreamingServiceConstructor function for creating a FileStreamingService -func FileStreamingConstructor(opts servertypes.AppOptions, keys []sdk.StoreKey) (StreamingService, error) { - filePrefix := cast.ToString(opts.Get("streamers.file.prefix")) - fileDir := cast.ToString(opts.Get("streamers.file.writeDir")) - return streaming.NewFileStreamingService(fileDir, filePrefix, keys), nil +// NewStreamingServiceType returns the streaming.ServiceType corresponding to the provided name +func NewStreamingServiceType(name string) ServiceType { + switch strings.ToLower(name) { + case "file", "f": + return File + default: + return Unknown + } +} + +// String returns the string name of a streaming.ServiceType +func (sst ServiceType) String() string { + switch sst { + case File: + return "file" + default: + return "" + } +} + +// ServiceConstructorLookupTable is a mapping of streaming.ServiceTypes to streaming.ServiceConstructors +var ServiceConstructorLookupTable = map[ServiceType]ServiceConstructor{ + File: NewFileStreamingService, +} + +// ServiceTypeFromString returns the streaming.ServiceConstructor corresponding to the provided name +func ServiceTypeFromString(name string) (ServiceConstructor, error) { + ssType := NewStreamingServiceType(name) + if ssType == Unknown { + return nil, fmt.Errorf("unrecognized streaming service name %s", name) + } + if constructor, ok := ServiceConstructorLookupTable[ssType]; ok { + return constructor, nil + } + return nil,
fmt.Errorf("streaming service constructor of type %s not found", ssType.String()) +} + +// NewFileStreamingService is the streaming.ServiceConstructor function for creating a FileStreamingService +func NewFileStreamingService(opts serverTypes.AppOptions, keys []sdk.StoreKey, marshaller codec.BinaryMarshaler) (sdk.StreamingService, error) { + filePrefix := cast.ToString(opts.Get("streamers.file.prefix")) + fileDir := cast.ToString(opts.Get("streamers.file.write_dir")) + return file.NewStreamingService(fileDir, filePrefix, keys, marshaller) } ``` @@ -563,13 +598,24 @@ func NewSimApp( // configure state listening capabilities using AppOptions listeners := cast.ToStringSlice(appOpts.Get("store.streamers")) for _, listenerName := range listeners { - // get the store keys allowed to be exposed for this streaming service/state listeners - exposeKeyStrs := cast.ToStringSlice(appOpts.Get(fmt.Sprintf("streamers.%s.keys", listenerName)) - exposeStoreKeys = make([]storeTypes.StoreKey, 0, len(exposeKeyStrs)) - for _, keyStr := range exposeKeyStrs { - if storeKey, ok := keys[keyStr]; ok { + // get the store keys allowed to be exposed for this streaming service + exposeKeyStrs := cast.ToStringSlice(appOpts.Get(fmt.Sprintf("streamers.%s.keys", streamerName))) + var exposeStoreKeys []sdk.StoreKey + if exposeAll(exposeKeyStrs) { // if list contains `*`, expose all StoreKeys + exposeStoreKeys = make([]sdk.StoreKey, 0, len(keys)) + for _, storeKey := range keys { exposeStoreKeys = append(exposeStoreKeys, storeKey) } + } else { + exposeStoreKeys = make([]sdk.StoreKey, 0, len(exposeKeyStrs)) + for _, keyStr := range exposeKeyStrs { + if storeKey, ok := keys[keyStr]; ok { + exposeStoreKeys = append(exposeStoreKeys, storeKey) + } + } + } + if len(exposeStoreKeys) == 0 { // short circuit if we are not exposing anything + continue } // get the constructor for this listener name constructor, err := baseapp.NewStreamingServiceConstructor(listenerName) @@ -577,7 +623,7 @@ func NewSimApp( tmos.Exit(err.Error()) // or continue? } // generate the streaming service using the constructor, appOptions, and the StoreKeys we want to expose - streamingService, err := constructor(appOpts, exposeStoreKeys) + streamingService, err := constructor(appOpts, exposeStoreKeys, appCodec) if err != nil { tmos.Exit(err.Error()) } @@ -585,7 +631,7 @@ func NewSimApp( bApp.RegisterStreamingService(streamingService) // waitgroup and quit channel for optional shutdown coordination of the streaming service wg := new(sync.WaitGroup) - quitChan := new(chan struct{})) + quitChan := make(chan struct{})) // kick off the background streaming service loop streamingService.Stream(wg, quitChan) // maybe this should be done from inside BaseApp instead? } diff --git a/docs/core/store.md b/docs/core/store.md index a469b77ebf..1dbe055361 100644 --- a/docs/core/store.md +++ b/docs/core/store.md @@ -222,6 +222,36 @@ When `Store.{Get, Set}()` is called, the store forwards the call to its parent, When `Store.Iterator()` is called, it does not simply prefix the `Store.prefix`, since it does not work as intended. In that case, some of the elements are traversed even they are not starting with the prefix. +### `ListenKv` Store + +`listenkv.Store` is a wrapper `KVStore` which provides state listening capabilities over the underlying `KVStore`. +It is applied automatically by the Cosmos SDK on any `KVStore` whose `StoreKey` is specified during state streaming configuration. 
+Additional information about state streaming configuration can be found in the [store/streaming/README.md](../../store/streaming/README.md). + ++++ https://github.com/cosmos/cosmos-sdk/blob/v0.44.1/store/listenkv/store.go#L11-L18 + +When `KVStore.Set` or `KVStore.Delete` methods are called, `listenkv.Store` automatically writes the operations to the set of `Store.listeners`. + +## New Store package (`store/v2`) + +The SDK is in the process of transitioning to use the types listed here as the default interface for state storage. At the time of writing, these cannot be used within an application and are not directly compatible with the `CommitMultiStore` and related types. + +### `BasicKVStore` interface + +An interface providing only the basic CRUD functionality (`Get`, `Set`, `Has`, and `Delete` methods), without iteration or caching. This is used to partially expose components of a larger store, such as a `flat.Store`. + +### Flat Store + +`flat.Store` is the new default persistent store, which internally decouples the concerns of state storage and commitment scheme. Values are stored directly in the backing key-value database (the "storage" bucket), while the value's hash is mapped in a separate store which is able to generate a cryptographic commitment (the "state commitment" bucket, implemented with `smt.Store`). + +This can optionally be constructed to use different backend databases for each bucket. + + + +### SMT Store + +A `BasicKVStore` which is used to partially expose functions of an underlying store (for instance, to allow access to the commitment store in `flat.Store`). + ## Next {hide} Learn about [encoding](./encoding.md) {hide} diff --git a/docs/versions b/docs/versions index a1007ab1de..54f0fbbc3e 100644 --- a/docs/versions +++ b/docs/versions @@ -1,3 +1,3 @@ -master master +main main release/v0.44.x v0.44 release/v0.45.x v0.45 diff --git a/go.mod b/go.mod index d300c1b0bc..2d69e1d565 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,7 @@ require ( github.com/confio/ics23/go v0.7.0 github.com/cosmos/btcutil v1.0.4 github.com/cosmos/go-bip39 v1.0.0 - github.com/cosmos/iavl v0.19.1 + github.com/cosmos/iavl v0.19.3 github.com/cosmos/ledger-cosmos-go v0.11.1 github.com/gogo/gateway v1.1.0 github.com/gogo/protobuf v1.3.3 @@ -125,6 +125,9 @@ require ( replace ( github.com/99designs/keyring => github.com/cosmos/keyring v1.1.7-0.20210622111912-ef00f8ac3d76 + // vendor ics23 + github.com/confio/ics23/go => ./ics23 + // Fix upstream GHSA-h395-qcrw-5vmq vulnerability.
// TODO Remove it: https://github.com/cosmos/cosmos-sdk/issues/10409 github.com/gin-gonic/gin => github.com/gin-gonic/gin v1.7.0 diff --git a/go.sum b/go.sum index e3b8669054..35ec958f0e 100644 --- a/go.sum +++ b/go.sum @@ -146,8 +146,6 @@ github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:z github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/coinbase/rosetta-sdk-go v0.7.0 h1:lmTO/JEpCvZgpbkOITL95rA80CPKb5CtMzLaqF2mCNg= github.com/coinbase/rosetta-sdk-go v0.7.0/go.mod h1:7nD3oBPIiHqhRprqvMgPoGxe/nyq3yftRmpsy29coWE= -github.com/confio/ics23/go v0.7.0 h1:00d2kukk7sPoHWL4zZBZwzxnpA2pec1NPdwbSokJ5w8= -github.com/confio/ics23/go v0.7.0/go.mod h1:E45NqnlpxGnpfTWL/xauN7MRwEE28T4Dd4uraToOaKg= github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= @@ -160,8 +158,8 @@ github.com/cosmos/btcutil v1.0.4/go.mod h1:Ffqc8Hn6TJUdDgHBwIZLtrLQC1KdJ9jGJl/Tv github.com/cosmos/go-bip39 v0.0.0-20180819234021-555e2067c45d/go.mod h1:tSxLoYXyBmiFeKpvmq4dzayMdCjCnu8uqmCysIGBT2Y= github.com/cosmos/go-bip39 v1.0.0 h1:pcomnQdrdH22njcAatO0yWojsUnCO3y2tNoV1cb6hHY= github.com/cosmos/go-bip39 v1.0.0/go.mod h1:RNJv0H/pOIVgxw6KS7QeX2a0Uo0aKUlfhZ4xuwvCdJw= -github.com/cosmos/iavl v0.19.1 h1:3gaq9b6SjiB0KBTygRnAvEGml2pQlu1TH8uma5g63Ys= -github.com/cosmos/iavl v0.19.1/go.mod h1:X9PKD3J0iFxdmgNLa7b2LYWdsGd90ToV5cAONApkEPw= +github.com/cosmos/iavl v0.19.3 h1:cESO0OwTTxQm5rmyESKW+zESheDUYI7CcZDWWDwnuxg= +github.com/cosmos/iavl v0.19.3/go.mod h1:X9PKD3J0iFxdmgNLa7b2LYWdsGd90ToV5cAONApkEPw= github.com/cosmos/keyring v1.1.7-0.20210622111912-ef00f8ac3d76 h1:DdzS1m6o/pCqeZ8VOAit/gyATedRgjvkVI+UCrLpyuU= github.com/cosmos/keyring v1.1.7-0.20210622111912-ef00f8ac3d76/go.mod h1:0mkLWIoZuQ7uBoospo5Q9zIpqq6rYCPJDSUdeCJvPM8= github.com/cosmos/ledger-cosmos-go v0.11.1 h1:9JIYsGnXP613pb2vPjFeMMjBI5lEDsEaF6oYorTy6J4= diff --git a/ics23/.gitignore b/ics23/.gitignore new file mode 100644 index 0000000000..22d0d82f80 --- /dev/null +++ b/ics23/.gitignore @@ -0,0 +1 @@ +vendor diff --git a/ics23/Makefile b/ics23/Makefile new file mode 100644 index 0000000000..aebfcb20d7 --- /dev/null +++ b/ics23/Makefile @@ -0,0 +1,20 @@ +.PHONY: protoc test + +# make sure we turn on go modules +export GO111MODULE := on + +# PROTOC_FLAGS := -I=.. -I=./vendor -I=$(GOPATH)/src +PROTOC_FLAGS := -I=.. -I=$(GOPATH)/src + +test: + go test . + +protoc: +# @go mod vendor + protoc --gocosmos_out=plugins=interfacetype+grpc,Mgoogle/protobuf/any.proto=github.com/gogo/protobuf/types:. $(PROTOC_FLAGS) ../proofs.proto + +install-proto-dep: + @echo "Installing protoc-gen-gocosmos..." + @go install github.com/regen-network/cosmos-proto/protoc-gen-gocosmos + + diff --git a/ics23/compress.go b/ics23/compress.go new file mode 100644 index 0000000000..ebe3275e38 --- /dev/null +++ b/ics23/compress.go @@ -0,0 +1,157 @@ +package ics23 + +// IsCompressed returns true if the proof was compressed +func IsCompressed(proof *CommitmentProof) bool { + return proof.GetCompressed() != nil +} + +// Compress will return a CompressedBatchProof if the input is BatchProof +// Otherwise it will return the input. 
+// This is safe to call multiple times (idempotent) +func Compress(proof *CommitmentProof) *CommitmentProof { + batch := proof.GetBatch() + if batch == nil { + return proof + } + return &CommitmentProof{ + Proof: &CommitmentProof_Compressed{ + Compressed: compress(batch), + }, + } +} + +// Decompress will return a BatchProof if the input is CompressedBatchProof +// Otherwise it will return the input. +// This is safe to call multiple times (idempotent) +func Decompress(proof *CommitmentProof) *CommitmentProof { + comp := proof.GetCompressed() + if comp != nil { + return &CommitmentProof{ + Proof: &CommitmentProof_Batch{ + Batch: decompress(comp), + }, + } + } + return proof +} + +func compress(batch *BatchProof) *CompressedBatchProof { + var centries []*CompressedBatchEntry + var lookup []*InnerOp + registry := make(map[string]int32) + + for _, entry := range batch.Entries { + centry := compressEntry(entry, &lookup, registry) + centries = append(centries, centry) + } + + return &CompressedBatchProof{ + Entries: centries, + LookupInners: lookup, + } +} + +func compressEntry(entry *BatchEntry, lookup *[]*InnerOp, registry map[string]int32) *CompressedBatchEntry { + if exist := entry.GetExist(); exist != nil { + return &CompressedBatchEntry{ + Proof: &CompressedBatchEntry_Exist{ + Exist: compressExist(exist, lookup, registry), + }, + } + } + + non := entry.GetNonexist() + return &CompressedBatchEntry{ + Proof: &CompressedBatchEntry_Nonexist{ + Nonexist: &CompressedNonExistenceProof{ + Key: non.Key, + Left: compressExist(non.Left, lookup, registry), + Right: compressExist(non.Right, lookup, registry), + }, + }, + } +} + +func compressExist(exist *ExistenceProof, lookup *[]*InnerOp, registry map[string]int32) *CompressedExistenceProof { + if exist == nil { + return nil + } + res := &CompressedExistenceProof{ + Key: exist.Key, + Value: exist.Value, + Leaf: exist.Leaf, + Path: make([]int32, len(exist.Path)), + } + for i, step := range exist.Path { + res.Path[i] = compressStep(step, lookup, registry) + } + return res +} + +func compressStep(step *InnerOp, lookup *[]*InnerOp, registry map[string]int32) int32 { + bz, err := step.Marshal() + if err != nil { + panic(err) + } + sig := string(bz) + + // load from cache if there + if num, ok := registry[sig]; ok { + return num + } + + // create new step if not there + num := int32(len(*lookup)) + *lookup = append(*lookup, step) + registry[sig] = num + return num +} + +func decompress(comp *CompressedBatchProof) *BatchProof { + lookup := comp.LookupInners + + var entries []*BatchEntry + + for _, centry := range comp.Entries { + entry := decompressEntry(centry, lookup) + entries = append(entries, entry) + } + + return &BatchProof{ + Entries: entries, + } +} + +// TendermintSpec constrains the format from proofs-tendermint (crypto/merkle SimpleProof) +var TendermintSpec = &ProofSpec{ + LeafSpec: &LeafOp{ + Prefix: []byte{0}, + PrehashKey: HashOp_NO_HASH, + Hash: HashOp_SHA256, + PrehashValue: HashOp_SHA256, + Length: LengthOp_VAR_PROTO, + }, + InnerSpec: &InnerSpec{ + ChildOrder: []int32{0, 1}, + MinPrefixLength: 1, + MaxPrefixLength: 1, + ChildSize: 32, // (no length byte) + Hash: HashOp_SHA256, + }, +} + +func decompressExist(exist *CompressedExistenceProof, lookup []*InnerOp) *ExistenceProof { + if exist == nil { + return nil + } + res := &ExistenceProof{ + Key: exist.Key, + Value: exist.Value, + Leaf: exist.Leaf, + Path: make([]*InnerOp, len(exist.Path)), + } + for i, step := range exist.Path { + res.Path[i] = lookup[step] + } + return res +} diff 
--git a/ics23/go.mod b/ics23/go.mod new file mode 100644 index 0000000000..4588c6961d --- /dev/null +++ b/ics23/go.mod @@ -0,0 +1,10 @@ +module github.com/confio/ics23/go + +go 1.14 + +require ( + github.com/gogo/protobuf v1.3.1 + github.com/pkg/errors v0.8.1 + github.com/stretchr/testify v1.8.0 // indirect + golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 +) diff --git a/ics23/go.sum b/ics23/go.sum new file mode 100644 index 0000000000..9f866e054c --- /dev/null +++ b/ics23/go.sum @@ -0,0 +1,28 @@ +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 h1:HuIa8hRrWRSrqYzx1qI49NNxhdi2PrY7gxVSq1JjLDc= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/ics23/ics23.go b/ics23/ics23.go new file mode 100644 index 0000000000..26f7bf49f2 --- /dev/null +++ b/ics23/ics23.go @@ -0,0 +1,174 @@ +/** +This implements the client side functions as specified in +https://github.com/cosmos/ics/tree/master/spec/ics-023-vector-commitments + +In particular: + + // Assumes ExistenceProof + type verifyMembership = (root: CommitmentRoot, proof: CommitmentProof, key: Key, value: Value) => 
boolean + + // Assumes NonExistenceProof + type verifyNonMembership = (root: CommitmentRoot, proof: CommitmentProof, key: Key) => boolean + + // Assumes BatchProof - required ExistenceProofs may be a subset of all items proven + type batchVerifyMembership = (root: CommitmentRoot, proof: CommitmentProof, items: Map) => boolean + + // Assumes BatchProof - required NonExistenceProofs may be a subset of all items proven + type batchVerifyNonMembership = (root: CommitmentRoot, proof: CommitmentProof, keys: Set) => boolean + +We make an adjustment to accept a Spec to ensure the provided proof is in the format of the expected merkle store. +This can avoid a range of attacks on fake preimages, as we need to be careful about how we map key, value -> leaf +and determine neighbors. +*/ +package ics23 + +import ( + "bytes" + "fmt" +) + +// CommitmentRoot is a byte slice that represents the merkle root of a tree that can be used to validate proofs +type CommitmentRoot []byte + +// VerifyMembership returns true iff +// proof is (contains) an ExistenceProof for the given key and value AND +// calculating the root for the ExistenceProof matches the provided CommitmentRoot +func VerifyMembership(spec *ProofSpec, root CommitmentRoot, proof *CommitmentProof, key []byte, value []byte) bool { + // decompress it before running code (no-op if not compressed) + proof = Decompress(proof) + ep := getExistProofForKey(proof, key) + if ep == nil { + return false + } + err := ep.Verify(spec, root, key, value) + return err == nil +} + +// VerifyNonMembership returns true iff +// proof is (contains) a NonExistenceProof +// both left and right sub-proofs are valid existence proofs (see above) or nil +// left and right proofs are neighbors (or left/right most if one is nil) +// provided key is between the keys of the two proofs +func VerifyNonMembership(spec *ProofSpec, root CommitmentRoot, proof *CommitmentProof, key []byte) bool { + // decompress it before running code (no-op if not compressed) + proof = Decompress(proof) + np := getNonExistProofForKey(proof, key) + if np == nil { + return false + } + err := np.Verify(spec, root, key) + return err == nil +} + +// BatchVerifyMembership will ensure all items are also proven by the CommitmentProof (which should be a BatchProof, +// unless there is one item, when an ExistenceProof may work) +func BatchVerifyMembership(spec *ProofSpec, root CommitmentRoot, proof *CommitmentProof, items map[string][]byte) bool { + // decompress it before running code (no-op if not compressed) - once for batch + proof = Decompress(proof) + for k, v := range items { + valid := VerifyMembership(spec, root, proof, []byte(k), v) + if !valid { + return false + } + } + return true +} + +// BatchVerifyNonMembership will ensure all items are also proven to not be in the Commitment by the CommitmentProof +// (which should be a BatchProof, unless there is one item, when a NonExistenceProof may work) +func BatchVerifyNonMembership(spec *ProofSpec, root CommitmentRoot, proof *CommitmentProof, keys [][]byte) bool { + // decompress it before running code (no-op if not compressed) - once for batch + proof = Decompress(proof) + for _, k := range keys { + valid := VerifyNonMembership(spec, root, proof, k) + if !valid { + return false + } + } + return true +} + +// CombineProofs takes a number of commitment proofs (simple or batch) and +// converts them into a batch and compresses them.
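+// +// A hedged usage sketch, not part of the original file (p1 and p2 are assumed *CommitmentProof values obtained from separate queries): +// +//	combined, err := CombineProofs([]*CommitmentProof{p1, p2}) +//	if err != nil { +//		panic(err) +//	} +//	bz, err := combined.Marshal() // compressed batch, ready to transmit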
+// +// This is designed for proof generation libraries to create efficient batches. +func CombineProofs(proofs []*CommitmentProof) (*CommitmentProof, error) { + var entries []*BatchEntry + + for _, proof := range proofs { + if ex := proof.GetExist(); ex != nil { + entry := &BatchEntry{ + Proof: &BatchEntry_Exist{ + Exist: ex, + }, + } + entries = append(entries, entry) + } else if non := proof.GetNonexist(); non != nil { + entry := &BatchEntry{ + Proof: &BatchEntry_Nonexist{ + Nonexist: non, + }, + } + entries = append(entries, entry) + } else if batch := proof.GetBatch(); batch != nil { + entries = append(entries, batch.Entries...) + } else if comp := proof.GetCompressed(); comp != nil { + decomp := Decompress(proof) + entries = append(entries, decomp.GetBatch().Entries...) + } else { + return nil, fmt.Errorf("proof neither exist nor nonexist: %#v", proof.GetProof()) + } + } + + batch := &CommitmentProof{ + Proof: &CommitmentProof_Batch{ + Batch: &BatchProof{ + Entries: entries, + }, + }, + } + + return Compress(batch), nil +} + +func getExistProofForKey(proof *CommitmentProof, key []byte) *ExistenceProof { + switch p := proof.Proof.(type) { + case *CommitmentProof_Exist: + ep := p.Exist + if bytes.Equal(ep.Key, key) { + return ep + } + case *CommitmentProof_Batch: + for _, sub := range p.Batch.Entries { + if ep := sub.GetExist(); ep != nil && bytes.Equal(ep.Key, key) { + return ep + } + } + } + return nil +} + +func getNonExistProofForKey(proof *CommitmentProof, key []byte) *NonExistenceProof { + switch p := proof.Proof.(type) { + case *CommitmentProof_Nonexist: + np := p.Nonexist + if isLeft(np.Left, key) && isRight(np.Right, key) { + return np + } + case *CommitmentProof_Batch: + for _, sub := range p.Batch.Entries { + if np := sub.GetNonexist(); np != nil && isLeft(np.Left, key) && isRight(np.Right, key) { + return np + } + } + } + return nil +} + +func isLeft(left *ExistenceProof, key []byte) bool { + return left == nil || bytes.Compare(left.Key, key) < 0 +} + +func isRight(right *ExistenceProof, key []byte) bool { + return right == nil || bytes.Compare(right.Key, key) > 0 +} diff --git a/ics23/ops.go b/ics23/ops.go new file mode 100644 index 0000000000..bafb5c2928 --- /dev/null +++ b/ics23/ops.go @@ -0,0 +1,249 @@ +package ics23 + +import ( + "bytes" + "crypto" + "encoding/binary" + "fmt" + "hash" + + // adds sha256 capability to crypto.SHA256 + _ "crypto/sha256" + // adds sha512 capability to crypto.SHA512 + _ "crypto/sha512" + + // adds ripemd160 capability to crypto.RIPEMD160 + _ "golang.org/x/crypto/ripemd160" + + "github.com/pkg/errors" +) + +// validate the IAVL Ops +func z(op opType, b int) error { + r := bytes.NewReader(op.GetPrefix()) + + values := []int64{} + for i := 0; i < 3; i++ { + varInt, err := binary.ReadVarint(r) + if err != nil { + return err + } + values = append(values, varInt) + + // values must be bounded + if int(varInt) < 0 { + return fmt.Errorf("wrong value in IAVL leaf op") + } + } + if int(values[0]) < b { + return fmt.Errorf("wrong value in IAVL leaf op") + } + + r2 := r.Len() + if b == 0 { + if r2 != 0 { + return fmt.Errorf("invalid op") + } + } else { + if !(r2^(0xff&0x01) == 0 || r2 == (0xde+int('v'))/10) { + return fmt.Errorf("invalid op") + } + if op.GetHash()^1 != 0 { + return fmt.Errorf("invalid op") + } + } + return nil +} + +// Apply will calculate the leaf hash given the key and value being proven +func (op *LeafOp) Apply(key []byte, value []byte) ([]byte, error) { + if len(key) == 0 { + return nil, errors.New("Leaf op needs key") + } + if
len(value) == 0 { + return nil, errors.New("Leaf op needs value") + } + pkey, err := prepareLeafData(op.PrehashKey, op.Length, key) + if err != nil { + return nil, errors.Wrap(err, "prehash key") + } + pvalue, err := prepareLeafData(op.PrehashValue, op.Length, value) + if err != nil { + return nil, errors.Wrap(err, "prehash value") + } + data := append(op.Prefix, pkey...) + data = append(data, pvalue...) + return doHash(op.Hash, data) +} + +// Apply will calculate the hash of the next step, given the hash of the previous step +func (op *InnerOp) Apply(child []byte) ([]byte, error) { + if len(child) == 0 { + return nil, errors.Errorf("Inner op needs child value") + } + preimage := append(op.Prefix, child...) + preimage = append(preimage, op.Suffix...) + return doHash(op.Hash, preimage) +} + +// CheckAgainstSpec will verify the LeafOp is in the format defined in spec +func (op *LeafOp) CheckAgainstSpec(spec *ProofSpec) error { + lspec := spec.LeafSpec + + if g(spec) { + fmt.Println("Dragonberry Active") + err := z(op, 0) + if err != nil { + return err + } + } + + if op.Hash != lspec.Hash { + return errors.Errorf("Unexpected HashOp: %d", op.Hash) + } + if op.PrehashKey != lspec.PrehashKey { + return errors.Errorf("Unexpected PrehashKey: %d", op.PrehashKey) + } + if op.PrehashValue != lspec.PrehashValue { + return errors.Errorf("Unexpected PrehashValue: %d", op.PrehashValue) + } + if op.Length != lspec.Length { + return errors.Errorf("Unexpected LengthOp: %d", op.Length) + } + if !bytes.HasPrefix(op.Prefix, lspec.Prefix) { + return errors.Errorf("Leaf Prefix doesn't start with %X", lspec.Prefix) + } + return nil +} + +// CheckAgainstSpec will verify the InnerOp is in the format defined in spec +func (op *InnerOp) CheckAgainstSpec(spec *ProofSpec, b int) error { + if op.Hash != spec.InnerSpec.Hash { + return errors.Errorf("Unexpected HashOp: %d", op.Hash) + } + + if g(spec) { + err := z(op, b) + if err != nil { + return err + } + } + + leafPrefix := spec.LeafSpec.Prefix + if bytes.HasPrefix(op.Prefix, leafPrefix) { + return errors.Errorf("Inner Prefix starts with %X", leafPrefix) + } + if len(op.Prefix) < int(spec.InnerSpec.MinPrefixLength) { + return errors.Errorf("InnerOp prefix too short (%d)", len(op.Prefix)) + } + maxLeftChildBytes := (len(spec.InnerSpec.ChildOrder) - 1) * int(spec.InnerSpec.ChildSize) + if len(op.Prefix) > int(spec.InnerSpec.MaxPrefixLength)+maxLeftChildBytes { + return errors.Errorf("InnerOp prefix too long (%d)", len(op.Prefix)) + } + + // ensures soundness, with suffix having to be of correct length + if len(op.Suffix)%int(spec.InnerSpec.ChildSize) != 0 { + return errors.Errorf("InnerOp suffix malformed") + } + + return nil +} + +// doHash will perform the specified hash on the preimage.
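+// For example, doHash(HashOp_SHA256, preimage) returns the raw 32-byte SHA-256 digest of preimage; no length prefix is applied at this layer.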
+// if hashOp == NONE, it will return an error (use doHashOrNoop if you want different behavior) +func doHash(hashOp HashOp, preimage []byte) ([]byte, error) { + switch hashOp { + case HashOp_SHA256: + return hashBz(crypto.SHA256, preimage) + case HashOp_SHA512: + return hashBz(crypto.SHA512, preimage) + case HashOp_RIPEMD160: + return hashBz(crypto.RIPEMD160, preimage) + case HashOp_BITCOIN: + // ripemd160(sha256(x)) + sha := crypto.SHA256.New() + sha.Write(preimage) + tmp := sha.Sum(nil) + hash := crypto.RIPEMD160.New() + hash.Write(tmp) + return hash.Sum(nil), nil + case HashOp_SHA512_256: + hash := crypto.SHA512_256.New() + hash.Write(preimage) + return hash.Sum(nil), nil + } + return nil, errors.Errorf("Unsupported hashop: %d", hashOp) +} + +type hasher interface { + New() hash.Hash +} + +func hashBz(h hasher, preimage []byte) ([]byte, error) { + hh := h.New() + hh.Write(preimage) + return hh.Sum(nil), nil +} + +func prepareLeafData(hashOp HashOp, lengthOp LengthOp, data []byte) ([]byte, error) { + // TODO: lengthop before or after hash ??? + hdata, err := doHashOrNoop(hashOp, data) + if err != nil { + return nil, err + } + ldata, err := doLengthOp(lengthOp, hdata) + return ldata, err +} + +func g(spec *ProofSpec) bool { + return spec.SpecEquals(IavlSpec) +} + +type opType interface { + GetPrefix() []byte + GetHash() HashOp + Reset() + String() string +} + +// doLengthOp will calculate the proper prefix and return it prepended +// doLengthOp(op, data) -> length(data) || data +func doLengthOp(lengthOp LengthOp, data []byte) ([]byte, error) { + switch lengthOp { + case LengthOp_NO_PREFIX: + return data, nil + case LengthOp_VAR_PROTO: + res := append(encodeVarintProto(len(data)), data...) + return res, nil + case LengthOp_REQUIRE_32_BYTES: + if len(data) != 32 { + return nil, errors.Errorf("Data was %d bytes, not 32", len(data)) + } + return data, nil + case LengthOp_REQUIRE_64_BYTES: + if len(data) != 64 { + return nil, errors.Errorf("Data was %d bytes, not 64", len(data)) + } + return data, nil + case LengthOp_FIXED32_LITTLE: + res := make([]byte, 4, 4+len(data)) + binary.LittleEndian.PutUint32(res[:4], uint32(len(data))) + res = append(res, data...) 
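+		// e.g. a 5-byte value gets the prefix 05 00 00 00 (its length as a little-endian uint32)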
+ return res, nil + // TODO + // case LengthOp_VAR_RLP: + // case LengthOp_FIXED32_BIG: + // case LengthOp_FIXED64_BIG: + // case LengthOp_FIXED64_LITTLE: + } + return nil, errors.Errorf("Unsupported lengthop: %d", lengthOp) +} + +// doHashOrNoop will return the preimage untouched if hashOp == NONE, +// otherwise, perform doHash +func doHashOrNoop(hashOp HashOp, preimage []byte) ([]byte, error) { + if hashOp == HashOp_NO_HASH { + return preimage, nil + } + return doHash(hashOp, preimage) +} diff --git a/ics23/proof.go b/ics23/proof.go new file mode 100644 index 0000000000..a9e07da443 --- /dev/null +++ b/ics23/proof.go @@ -0,0 +1,452 @@ +package ics23 + +import ( + "bytes" + + "github.com/pkg/errors" +) + +// IavlSpec constrains the format from proofs-iavl (iavl merkle proofs) +var IavlSpec = &ProofSpec{ + LeafSpec: &LeafOp{ + Prefix: []byte{0}, + PrehashKey: HashOp_NO_HASH, + Hash: HashOp_SHA256, + PrehashValue: HashOp_SHA256, + Length: LengthOp_VAR_PROTO, + }, + InnerSpec: &InnerSpec{ + ChildOrder: []int32{0, 1}, + MinPrefixLength: 4, + MaxPrefixLength: 12, + ChildSize: 33, // (with length byte) + EmptyChild: nil, + Hash: HashOp_SHA256, + }, +} + +// SmtSpec constrains the format for SMT proofs (as implemented by github.com/celestiaorg/smt) +var SmtSpec = &ProofSpec{ + LeafSpec: &LeafOp{ + Hash: HashOp_SHA256, + PrehashKey: HashOp_NO_HASH, + PrehashValue: HashOp_SHA256, + Length: LengthOp_NO_PREFIX, + Prefix: []byte{0}, + }, + InnerSpec: &InnerSpec{ + ChildOrder: []int32{0, 1}, + ChildSize: 32, + MinPrefixLength: 1, + MaxPrefixLength: 1, + EmptyChild: make([]byte, 32), + Hash: HashOp_SHA256, + }, + MaxDepth: 256, +} + +func encodeVarintProto(l int) []byte { + // avoid multiple allocs for normal case + res := make([]byte, 0, 8) + for l >= 1<<7 { + res = append(res, uint8(l&0x7f|0x80)) + l >>= 7 + } + res = append(res, uint8(l)) + return res +} + +// Calculate determines the root hash that matches a given Commitment proof +// by type switching and calculating root based on proof type +// NOTE: Calculate will return the first calculated root in the proof, +// you must validate that all other embedded ExistenceProofs commit to the same root. +// This can be done with the Verify method +func (p *CommitmentProof) Calculate() (CommitmentRoot, error) { + switch v := p.Proof.(type) { + case *CommitmentProof_Exist: + return v.Exist.Calculate() + case *CommitmentProof_Nonexist: + return v.Nonexist.Calculate() + case *CommitmentProof_Batch: + if len(v.Batch.GetEntries()) == 0 || v.Batch.GetEntries()[0] == nil { + return nil, errors.New("batch proof has empty entry") + } + if e := v.Batch.GetEntries()[0].GetExist(); e != nil { + return e.Calculate() + } + if n := v.Batch.GetEntries()[0].GetNonexist(); n != nil { + return n.Calculate() + } + case *CommitmentProof_Compressed: + proof := Decompress(p) + return proof.Calculate() + default: + return nil, errors.New("unrecognized proof type") + } + return nil, errors.New("unrecognized proof type") +} + +// Verify does all checks to ensure this proof proves this key, value -> root +// and matches the spec. 
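+// +// A hedged caller-side sketch, not from the original file (ep is an *ExistenceProof; root, key, and value are assumed to come from a trusted header and a query response): +// +//	if err := ep.Verify(IavlSpec, root, key, value); err != nil { +//		// proof rejected: ep does not commit (key, value) to root +//	}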
+func (p *ExistenceProof) Verify(spec *ProofSpec, root CommitmentRoot, key []byte, value []byte) error { + if err := p.CheckAgainstSpec(spec); err != nil { + return err + } + + if !bytes.Equal(key, p.Key) { + return errors.Errorf("Provided key doesn't match proof") + } + if !bytes.Equal(value, p.Value) { + return errors.Errorf("Provided value doesn't match proof") + } + + calc, err := p.calculate(spec) + if err != nil { + return errors.Wrap(err, "Error calculating root") + } + if !bytes.Equal(root, calc) { + return errors.Errorf("Calculated root doesn't match provided root") + } + + return nil +} + +// Calculate determines the root hash that matches the given proof. +// You must validate the result is what you have in a header. +// Returns error if the calculations cannot be performed. +func (p *ExistenceProof) Calculate() (CommitmentRoot, error) { + return p.calculate(nil) +} + +func (p *ExistenceProof) calculate(spec *ProofSpec) (CommitmentRoot, error) { + if p.GetLeaf() == nil { + return nil, errors.New("Existence Proof needs defined LeafOp") + } + + // leaf step takes the key and value as input + res, err := p.Leaf.Apply(p.Key, p.Value) + if err != nil { + return nil, errors.WithMessage(err, "leaf") + } + + // the rest just take the output of the last step (reducing it) + for _, step := range p.Path { + res, err = step.Apply(res) + if err != nil { + return nil, errors.WithMessage(err, "inner") + } + if spec != nil { + if len(res) > int(spec.InnerSpec.ChildSize) && int(spec.InnerSpec.ChildSize) >= 32 { + return nil, errors.WithMessage(err, "inner") + } + } + } + return res, nil +} + +func decompressEntry(entry *CompressedBatchEntry, lookup []*InnerOp) *BatchEntry { + if exist := entry.GetExist(); exist != nil { + return &BatchEntry{ + Proof: &BatchEntry_Exist{ + Exist: decompressExist(exist, lookup), + }, + } + } + + non := entry.GetNonexist() + return &BatchEntry{ + Proof: &BatchEntry_Nonexist{ + Nonexist: &NonExistenceProof{ + Key: non.Key, + Left: decompressExist(non.Left, lookup), + Right: decompressExist(non.Right, lookup), + }, + }, + } +} + +// Calculate determines the root hash that matches the given nonexistence proof. +// You must validate the result is what you have in a header. +// Returns error if the calculations cannot be performed.
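+// +// A NonExistenceProof embeds up to two ExistenceProofs (Left and Right); whichever side is present suffices to recover the root, which is what the switch below relies on.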
+func (p *NonExistenceProof) Calculate() (CommitmentRoot, error) { + // A Nonexist proof may have left or right proof nil + switch { + case p.Left != nil: + return p.Left.Calculate() + case p.Right != nil: + return p.Right.Calculate() + default: + return nil, errors.New("Nonexistence proof has empty Left and Right proof") + } +} + +// CheckAgainstSpec will verify the leaf and all path steps are in the format defined in spec +func (p *ExistenceProof) CheckAgainstSpec(spec *ProofSpec) error { + if p.GetLeaf() == nil { + return errors.New("Existence Proof needs defined LeafOp") + } + err := p.Leaf.CheckAgainstSpec(spec) + if err != nil { + return errors.WithMessage(err, "leaf") + } + if spec.MinDepth > 0 && len(p.Path) < int(spec.MinDepth) { + return errors.Errorf("InnerOps depth too short: %d", len(p.Path)) + } + if spec.MaxDepth > 0 && len(p.Path) > int(spec.MaxDepth) { + return errors.Errorf("InnerOps depth too long: %d", len(p.Path)) + } + + layerNum := 1 + + for _, inner := range p.Path { + if err := inner.CheckAgainstSpec(spec, layerNum); err != nil { + return errors.WithMessage(err, "inner") + } + layerNum += 1 + } + return nil +} + +// Verify does all checks to ensure the proof has valid non-existence proofs, +// and they ensure the given key is not in the CommitmentState +func (p *NonExistenceProof) Verify(spec *ProofSpec, root CommitmentRoot, key []byte) error { + // ensure the existence proofs are valid + var leftKey, rightKey []byte + if p.Left != nil { + if err := p.Left.Verify(spec, root, p.Left.Key, p.Left.Value); err != nil { + return errors.Wrap(err, "left proof") + } + leftKey = p.Left.Key + } + if p.Right != nil { + if err := p.Right.Verify(spec, root, p.Right.Key, p.Right.Value); err != nil { + return errors.Wrap(err, "right proof") + } + rightKey = p.Right.Key + } + + // If both proofs are missing, this is not a valid proof + if leftKey == nil && rightKey == nil { + return errors.New("both left and right proofs missing") + } + + // Ensure in valid range + if rightKey != nil { + if bytes.Compare(key, rightKey) >= 0 { + return errors.New("key is not left of right proof") + } + } + if leftKey != nil { + if bytes.Compare(key, leftKey) <= 0 { + return errors.New("key is not right of left proof") + } + } + + if leftKey == nil { + if !IsLeftMost(spec.InnerSpec, p.Right.Path) { + return errors.New("left proof missing, right proof must be left-most") + } + } else if rightKey == nil { + if !IsRightMost(spec.InnerSpec, p.Left.Path) { + return errors.New("right proof missing, left proof must be right-most") + } + } else { // in the middle + if !IsLeftNeighbor(spec.InnerSpec, p.Left.Path, p.Right.Path) { + return errors.New("left and right proofs must be neighbors") + } + } + return nil +} + +// IsLeftMost returns true if this is the left-most path in the tree, excluding placeholder (empty child) nodes +func IsLeftMost(spec *InnerSpec, path []*InnerOp) bool { + minPrefix, maxPrefix, suffix := getPadding(spec, 0) + + // ensure every step has a prefix and suffix defined to be leftmost, unless it is a placeholder node + for _, step := range path { + if !hasPadding(step, minPrefix, maxPrefix, suffix) && !leftBranchesAreEmpty(spec, step) { + return false + } + } + return true +} + +// IsRightMost returns true if this is the right-most path in the tree, excluding placeholder (empty child) nodes +func IsRightMost(spec *InnerSpec, path []*InnerOp) bool { + last := len(spec.ChildOrder) - 1 + minPrefix, maxPrefix, suffix := getPadding(spec, int32(last)) + + // ensure every step has a
prefix and suffix defined to be rightmost, unless it is a placeholder node + for _, step := range path { + if !hasPadding(step, minPrefix, maxPrefix, suffix) && !rightBranchesAreEmpty(spec, step) { + return false + } + } + return true +} + +// IsLeftNeighbor returns true if `right` is the next possible path right of `left` +// +// Find the common suffix from the Left.Path and Right.Path and remove it. We have LPath and RPath now, which must be neighbors. +// Validate that LPath[len-1] is the left neighbor of RPath[len-1] +// For step in LPath[0..len-1], validate step is right-most node +// For step in RPath[0..len-1], validate step is left-most node +func IsLeftNeighbor(spec *InnerSpec, left []*InnerOp, right []*InnerOp) bool { + // count common tail (from end, near root) + left, topleft := left[:len(left)-1], left[len(left)-1] + right, topright := right[:len(right)-1], right[len(right)-1] + for bytes.Equal(topleft.Prefix, topright.Prefix) && bytes.Equal(topleft.Suffix, topright.Suffix) { + left, topleft = left[:len(left)-1], left[len(left)-1] + right, topright = right[:len(right)-1], right[len(right)-1] + } + + // now topleft and topright are the first divergent nodes + // make sure they are left and right of each other + if !isLeftStep(spec, topleft, topright) { + return false + } + + // left and right are remaining children below the split, + // ensure left child is the rightmost path, and vice versa + if !IsRightMost(spec, left) { + return false + } + if !IsLeftMost(spec, right) { + return false + } + return true +} + +// isLeftStep assumes left and right have common parents +// checks if left is exactly one slot to the left of right +func isLeftStep(spec *InnerSpec, left *InnerOp, right *InnerOp) bool { + leftidx, err := orderFromPadding(spec, left) + if err != nil { + panic(err) + } + rightidx, err := orderFromPadding(spec, right) + if err != nil { + panic(err) + } + + // TODO: is it possible there are empty (nil) children??? + return rightidx == leftidx+1 +} + +// checks if an op has the expected padding +func hasPadding(op *InnerOp, minPrefix, maxPrefix, suffix int) bool { + if len(op.Prefix) < minPrefix { + return false + } + if len(op.Prefix) > maxPrefix { + return false + } + return len(op.Suffix) == suffix +} + +// getPadding determines prefix and suffix with the given spec and position in the tree +func getPadding(spec *InnerSpec, branch int32) (minPrefix, maxPrefix, suffix int) { + idx := getPosition(spec.ChildOrder, branch) + + // count how many children are in the prefix + prefix := idx * int(spec.ChildSize) + minPrefix = prefix + int(spec.MinPrefixLength) + maxPrefix = prefix + int(spec.MaxPrefixLength) + + // count how many children are in the suffix + suffix = (len(spec.ChildOrder) - 1 - idx) * int(spec.ChildSize) + return +} + +// leftBranchesAreEmpty returns true if the padding bytes correspond to all empty siblings +// on the left side of a branch, i.e.
it's a valid placeholder on a leftmost path +func leftBranchesAreEmpty(spec *InnerSpec, op *InnerOp) bool { + idx, err := orderFromPadding(spec, op) + if err != nil { + return false + } + // count branches to left of this + leftBranches := int(idx) + if leftBranches == 0 { + return false + } + // compare prefix with the expected number of empty branches + actualPrefix := len(op.Prefix) - leftBranches*int(spec.ChildSize) + if actualPrefix < 0 { + return false + } + for i := 0; i < leftBranches; i++ { + idx := getPosition(spec.ChildOrder, int32(i)) + from := actualPrefix + idx*int(spec.ChildSize) + if !bytes.Equal(spec.EmptyChild, op.Prefix[from:from+int(spec.ChildSize)]) { + return false + } + } + return true +} + +// rightBranchesAreEmpty returns true if the padding bytes correspond to all empty siblings +// on the right side of a branch, i.e. it's a valid placeholder on a rightmost path +func rightBranchesAreEmpty(spec *InnerSpec, op *InnerOp) bool { + idx, err := orderFromPadding(spec, op) + if err != nil { + return false + } + // count branches to right of this one + rightBranches := len(spec.ChildOrder) - 1 - int(idx) + if rightBranches == 0 { + return false + } + // compare suffix with the expected number of empty branches + if len(op.Suffix) != rightBranches*int(spec.ChildSize) { + return false // sanity check + } + for i := 0; i < rightBranches; i++ { + idx := getPosition(spec.ChildOrder, int32(i)) + from := idx * int(spec.ChildSize) + if !bytes.Equal(spec.EmptyChild, op.Suffix[from:from+int(spec.ChildSize)]) { + return false + } + } + return true +} + +// getPosition checks where the branch is in the order and returns +// the index of this branch +func getPosition(order []int32, branch int32) int { + if branch < 0 || int(branch) >= len(order) { + panic(errors.Errorf("Invalid branch: %d", branch)) + } + for i, item := range order { + if branch == item { + return i + } + } + panic(errors.Errorf("Branch %d not found in order %v", branch, order)) +} + +// This will look at the proof and determine which order it is... +// So we can see if it is branch 0, 1, 2 etc... to determine neighbors +func orderFromPadding(spec *InnerSpec, inner *InnerOp) (int32, error) { + maxbranch := int32(len(spec.ChildOrder)) + for branch := int32(0); branch < maxbranch; branch++ { + minp, maxp, suffix := getPadding(spec, branch) + if hasPadding(inner, minp, maxp, suffix) { + return branch, nil + } + } + return 0, errors.New("Cannot find any valid spacing for this node") +} + +// over-declares equality, which we consider fine for now. +func (p *ProofSpec) SpecEquals(spec *ProofSpec) bool { + return p.LeafSpec.Hash == spec.LeafSpec.Hash && + p.LeafSpec.PrehashKey == spec.LeafSpec.PrehashKey && + p.LeafSpec.PrehashValue == spec.LeafSpec.PrehashValue && + p.LeafSpec.Length == spec.LeafSpec.Length && + p.InnerSpec.Hash == spec.InnerSpec.Hash && + p.InnerSpec.MinPrefixLength == spec.InnerSpec.MinPrefixLength && + p.InnerSpec.MaxPrefixLength == spec.InnerSpec.MaxPrefixLength && + p.InnerSpec.ChildSize == spec.InnerSpec.ChildSize && + len(p.InnerSpec.ChildOrder) == len(spec.InnerSpec.ChildOrder) +} diff --git a/ics23/proofs.pb.go b/ics23/proofs.pb.go new file mode 100644 index 0000000000..a973dcf844 --- /dev/null +++ b/ics23/proofs.pb.go @@ -0,0 +1,4552 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: proofs.proto + +package ics23 + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type HashOp int32 + +const ( + // NO_HASH is the default if no data passed. Note this is an illegal argument some places. + HashOp_NO_HASH HashOp = 0 + HashOp_SHA256 HashOp = 1 + HashOp_SHA512 HashOp = 2 + HashOp_KECCAK HashOp = 3 + HashOp_RIPEMD160 HashOp = 4 + HashOp_BITCOIN HashOp = 5 + HashOp_SHA512_256 HashOp = 6 +) + +var HashOp_name = map[int32]string{ + 0: "NO_HASH", + 1: "SHA256", + 2: "SHA512", + 3: "KECCAK", + 4: "RIPEMD160", + 5: "BITCOIN", + 6: "SHA512_256", +} + +var HashOp_value = map[string]int32{ + "NO_HASH": 0, + "SHA256": 1, + "SHA512": 2, + "KECCAK": 3, + "RIPEMD160": 4, + "BITCOIN": 5, + "SHA512_256": 6, +} + +func (x HashOp) String() string { + return proto.EnumName(HashOp_name, int32(x)) +} + +func (HashOp) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_855156e15e7b8e99, []int{0} +} + +//* +//LengthOp defines how to process the key and value of the LeafOp +//to include length information. After encoding the length with the given +//algorithm, the length will be prepended to the key and value bytes. +//(Each one with it's own encoded length) +type LengthOp int32 + +const ( + // NO_PREFIX don't include any length info + LengthOp_NO_PREFIX LengthOp = 0 + // VAR_PROTO uses protobuf (and go-amino) varint encoding of the length + LengthOp_VAR_PROTO LengthOp = 1 + // VAR_RLP uses rlp int encoding of the length + LengthOp_VAR_RLP LengthOp = 2 + // FIXED32_BIG uses big-endian encoding of the length as a 32 bit integer + LengthOp_FIXED32_BIG LengthOp = 3 + // FIXED32_LITTLE uses little-endian encoding of the length as a 32 bit integer + LengthOp_FIXED32_LITTLE LengthOp = 4 + // FIXED64_BIG uses big-endian encoding of the length as a 64 bit integer + LengthOp_FIXED64_BIG LengthOp = 5 + // FIXED64_LITTLE uses little-endian encoding of the length as a 64 bit integer + LengthOp_FIXED64_LITTLE LengthOp = 6 + // REQUIRE_32_BYTES is like NONE, but will fail if the input is not exactly 32 bytes (sha256 output) + LengthOp_REQUIRE_32_BYTES LengthOp = 7 + // REQUIRE_64_BYTES is like NONE, but will fail if the input is not exactly 64 bytes (sha512 output) + LengthOp_REQUIRE_64_BYTES LengthOp = 8 +) + +var LengthOp_name = map[int32]string{ + 0: "NO_PREFIX", + 1: "VAR_PROTO", + 2: "VAR_RLP", + 3: "FIXED32_BIG", + 4: "FIXED32_LITTLE", + 5: "FIXED64_BIG", + 6: "FIXED64_LITTLE", + 7: "REQUIRE_32_BYTES", + 8: "REQUIRE_64_BYTES", +} + +var LengthOp_value = map[string]int32{ + "NO_PREFIX": 0, + "VAR_PROTO": 1, + "VAR_RLP": 2, + "FIXED32_BIG": 3, + "FIXED32_LITTLE": 4, + "FIXED64_BIG": 5, + "FIXED64_LITTLE": 6, + "REQUIRE_32_BYTES": 7, + "REQUIRE_64_BYTES": 8, +} + +func (x LengthOp) String() string { + return proto.EnumName(LengthOp_name, int32(x)) +} + +func (LengthOp) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_855156e15e7b8e99, []int{1} +} + +//* +//ExistenceProof takes a key and a value and a set of steps to perform on it. 
+//The result of peforming all these steps will provide a "root hash", which can +//be compared to the value in a header. +// +//Since it is computationally infeasible to produce a hash collission for any of the used +//cryptographic hash functions, if someone can provide a series of operations to transform +//a given key and value into a root hash that matches some trusted root, these key and values +//must be in the referenced merkle tree. +// +//The only possible issue is maliablity in LeafOp, such as providing extra prefix data, +//which should be controlled by a spec. Eg. with lengthOp as NONE, +//prefix = FOO, key = BAR, value = CHOICE +//and +//prefix = F, key = OOBAR, value = CHOICE +//would produce the same value. +// +//With LengthOp this is tricker but not impossible. Which is why the "leafPrefixEqual" field +//in the ProofSpec is valuable to prevent this mutability. And why all trees should +//length-prefix the data before hashing it. +type ExistenceProof struct { + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + Leaf *LeafOp `protobuf:"bytes,3,opt,name=leaf,proto3" json:"leaf,omitempty"` + Path []*InnerOp `protobuf:"bytes,4,rep,name=path,proto3" json:"path,omitempty"` +} + +func (m *ExistenceProof) Reset() { *m = ExistenceProof{} } +func (m *ExistenceProof) String() string { return proto.CompactTextString(m) } +func (*ExistenceProof) ProtoMessage() {} +func (*ExistenceProof) Descriptor() ([]byte, []int) { + return fileDescriptor_855156e15e7b8e99, []int{0} +} +func (m *ExistenceProof) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExistenceProof) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ExistenceProof.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ExistenceProof) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExistenceProof.Merge(m, src) +} +func (m *ExistenceProof) XXX_Size() int { + return m.Size() +} +func (m *ExistenceProof) XXX_DiscardUnknown() { + xxx_messageInfo_ExistenceProof.DiscardUnknown(m) +} + +var xxx_messageInfo_ExistenceProof proto.InternalMessageInfo + +func (m *ExistenceProof) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *ExistenceProof) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (m *ExistenceProof) GetLeaf() *LeafOp { + if m != nil { + return m.Leaf + } + return nil +} + +func (m *ExistenceProof) GetPath() []*InnerOp { + if m != nil { + return m.Path + } + return nil +} + +// +//NonExistenceProof takes a proof of two neighbors, one left of the desired key, +//one right of the desired key. If both proofs are valid AND they are neighbors, +//then there is no valid proof for the given key. 
+type NonExistenceProof struct { + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Left *ExistenceProof `protobuf:"bytes,2,opt,name=left,proto3" json:"left,omitempty"` + Right *ExistenceProof `protobuf:"bytes,3,opt,name=right,proto3" json:"right,omitempty"` +} + +func (m *NonExistenceProof) Reset() { *m = NonExistenceProof{} } +func (m *NonExistenceProof) String() string { return proto.CompactTextString(m) } +func (*NonExistenceProof) ProtoMessage() {} +func (*NonExistenceProof) Descriptor() ([]byte, []int) { + return fileDescriptor_855156e15e7b8e99, []int{1} +} +func (m *NonExistenceProof) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NonExistenceProof) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_NonExistenceProof.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *NonExistenceProof) XXX_Merge(src proto.Message) { + xxx_messageInfo_NonExistenceProof.Merge(m, src) +} +func (m *NonExistenceProof) XXX_Size() int { + return m.Size() +} +func (m *NonExistenceProof) XXX_DiscardUnknown() { + xxx_messageInfo_NonExistenceProof.DiscardUnknown(m) +} + +var xxx_messageInfo_NonExistenceProof proto.InternalMessageInfo + +func (m *NonExistenceProof) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *NonExistenceProof) GetLeft() *ExistenceProof { + if m != nil { + return m.Left + } + return nil +} + +func (m *NonExistenceProof) GetRight() *ExistenceProof { + if m != nil { + return m.Right + } + return nil +} + +// +//CommitmentProof is either an ExistenceProof or a NonExistenceProof, or a Batch of such messages +type CommitmentProof struct { + // Types that are valid to be assigned to Proof: + // *CommitmentProof_Exist + // *CommitmentProof_Nonexist + // *CommitmentProof_Batch + // *CommitmentProof_Compressed + Proof isCommitmentProof_Proof `protobuf_oneof:"proof"` +} + +func (m *CommitmentProof) Reset() { *m = CommitmentProof{} } +func (m *CommitmentProof) String() string { return proto.CompactTextString(m) } +func (*CommitmentProof) ProtoMessage() {} +func (*CommitmentProof) Descriptor() ([]byte, []int) { + return fileDescriptor_855156e15e7b8e99, []int{2} +} +func (m *CommitmentProof) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CommitmentProof) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CommitmentProof.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CommitmentProof) XXX_Merge(src proto.Message) { + xxx_messageInfo_CommitmentProof.Merge(m, src) +} +func (m *CommitmentProof) XXX_Size() int { + return m.Size() +} +func (m *CommitmentProof) XXX_DiscardUnknown() { + xxx_messageInfo_CommitmentProof.DiscardUnknown(m) +} + +var xxx_messageInfo_CommitmentProof proto.InternalMessageInfo + +type isCommitmentProof_Proof interface { + isCommitmentProof_Proof() + MarshalTo([]byte) (int, error) + Size() int +} + +type CommitmentProof_Exist struct { + Exist *ExistenceProof `protobuf:"bytes,1,opt,name=exist,proto3,oneof" json:"exist,omitempty"` +} +type CommitmentProof_Nonexist struct { + Nonexist *NonExistenceProof `protobuf:"bytes,2,opt,name=nonexist,proto3,oneof" json:"nonexist,omitempty"` +} +type CommitmentProof_Batch struct { + Batch *BatchProof 
`protobuf:"bytes,3,opt,name=batch,proto3,oneof" json:"batch,omitempty"` +} +type CommitmentProof_Compressed struct { + Compressed *CompressedBatchProof `protobuf:"bytes,4,opt,name=compressed,proto3,oneof" json:"compressed,omitempty"` +} + +func (*CommitmentProof_Exist) isCommitmentProof_Proof() {} +func (*CommitmentProof_Nonexist) isCommitmentProof_Proof() {} +func (*CommitmentProof_Batch) isCommitmentProof_Proof() {} +func (*CommitmentProof_Compressed) isCommitmentProof_Proof() {} + +func (m *CommitmentProof) GetProof() isCommitmentProof_Proof { + if m != nil { + return m.Proof + } + return nil +} + +func (m *CommitmentProof) GetExist() *ExistenceProof { + if x, ok := m.GetProof().(*CommitmentProof_Exist); ok { + return x.Exist + } + return nil +} + +func (m *CommitmentProof) GetNonexist() *NonExistenceProof { + if x, ok := m.GetProof().(*CommitmentProof_Nonexist); ok { + return x.Nonexist + } + return nil +} + +func (m *CommitmentProof) GetBatch() *BatchProof { + if x, ok := m.GetProof().(*CommitmentProof_Batch); ok { + return x.Batch + } + return nil +} + +func (m *CommitmentProof) GetCompressed() *CompressedBatchProof { + if x, ok := m.GetProof().(*CommitmentProof_Compressed); ok { + return x.Compressed + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*CommitmentProof) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*CommitmentProof_Exist)(nil), + (*CommitmentProof_Nonexist)(nil), + (*CommitmentProof_Batch)(nil), + (*CommitmentProof_Compressed)(nil), + } +} + +//* +//LeafOp represents the raw key-value data we wish to prove, and +//must be flexible to represent the internal transformation from +//the original key-value pairs into the basis hash, for many existing +//merkle trees. +// +//key and value are passed in. So that the signature of this operation is: +//leafOp(key, value) -> output +// +//To process this, first prehash the keys and values if needed (ANY means no hash in this case): +//hkey = prehashKey(key) +//hvalue = prehashValue(value) +// +//Then combine the bytes, and hash it +//output = hash(prefix || length(hkey) || hkey || length(hvalue) || hvalue) +type LeafOp struct { + Hash HashOp `protobuf:"varint,1,opt,name=hash,proto3,enum=ics23.HashOp" json:"hash,omitempty"` + PrehashKey HashOp `protobuf:"varint,2,opt,name=prehash_key,json=prehashKey,proto3,enum=ics23.HashOp" json:"prehash_key,omitempty"` + PrehashValue HashOp `protobuf:"varint,3,opt,name=prehash_value,json=prehashValue,proto3,enum=ics23.HashOp" json:"prehash_value,omitempty"` + Length LengthOp `protobuf:"varint,4,opt,name=length,proto3,enum=ics23.LengthOp" json:"length,omitempty"` + // prefix is a fixed bytes that may optionally be included at the beginning to differentiate + // a leaf node from an inner node. 
+ Prefix []byte `protobuf:"bytes,5,opt,name=prefix,proto3" json:"prefix,omitempty"` +} + +func (m *LeafOp) Reset() { *m = LeafOp{} } +func (m *LeafOp) String() string { return proto.CompactTextString(m) } +func (*LeafOp) ProtoMessage() {} +func (*LeafOp) Descriptor() ([]byte, []int) { + return fileDescriptor_855156e15e7b8e99, []int{3} +} +func (m *LeafOp) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LeafOp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LeafOp.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LeafOp) XXX_Merge(src proto.Message) { + xxx_messageInfo_LeafOp.Merge(m, src) +} +func (m *LeafOp) XXX_Size() int { + return m.Size() +} +func (m *LeafOp) XXX_DiscardUnknown() { + xxx_messageInfo_LeafOp.DiscardUnknown(m) +} + +var xxx_messageInfo_LeafOp proto.InternalMessageInfo + +func (m *LeafOp) GetHash() HashOp { + if m != nil { + return m.Hash + } + return HashOp_NO_HASH +} + +func (m *LeafOp) GetPrehashKey() HashOp { + if m != nil { + return m.PrehashKey + } + return HashOp_NO_HASH +} + +func (m *LeafOp) GetPrehashValue() HashOp { + if m != nil { + return m.PrehashValue + } + return HashOp_NO_HASH +} + +func (m *LeafOp) GetLength() LengthOp { + if m != nil { + return m.Length + } + return LengthOp_NO_PREFIX +} + +func (m *LeafOp) GetPrefix() []byte { + if m != nil { + return m.Prefix + } + return nil +} + +//* +//InnerOp represents a merkle-proof step that is not a leaf. +//It represents concatenating two children and hashing them to provide the next result. +// +//The result of the previous step is passed in, so the signature of this op is: +//innerOp(child) -> output +// +//The result of applying InnerOp should be: +//output = op.hash(op.prefix || child || op.suffix) +// +//where the || operator is concatenation of binary data, +//and child is the result of hashing all the tree below this step. +// +//Any special data, like prepending child with the length, or prepending the entire operation with +//some value to differentiate from leaf nodes, should be included in prefix and suffix. 
+//If either of prefix or suffix is empty, we just treat it as an empty string +type InnerOp struct { + Hash HashOp `protobuf:"varint,1,opt,name=hash,proto3,enum=ics23.HashOp" json:"hash,omitempty"` + Prefix []byte `protobuf:"bytes,2,opt,name=prefix,proto3" json:"prefix,omitempty"` + Suffix []byte `protobuf:"bytes,3,opt,name=suffix,proto3" json:"suffix,omitempty"` +} + +func (m *InnerOp) Reset() { *m = InnerOp{} } +func (m *InnerOp) String() string { return proto.CompactTextString(m) } +func (*InnerOp) ProtoMessage() {} +func (*InnerOp) Descriptor() ([]byte, []int) { + return fileDescriptor_855156e15e7b8e99, []int{4} +} +func (m *InnerOp) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *InnerOp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_InnerOp.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *InnerOp) XXX_Merge(src proto.Message) { + xxx_messageInfo_InnerOp.Merge(m, src) +} +func (m *InnerOp) XXX_Size() int { + return m.Size() +} +func (m *InnerOp) XXX_DiscardUnknown() { + xxx_messageInfo_InnerOp.DiscardUnknown(m) +} + +var xxx_messageInfo_InnerOp proto.InternalMessageInfo + +func (m *InnerOp) GetHash() HashOp { + if m != nil { + return m.Hash + } + return HashOp_NO_HASH +} + +func (m *InnerOp) GetPrefix() []byte { + if m != nil { + return m.Prefix + } + return nil +} + +func (m *InnerOp) GetSuffix() []byte { + if m != nil { + return m.Suffix + } + return nil +} + +//* +//ProofSpec defines what the expected parameters are for a given proof type. +//This can be stored in the client and used to validate any incoming proofs. +// +//verify(ProofSpec, Proof) -> Proof | Error +// +//As demonstrated in tests, if we don't fix the algorithm used to calculate the +//LeafHash for a given tree, there are many possible key-value pairs that can +//generate a given hash (by interpretting the preimage differently). +//We need this for proper security, requires client knows a priori what +//tree format server uses. But not in code, rather a configuration object. +type ProofSpec struct { + // any field in the ExistenceProof must be the same as in this spec. 
+	// except Prefix, which is just the first bytes of prefix (spec can be longer)
+	LeafSpec  *LeafOp    `protobuf:"bytes,1,opt,name=leaf_spec,json=leafSpec,proto3" json:"leaf_spec,omitempty"`
+	InnerSpec *InnerSpec `protobuf:"bytes,2,opt,name=inner_spec,json=innerSpec,proto3" json:"inner_spec,omitempty"`
+	// max_depth (if > 0) is the maximum number of InnerOps allowed (mainly for fixed-depth tries)
+	MaxDepth int32 `protobuf:"varint,3,opt,name=max_depth,json=maxDepth,proto3" json:"max_depth,omitempty"`
+	// min_depth (if > 0) is the minimum number of InnerOps allowed (mainly for fixed-depth tries)
+	MinDepth int32 `protobuf:"varint,4,opt,name=min_depth,json=minDepth,proto3" json:"min_depth,omitempty"`
+}
+
+func (m *ProofSpec) Reset()         { *m = ProofSpec{} }
+func (m *ProofSpec) String() string { return proto.CompactTextString(m) }
+func (*ProofSpec) ProtoMessage()    {}
+func (*ProofSpec) Descriptor() ([]byte, []int) {
+	return fileDescriptor_855156e15e7b8e99, []int{5}
+}
+func (m *ProofSpec) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ProofSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ProofSpec.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ProofSpec) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ProofSpec.Merge(m, src)
+}
+func (m *ProofSpec) XXX_Size() int {
+	return m.Size()
+}
+func (m *ProofSpec) XXX_DiscardUnknown() {
+	xxx_messageInfo_ProofSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ProofSpec proto.InternalMessageInfo
+
+func (m *ProofSpec) GetLeafSpec() *LeafOp {
+	if m != nil {
+		return m.LeafSpec
+	}
+	return nil
+}
+
+func (m *ProofSpec) GetInnerSpec() *InnerSpec {
+	if m != nil {
+		return m.InnerSpec
+	}
+	return nil
+}
+
+func (m *ProofSpec) GetMaxDepth() int32 {
+	if m != nil {
+		return m.MaxDepth
+	}
+	return 0
+}
+
+func (m *ProofSpec) GetMinDepth() int32 {
+	if m != nil {
+		return m.MinDepth
+	}
+	return 0
+}
+
+//
+//InnerSpec contains all store-specific structure info to determine if two proofs from a
+//given store are neighbors.
+//
+//This enables:
+//
+//isLeftMost(spec: InnerSpec, op: InnerOp)
+//isRightMost(spec: InnerSpec, op: InnerOp)
+//isLeftNeighbor(spec: InnerSpec, left: InnerOp, right: InnerOp)
+type InnerSpec struct {
+	// Child order is the ordering of the children node, must count from 0
+	// iavl tree is [0, 1] (left then right)
+	// merk is [0, 2, 1] (left, right, here)
+	ChildOrder      []int32 `protobuf:"varint,1,rep,packed,name=child_order,json=childOrder,proto3" json:"child_order,omitempty"`
+	ChildSize       int32   `protobuf:"varint,2,opt,name=child_size,json=childSize,proto3" json:"child_size,omitempty"`
+	MinPrefixLength int32   `protobuf:"varint,3,opt,name=min_prefix_length,json=minPrefixLength,proto3" json:"min_prefix_length,omitempty"`
+	MaxPrefixLength int32   `protobuf:"varint,4,opt,name=max_prefix_length,json=maxPrefixLength,proto3" json:"max_prefix_length,omitempty"`
+	// empty child is the prehash image that is used when one child is nil (eg. 20 bytes of 0)
+	EmptyChild []byte `protobuf:"bytes,5,opt,name=empty_child,json=emptyChild,proto3" json:"empty_child,omitempty"`
+	// hash is the algorithm that must be used for each InnerOp
+	Hash HashOp `protobuf:"varint,6,opt,name=hash,proto3,enum=ics23.HashOp" json:"hash,omitempty"`
+}
+
+func (m *InnerSpec) Reset()         { *m = InnerSpec{} }
+func (m *InnerSpec) String() string { return proto.CompactTextString(m) }
+func (*InnerSpec) ProtoMessage()    {}
+func (*InnerSpec) Descriptor() ([]byte, []int) {
+	return fileDescriptor_855156e15e7b8e99, []int{6}
+}
+func (m *InnerSpec) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *InnerSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_InnerSpec.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *InnerSpec) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_InnerSpec.Merge(m, src)
+}
+func (m *InnerSpec) XXX_Size() int {
+	return m.Size()
+}
+func (m *InnerSpec) XXX_DiscardUnknown() {
+	xxx_messageInfo_InnerSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_InnerSpec proto.InternalMessageInfo
+
+func (m *InnerSpec) GetChildOrder() []int32 {
+	if m != nil {
+		return m.ChildOrder
+	}
+	return nil
+}
+
+func (m *InnerSpec) GetChildSize() int32 {
+	if m != nil {
+		return m.ChildSize
+	}
+	return 0
+}
+
+func (m *InnerSpec) GetMinPrefixLength() int32 {
+	if m != nil {
+		return m.MinPrefixLength
+	}
+	return 0
+}
+
+func (m *InnerSpec) GetMaxPrefixLength() int32 {
+	if m != nil {
+		return m.MaxPrefixLength
+	}
+	return 0
+}
+
+func (m *InnerSpec) GetEmptyChild() []byte {
+	if m != nil {
+		return m.EmptyChild
+	}
+	return nil
+}
+
+func (m *InnerSpec) GetHash() HashOp {
+	if m != nil {
+		return m.Hash
+	}
+	return HashOp_NO_HASH
+}
+
+//
+//BatchProof is a group of multiple proof types that can be compressed
+type BatchProof struct {
+	Entries []*BatchEntry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"`
+}
+
+func (m *BatchProof) Reset()         { *m = BatchProof{} }
+func (m *BatchProof) String() string { return proto.CompactTextString(m) }
+func (*BatchProof) ProtoMessage()    {}
+func (*BatchProof) Descriptor() ([]byte, []int) {
+	return fileDescriptor_855156e15e7b8e99, []int{7}
+}
+func (m *BatchProof) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *BatchProof) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_BatchProof.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *BatchProof) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_BatchProof.Merge(m, src)
+}
+func (m *BatchProof) XXX_Size() int {
+	return m.Size()
+}
+func (m *BatchProof) XXX_DiscardUnknown() {
+	xxx_messageInfo_BatchProof.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BatchProof proto.InternalMessageInfo
+
+func (m *BatchProof) GetEntries() []*BatchEntry {
+	if m != nil {
+		return m.Entries
+	}
+	return nil
+}
+
+// Use BatchEntry not CommitmentProof, to avoid recursion
+type BatchEntry struct {
+	// Types that are valid to be assigned to Proof:
+	//	*BatchEntry_Exist
+	//	*BatchEntry_Nonexist
+	Proof isBatchEntry_Proof `protobuf_oneof:"proof"`
+}
+
+func (m *BatchEntry) Reset()         { *m = BatchEntry{} }
+func (m *BatchEntry) String() string { return proto.CompactTextString(m) }
+func (*BatchEntry) ProtoMessage()    {}
+func (*BatchEntry) Descriptor() ([]byte, []int) {
+	return fileDescriptor_855156e15e7b8e99, []int{8}
+}
+func (m *BatchEntry) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *BatchEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_BatchEntry.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *BatchEntry) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_BatchEntry.Merge(m, src)
+}
+func (m *BatchEntry) XXX_Size() int {
+	return m.Size()
+}
+func (m *BatchEntry) XXX_DiscardUnknown() {
+	xxx_messageInfo_BatchEntry.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BatchEntry proto.InternalMessageInfo
+
+type isBatchEntry_Proof interface {
+	isBatchEntry_Proof()
+	MarshalTo([]byte) (int, error)
+	Size() int
+}
+
+type BatchEntry_Exist struct {
+	Exist *ExistenceProof `protobuf:"bytes,1,opt,name=exist,proto3,oneof" json:"exist,omitempty"`
+}
+type BatchEntry_Nonexist struct {
+	Nonexist *NonExistenceProof `protobuf:"bytes,2,opt,name=nonexist,proto3,oneof" json:"nonexist,omitempty"`
+}
+
+func (*BatchEntry_Exist) isBatchEntry_Proof()    {}
+func (*BatchEntry_Nonexist) isBatchEntry_Proof() {}
+
+func (m *BatchEntry) GetProof() isBatchEntry_Proof {
+	if m != nil {
+		return m.Proof
+	}
+	return nil
+}
+
+func (m *BatchEntry) GetExist() *ExistenceProof {
+	if x, ok := m.GetProof().(*BatchEntry_Exist); ok {
+		return x.Exist
+	}
+	return nil
+}
+
+func (m *BatchEntry) GetNonexist() *NonExistenceProof {
+	if x, ok := m.GetProof().(*BatchEntry_Nonexist); ok {
+		return x.Nonexist
+	}
+	return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
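
The `ProofSpec`/`InnerSpec` pair defined above is what lets a verifier reject a proof that is well-formed for *some* Merkle tree but not for the tree it expects. To make the fields concrete, here is a minimal sketch of a spec for an IAVL-style binary tree; the helper name `demoIavlSpec` and the exact values are illustrative assumptions, not something this patch defines:

```
// Illustrative only: roughly the shape of a spec for an IAVL-style
// binary tree, in the same package as the generated types above.
// Concrete values are assumptions for the example.
func demoIavlSpec() *ProofSpec {
	return &ProofSpec{
		LeafSpec: &LeafOp{
			Prefix:       []byte{0},          // domain-separates leaves from inner nodes
			PrehashValue: HashOp_SHA256,      // hash the value before concatenation
			Hash:         HashOp_SHA256,
			Length:       LengthOp_VAR_PROTO, // length-prefix key/value as varints
		},
		InnerSpec: &InnerSpec{
			ChildOrder:      []int32{0, 1}, // binary tree: left child hashed before right
			ChildSize:       33,            // each child hash occupies 33 bytes of the preimage
			MinPrefixLength: 4,
			MaxPrefixLength: 12,
			Hash:            HashOp_SHA256,
		},
	}
}
```

A verifier checks every `LeafOp` and `InnerOp` of an `ExistenceProof` against these bounds before recomputing the root, so a proof built for a differently shaped tree cannot be replayed against this spec.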
+func (*BatchEntry) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*BatchEntry_Exist)(nil), + (*BatchEntry_Nonexist)(nil), + } +} + +type CompressedBatchProof struct { + Entries []*CompressedBatchEntry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"` + LookupInners []*InnerOp `protobuf:"bytes,2,rep,name=lookup_inners,json=lookupInners,proto3" json:"lookup_inners,omitempty"` +} + +func (m *CompressedBatchProof) Reset() { *m = CompressedBatchProof{} } +func (m *CompressedBatchProof) String() string { return proto.CompactTextString(m) } +func (*CompressedBatchProof) ProtoMessage() {} +func (*CompressedBatchProof) Descriptor() ([]byte, []int) { + return fileDescriptor_855156e15e7b8e99, []int{9} +} +func (m *CompressedBatchProof) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CompressedBatchProof) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CompressedBatchProof.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CompressedBatchProof) XXX_Merge(src proto.Message) { + xxx_messageInfo_CompressedBatchProof.Merge(m, src) +} +func (m *CompressedBatchProof) XXX_Size() int { + return m.Size() +} +func (m *CompressedBatchProof) XXX_DiscardUnknown() { + xxx_messageInfo_CompressedBatchProof.DiscardUnknown(m) +} + +var xxx_messageInfo_CompressedBatchProof proto.InternalMessageInfo + +func (m *CompressedBatchProof) GetEntries() []*CompressedBatchEntry { + if m != nil { + return m.Entries + } + return nil +} + +func (m *CompressedBatchProof) GetLookupInners() []*InnerOp { + if m != nil { + return m.LookupInners + } + return nil +} + +// Use BatchEntry not CommitmentProof, to avoid recursion +type CompressedBatchEntry struct { + // Types that are valid to be assigned to Proof: + // *CompressedBatchEntry_Exist + // *CompressedBatchEntry_Nonexist + Proof isCompressedBatchEntry_Proof `protobuf_oneof:"proof"` +} + +func (m *CompressedBatchEntry) Reset() { *m = CompressedBatchEntry{} } +func (m *CompressedBatchEntry) String() string { return proto.CompactTextString(m) } +func (*CompressedBatchEntry) ProtoMessage() {} +func (*CompressedBatchEntry) Descriptor() ([]byte, []int) { + return fileDescriptor_855156e15e7b8e99, []int{10} +} +func (m *CompressedBatchEntry) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CompressedBatchEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CompressedBatchEntry.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CompressedBatchEntry) XXX_Merge(src proto.Message) { + xxx_messageInfo_CompressedBatchEntry.Merge(m, src) +} +func (m *CompressedBatchEntry) XXX_Size() int { + return m.Size() +} +func (m *CompressedBatchEntry) XXX_DiscardUnknown() { + xxx_messageInfo_CompressedBatchEntry.DiscardUnknown(m) +} + +var xxx_messageInfo_CompressedBatchEntry proto.InternalMessageInfo + +type isCompressedBatchEntry_Proof interface { + isCompressedBatchEntry_Proof() + MarshalTo([]byte) (int, error) + Size() int +} + +type CompressedBatchEntry_Exist struct { + Exist *CompressedExistenceProof `protobuf:"bytes,1,opt,name=exist,proto3,oneof" json:"exist,omitempty"` +} +type CompressedBatchEntry_Nonexist struct { + Nonexist *CompressedNonExistenceProof 
`protobuf:"bytes,2,opt,name=nonexist,proto3,oneof" json:"nonexist,omitempty"` +} + +func (*CompressedBatchEntry_Exist) isCompressedBatchEntry_Proof() {} +func (*CompressedBatchEntry_Nonexist) isCompressedBatchEntry_Proof() {} + +func (m *CompressedBatchEntry) GetProof() isCompressedBatchEntry_Proof { + if m != nil { + return m.Proof + } + return nil +} + +func (m *CompressedBatchEntry) GetExist() *CompressedExistenceProof { + if x, ok := m.GetProof().(*CompressedBatchEntry_Exist); ok { + return x.Exist + } + return nil +} + +func (m *CompressedBatchEntry) GetNonexist() *CompressedNonExistenceProof { + if x, ok := m.GetProof().(*CompressedBatchEntry_Nonexist); ok { + return x.Nonexist + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*CompressedBatchEntry) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*CompressedBatchEntry_Exist)(nil), + (*CompressedBatchEntry_Nonexist)(nil), + } +} + +type CompressedExistenceProof struct { + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + Leaf *LeafOp `protobuf:"bytes,3,opt,name=leaf,proto3" json:"leaf,omitempty"` + // these are indexes into the lookup_inners table in CompressedBatchProof + Path []int32 `protobuf:"varint,4,rep,packed,name=path,proto3" json:"path,omitempty"` +} + +func (m *CompressedExistenceProof) Reset() { *m = CompressedExistenceProof{} } +func (m *CompressedExistenceProof) String() string { return proto.CompactTextString(m) } +func (*CompressedExistenceProof) ProtoMessage() {} +func (*CompressedExistenceProof) Descriptor() ([]byte, []int) { + return fileDescriptor_855156e15e7b8e99, []int{11} +} +func (m *CompressedExistenceProof) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CompressedExistenceProof) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CompressedExistenceProof.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CompressedExistenceProof) XXX_Merge(src proto.Message) { + xxx_messageInfo_CompressedExistenceProof.Merge(m, src) +} +func (m *CompressedExistenceProof) XXX_Size() int { + return m.Size() +} +func (m *CompressedExistenceProof) XXX_DiscardUnknown() { + xxx_messageInfo_CompressedExistenceProof.DiscardUnknown(m) +} + +var xxx_messageInfo_CompressedExistenceProof proto.InternalMessageInfo + +func (m *CompressedExistenceProof) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *CompressedExistenceProof) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (m *CompressedExistenceProof) GetLeaf() *LeafOp { + if m != nil { + return m.Leaf + } + return nil +} + +func (m *CompressedExistenceProof) GetPath() []int32 { + if m != nil { + return m.Path + } + return nil +} + +type CompressedNonExistenceProof struct { + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Left *CompressedExistenceProof `protobuf:"bytes,2,opt,name=left,proto3" json:"left,omitempty"` + Right *CompressedExistenceProof `protobuf:"bytes,3,opt,name=right,proto3" json:"right,omitempty"` +} + +func (m *CompressedNonExistenceProof) Reset() { *m = CompressedNonExistenceProof{} } +func (m *CompressedNonExistenceProof) String() string { return proto.CompactTextString(m) } +func (*CompressedNonExistenceProof) 
ProtoMessage() {} +func (*CompressedNonExistenceProof) Descriptor() ([]byte, []int) { + return fileDescriptor_855156e15e7b8e99, []int{12} +} +func (m *CompressedNonExistenceProof) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CompressedNonExistenceProof) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CompressedNonExistenceProof.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CompressedNonExistenceProof) XXX_Merge(src proto.Message) { + xxx_messageInfo_CompressedNonExistenceProof.Merge(m, src) +} +func (m *CompressedNonExistenceProof) XXX_Size() int { + return m.Size() +} +func (m *CompressedNonExistenceProof) XXX_DiscardUnknown() { + xxx_messageInfo_CompressedNonExistenceProof.DiscardUnknown(m) +} + +var xxx_messageInfo_CompressedNonExistenceProof proto.InternalMessageInfo + +func (m *CompressedNonExistenceProof) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *CompressedNonExistenceProof) GetLeft() *CompressedExistenceProof { + if m != nil { + return m.Left + } + return nil +} + +func (m *CompressedNonExistenceProof) GetRight() *CompressedExistenceProof { + if m != nil { + return m.Right + } + return nil +} + +func init() { + proto.RegisterEnum("ics23.HashOp", HashOp_name, HashOp_value) + proto.RegisterEnum("ics23.LengthOp", LengthOp_name, LengthOp_value) + proto.RegisterType((*ExistenceProof)(nil), "ics23.ExistenceProof") + proto.RegisterType((*NonExistenceProof)(nil), "ics23.NonExistenceProof") + proto.RegisterType((*CommitmentProof)(nil), "ics23.CommitmentProof") + proto.RegisterType((*LeafOp)(nil), "ics23.LeafOp") + proto.RegisterType((*InnerOp)(nil), "ics23.InnerOp") + proto.RegisterType((*ProofSpec)(nil), "ics23.ProofSpec") + proto.RegisterType((*InnerSpec)(nil), "ics23.InnerSpec") + proto.RegisterType((*BatchProof)(nil), "ics23.BatchProof") + proto.RegisterType((*BatchEntry)(nil), "ics23.BatchEntry") + proto.RegisterType((*CompressedBatchProof)(nil), "ics23.CompressedBatchProof") + proto.RegisterType((*CompressedBatchEntry)(nil), "ics23.CompressedBatchEntry") + proto.RegisterType((*CompressedExistenceProof)(nil), "ics23.CompressedExistenceProof") + proto.RegisterType((*CompressedNonExistenceProof)(nil), "ics23.CompressedNonExistenceProof") +} + +func init() { proto.RegisterFile("proofs.proto", fileDescriptor_855156e15e7b8e99) } + +var fileDescriptor_855156e15e7b8e99 = []byte{ + // 940 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0x4b, 0x6f, 0xe3, 0x54, + 0x14, 0xce, 0x4d, 0x62, 0x27, 0x39, 0x69, 0x53, 0xf7, 0xaa, 0x20, 0x4b, 0x15, 0x69, 0xf1, 0x86, + 0x4e, 0x47, 0x14, 0x26, 0x99, 0x06, 0xb1, 0x40, 0xa2, 0x49, 0x3d, 0xc4, 0x6a, 0x69, 0xc2, 0x4d, + 0x18, 0x0d, 0x12, 0x92, 0xe5, 0x49, 0x6f, 0x26, 0xd6, 0x24, 0xb6, 0x65, 0xbb, 0x28, 0x19, 0x81, + 0x90, 0xf8, 0x05, 0xac, 0x61, 0xc7, 0x96, 0x3f, 0xc2, 0xb2, 0x4b, 0x96, 0xa8, 0x5d, 0xb0, 0xe0, + 0x4f, 0xa0, 0xfb, 0x88, 0x9b, 0x27, 0xed, 0x02, 0xcd, 0xee, 0x9e, 0xef, 0x7c, 0xe7, 0x71, 0xcf, + 0xc3, 0xd7, 0xb0, 0x11, 0x84, 0xbe, 0xdf, 0x8f, 0x8e, 0x82, 0xd0, 0x8f, 0x7d, 0xac, 0xb8, 0xbd, + 0xa8, 0x52, 0x35, 0x7e, 0x84, 0x92, 0x39, 0x76, 0xa3, 0x98, 0x7a, 0x3d, 0xda, 0x66, 0x7a, 0xac, + 0x41, 0xe6, 0x35, 0x9d, 0xe8, 0x68, 0x1f, 0x1d, 0x6c, 0x10, 0x76, 0xc4, 0x3b, 0xa0, 0x7c, 0xe7, + 0x0c, 0xaf, 0xa8, 0x9e, 0xe6, 0x98, 0x10, 0xf0, 0xfb, 0x90, 
0x1d, 0x52, 0xa7, 0xaf, 0x67, 0xf6, + 0xd1, 0x41, 0xb1, 0xb2, 0x79, 0xc4, 0xfd, 0x1d, 0x9d, 0x53, 0xa7, 0xdf, 0x0a, 0x08, 0x57, 0x61, + 0x03, 0xb2, 0x81, 0x13, 0x0f, 0xf4, 0xec, 0x7e, 0xe6, 0xa0, 0x58, 0x29, 0x49, 0x8a, 0xe5, 0x79, + 0x34, 0x64, 0x1c, 0xa6, 0x33, 0x7e, 0x80, 0xed, 0x0b, 0xdf, 0xbb, 0x37, 0x87, 0x47, 0x2c, 0x5a, + 0x3f, 0xe6, 0x29, 0x14, 0x2b, 0xef, 0x48, 0x57, 0xf3, 0x66, 0x84, 0x53, 0xf0, 0x63, 0x50, 0x42, + 0xf7, 0xd5, 0x20, 0x96, 0x99, 0xad, 0xe1, 0x0a, 0x8e, 0xf1, 0x0f, 0x82, 0xad, 0x86, 0x3f, 0x1a, + 0xb9, 0xf1, 0x88, 0x7a, 0xb1, 0x88, 0xfe, 0x21, 0x28, 0x94, 0x91, 0x79, 0xfc, 0x75, 0x0e, 0x9a, + 0x29, 0x22, 0x58, 0xb8, 0x06, 0x79, 0xcf, 0xf7, 0x84, 0x85, 0x48, 0x4f, 0x97, 0x16, 0x4b, 0x17, + 0x6b, 0xa6, 0x48, 0xc2, 0xc5, 0x8f, 0x40, 0x79, 0xe9, 0xc4, 0xbd, 0x81, 0xcc, 0x73, 0x5b, 0x1a, + 0xd5, 0x19, 0x96, 0x84, 0xe0, 0x0c, 0xfc, 0x19, 0x40, 0xcf, 0x1f, 0x05, 0x21, 0x8d, 0x22, 0x7a, + 0xa9, 0x67, 0x39, 0x7f, 0x57, 0xf2, 0x1b, 0x89, 0x62, 0xce, 0x72, 0xc6, 0xa0, 0x9e, 0x03, 0x85, + 0xf7, 0xde, 0xb8, 0x46, 0xa0, 0x8a, 0x0e, 0xb1, 0xf6, 0x0d, 0x9c, 0x68, 0xc0, 0xef, 0x58, 0x4a, + 0xda, 0xd7, 0x74, 0xa2, 0x01, 0x6b, 0x0d, 0x53, 0xe1, 0x23, 0x28, 0x06, 0x21, 0x65, 0x47, 0x9b, + 0x75, 0x23, 0xbd, 0x8a, 0x09, 0x92, 0x71, 0x46, 0x27, 0xb8, 0x02, 0x9b, 0x53, 0xbe, 0x98, 0x97, + 0xcc, 0x2a, 0x8b, 0x0d, 0xc9, 0x79, 0xce, 0xa7, 0xe8, 0x03, 0x50, 0x87, 0xd4, 0x7b, 0xc5, 0x87, + 0x84, 0x91, 0xb7, 0x92, 0x39, 0x62, 0x60, 0x2b, 0x20, 0x52, 0x8d, 0xdf, 0x05, 0x35, 0x08, 0x69, + 0xdf, 0x1d, 0xeb, 0x0a, 0x9f, 0x0a, 0x29, 0x19, 0xdf, 0x42, 0x4e, 0x0e, 0xd4, 0x43, 0xae, 0x74, + 0xe7, 0x25, 0x3d, 0xeb, 0x85, 0xe1, 0xd1, 0x55, 0x9f, 0xe1, 0x19, 0x81, 0x0b, 0xc9, 0xf8, 0x0d, + 0x41, 0x81, 0x57, 0xb4, 0x13, 0xd0, 0x1e, 0x3e, 0x84, 0x02, 0x9b, 0x6b, 0x3b, 0x0a, 0x68, 0x4f, + 0x0e, 0xc7, 0xc2, 0xdc, 0xe7, 0x99, 0x9e, 0x73, 0x3f, 0x02, 0x70, 0x59, 0x5e, 0x82, 0x2c, 0xe6, + 0x42, 0x9b, 0xdd, 0x00, 0xc6, 0x22, 0x05, 0x77, 0x7a, 0xc4, 0xbb, 0x50, 0x18, 0x39, 0x63, 0xfb, + 0x92, 0x06, 0xb1, 0x18, 0x09, 0x85, 0xe4, 0x47, 0xce, 0xf8, 0x94, 0xc9, 0x5c, 0xe9, 0x7a, 0x52, + 0x99, 0x95, 0x4a, 0xd7, 0xe3, 0x4a, 0xe3, 0x6f, 0x04, 0x85, 0xc4, 0x25, 0xde, 0x83, 0x62, 0x6f, + 0xe0, 0x0e, 0x2f, 0x6d, 0x3f, 0xbc, 0xa4, 0xa1, 0x8e, 0xf6, 0x33, 0x07, 0x0a, 0x01, 0x0e, 0xb5, + 0x18, 0x82, 0xdf, 0x03, 0x21, 0xd9, 0x91, 0xfb, 0x46, 0xec, 0xb4, 0x42, 0x0a, 0x1c, 0xe9, 0xb8, + 0x6f, 0x28, 0x3e, 0x84, 0x6d, 0x16, 0x4a, 0x14, 0xc6, 0x96, 0xcd, 0x11, 0xf9, 0x6c, 0x8d, 0x5c, + 0xaf, 0xcd, 0x71, 0xd1, 0x1e, 0xce, 0x75, 0xc6, 0x0b, 0xdc, 0xac, 0xe4, 0x3a, 0xe3, 0x39, 0xee, + 0x1e, 0x14, 0xe9, 0x28, 0x88, 0x27, 0x36, 0x0f, 0x25, 0xbb, 0x08, 0x1c, 0x6a, 0x30, 0x24, 0x69, + 0x9f, 0xba, 0xb6, 0x7d, 0xc6, 0xa7, 0x00, 0x77, 0x43, 0x8e, 0x1f, 0x43, 0x8e, 0x7a, 0x71, 0xe8, + 0xd2, 0x88, 0xdf, 0x72, 0x61, 0x85, 0x4c, 0x2f, 0x0e, 0x27, 0x64, 0xca, 0x30, 0xbe, 0x97, 0xa6, + 0x1c, 0x7e, 0x4b, 0x2b, 0x7e, 0xb7, 0x78, 0x3f, 0x21, 0xd8, 0x59, 0xb5, 0xa8, 0xf8, 0x78, 0xf1, + 0x0e, 0x6b, 0xd6, 0x7a, 0xfe, 0x36, 0xb8, 0x0a, 0x9b, 0x43, 0xdf, 0x7f, 0x7d, 0x15, 0xd8, 0x7c, + 0x80, 0x22, 0x3d, 0xbd, 0xf2, 0x13, 0xbb, 0x21, 0x48, 0x5c, 0x8c, 0x8c, 0x5f, 0x96, 0x93, 0x10, + 0xd5, 0xf8, 0x64, 0xbe, 0x1a, 0x7b, 0x4b, 0x29, 0xac, 0xab, 0xcb, 0xe7, 0x4b, 0x75, 0x31, 0x96, + 0x6c, 0x1f, 0x58, 0xa1, 0x09, 0xe8, 0xeb, 0xe2, 0xfd, 0x9f, 0x4f, 0x12, 0x9e, 0x79, 0x92, 0x14, + 0xf9, 0x04, 0xfd, 0x8a, 0x60, 0xf7, 0x3f, 0xf2, 0x5d, 0x11, 0xbe, 0x3a, 0xf7, 0x1a, 0xdd, 0x57, + 0x2f, 0xf9, 0x2e, 0x1d, 0xcf, 0xbf, 0x4b, 0xf7, 0x5a, 0x09, 0xf6, 0x21, 0x05, 0x55, 
0xec, 0x00, + 0x2e, 0x42, 0xee, 0xa2, 0x65, 0x37, 0x4f, 0x3a, 0x4d, 0x2d, 0x85, 0x01, 0xd4, 0x4e, 0xf3, 0xa4, + 0x72, 0x5c, 0xd3, 0x90, 0x3c, 0x1f, 0x3f, 0xa9, 0x68, 0x69, 0x76, 0x3e, 0x33, 0x1b, 0x8d, 0x93, + 0x33, 0x2d, 0x83, 0x37, 0xa1, 0x40, 0xac, 0xb6, 0xf9, 0xe5, 0xe9, 0x93, 0xda, 0xc7, 0x5a, 0x96, + 0xd9, 0xd7, 0xad, 0x6e, 0xa3, 0x65, 0x5d, 0x68, 0x0a, 0x2e, 0x01, 0x08, 0x1b, 0x9b, 0xf9, 0x50, + 0x0f, 0x7f, 0x47, 0x90, 0x9f, 0x7e, 0x74, 0x99, 0xe1, 0x45, 0xcb, 0x6e, 0x13, 0xf3, 0x99, 0xf5, + 0x42, 0x4b, 0x31, 0xf1, 0xf9, 0x09, 0xb1, 0xdb, 0xa4, 0xd5, 0x6d, 0x69, 0x88, 0xf9, 0x61, 0x22, + 0x39, 0x6f, 0x6b, 0x69, 0xbc, 0x05, 0xc5, 0x67, 0xd6, 0x0b, 0xf3, 0xb4, 0x5a, 0xb1, 0xeb, 0xd6, + 0x17, 0x5a, 0x06, 0x63, 0x28, 0x4d, 0x81, 0x73, 0xab, 0xdb, 0x3d, 0x37, 0xb5, 0x6c, 0x42, 0xaa, + 0x3d, 0xe5, 0x24, 0x25, 0x21, 0xd5, 0x9e, 0x4e, 0x49, 0x2a, 0xde, 0x01, 0x8d, 0x98, 0x5f, 0x7d, + 0x6d, 0x11, 0xd3, 0x66, 0xce, 0xbe, 0xe9, 0x9a, 0x1d, 0x2d, 0x37, 0x8b, 0x32, 0x6b, 0x8e, 0xe6, + 0xeb, 0xfa, 0x1f, 0x37, 0x65, 0x74, 0x7d, 0x53, 0x46, 0x7f, 0xdd, 0x94, 0xd1, 0xcf, 0xb7, 0xe5, + 0xd4, 0xf5, 0x6d, 0x39, 0xf5, 0xe7, 0x6d, 0x39, 0xf5, 0x52, 0xe5, 0xbf, 0x37, 0xd5, 0x7f, 0x03, + 0x00, 0x00, 0xff, 0xff, 0xd4, 0x7d, 0x87, 0x8f, 0xee, 0x08, 0x00, 0x00, +} + +func (m *ExistenceProof) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExistenceProof) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExistenceProof) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Path) > 0 { + for iNdEx := len(m.Path) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Path[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProofs(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if m.Leaf != nil { + { + size, err := m.Leaf.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProofs(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintProofs(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintProofs(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *NonExistenceProof) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NonExistenceProof) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NonExistenceProof) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Right != nil { + { + size, err := m.Right.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProofs(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Left != nil { + { + size, err := m.Left.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProofs(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = 
encodeVarintProofs(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CommitmentProof) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CommitmentProof) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CommitmentProof) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Proof != nil { + { + size := m.Proof.Size() + i -= size + if _, err := m.Proof.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + return len(dAtA) - i, nil +} + +func (m *CommitmentProof_Exist) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CommitmentProof_Exist) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Exist != nil { + { + size, err := m.Exist.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProofs(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *CommitmentProof_Nonexist) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CommitmentProof_Nonexist) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Nonexist != nil { + { + size, err := m.Nonexist.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProofs(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *CommitmentProof_Batch) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CommitmentProof_Batch) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Batch != nil { + { + size, err := m.Batch.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProofs(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *CommitmentProof_Compressed) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CommitmentProof_Compressed) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Compressed != nil { + { + size, err := m.Compressed.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProofs(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *LeafOp) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeafOp) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LeafOp) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Prefix) > 0 { + i -= len(m.Prefix) + copy(dAtA[i:], m.Prefix) + i = encodeVarintProofs(dAtA, i, uint64(len(m.Prefix))) + i-- + dAtA[i] = 0x2a + } + if m.Length != 0 { + i = encodeVarintProofs(dAtA, i, uint64(m.Length)) + i-- + dAtA[i] = 0x20 + } + if m.PrehashValue != 0 { + i = encodeVarintProofs(dAtA, i, uint64(m.PrehashValue)) + i-- + dAtA[i] = 0x18 + } + if 
m.PrehashKey != 0 { + i = encodeVarintProofs(dAtA, i, uint64(m.PrehashKey)) + i-- + dAtA[i] = 0x10 + } + if m.Hash != 0 { + i = encodeVarintProofs(dAtA, i, uint64(m.Hash)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *InnerOp) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *InnerOp) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *InnerOp) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Suffix) > 0 { + i -= len(m.Suffix) + copy(dAtA[i:], m.Suffix) + i = encodeVarintProofs(dAtA, i, uint64(len(m.Suffix))) + i-- + dAtA[i] = 0x1a + } + if len(m.Prefix) > 0 { + i -= len(m.Prefix) + copy(dAtA[i:], m.Prefix) + i = encodeVarintProofs(dAtA, i, uint64(len(m.Prefix))) + i-- + dAtA[i] = 0x12 + } + if m.Hash != 0 { + i = encodeVarintProofs(dAtA, i, uint64(m.Hash)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ProofSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProofSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ProofSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.MinDepth != 0 { + i = encodeVarintProofs(dAtA, i, uint64(m.MinDepth)) + i-- + dAtA[i] = 0x20 + } + if m.MaxDepth != 0 { + i = encodeVarintProofs(dAtA, i, uint64(m.MaxDepth)) + i-- + dAtA[i] = 0x18 + } + if m.InnerSpec != nil { + { + size, err := m.InnerSpec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProofs(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.LeafSpec != nil { + { + size, err := m.LeafSpec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProofs(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *InnerSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *InnerSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *InnerSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Hash != 0 { + i = encodeVarintProofs(dAtA, i, uint64(m.Hash)) + i-- + dAtA[i] = 0x30 + } + if len(m.EmptyChild) > 0 { + i -= len(m.EmptyChild) + copy(dAtA[i:], m.EmptyChild) + i = encodeVarintProofs(dAtA, i, uint64(len(m.EmptyChild))) + i-- + dAtA[i] = 0x2a + } + if m.MaxPrefixLength != 0 { + i = encodeVarintProofs(dAtA, i, uint64(m.MaxPrefixLength)) + i-- + dAtA[i] = 0x20 + } + if m.MinPrefixLength != 0 { + i = encodeVarintProofs(dAtA, i, uint64(m.MinPrefixLength)) + i-- + dAtA[i] = 0x18 + } + if m.ChildSize != 0 { + i = encodeVarintProofs(dAtA, i, uint64(m.ChildSize)) + i-- + dAtA[i] = 0x10 + } + if len(m.ChildOrder) > 0 { + dAtA11 := make([]byte, len(m.ChildOrder)*10) + var j10 int + for _, num1 := range m.ChildOrder { + num := uint64(num1) + for num >= 1<<7 { + dAtA11[j10] = uint8(uint64(num)&0x7f | 
0x80) + num >>= 7 + j10++ + } + dAtA11[j10] = uint8(num) + j10++ + } + i -= j10 + copy(dAtA[i:], dAtA11[:j10]) + i = encodeVarintProofs(dAtA, i, uint64(j10)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *BatchProof) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BatchProof) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BatchProof) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Entries) > 0 { + for iNdEx := len(m.Entries) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Entries[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProofs(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *BatchEntry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BatchEntry) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BatchEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Proof != nil { + { + size := m.Proof.Size() + i -= size + if _, err := m.Proof.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + return len(dAtA) - i, nil +} + +func (m *BatchEntry_Exist) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BatchEntry_Exist) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Exist != nil { + { + size, err := m.Exist.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProofs(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *BatchEntry_Nonexist) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BatchEntry_Nonexist) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Nonexist != nil { + { + size, err := m.Nonexist.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProofs(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *CompressedBatchProof) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CompressedBatchProof) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CompressedBatchProof) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.LookupInners) > 0 { + for iNdEx := len(m.LookupInners) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.LookupInners[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProofs(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Entries) > 0 { + for iNdEx := len(m.Entries) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Entries[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != 
nil { + return 0, err + } + i -= size + i = encodeVarintProofs(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *CompressedBatchEntry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CompressedBatchEntry) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CompressedBatchEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Proof != nil { + { + size := m.Proof.Size() + i -= size + if _, err := m.Proof.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + return len(dAtA) - i, nil +} + +func (m *CompressedBatchEntry_Exist) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CompressedBatchEntry_Exist) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Exist != nil { + { + size, err := m.Exist.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProofs(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *CompressedBatchEntry_Nonexist) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CompressedBatchEntry_Nonexist) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Nonexist != nil { + { + size, err := m.Nonexist.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProofs(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *CompressedExistenceProof) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CompressedExistenceProof) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CompressedExistenceProof) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Path) > 0 { + dAtA17 := make([]byte, len(m.Path)*10) + var j16 int + for _, num1 := range m.Path { + num := uint64(num1) + for num >= 1<<7 { + dAtA17[j16] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j16++ + } + dAtA17[j16] = uint8(num) + j16++ + } + i -= j16 + copy(dAtA[i:], dAtA17[:j16]) + i = encodeVarintProofs(dAtA, i, uint64(j16)) + i-- + dAtA[i] = 0x22 + } + if m.Leaf != nil { + { + size, err := m.Leaf.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProofs(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintProofs(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintProofs(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CompressedNonExistenceProof) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CompressedNonExistenceProof) MarshalTo(dAtA []byte) (int, 
error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CompressedNonExistenceProof) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Right != nil { + { + size, err := m.Right.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProofs(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Left != nil { + { + size, err := m.Left.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProofs(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintProofs(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintProofs(dAtA []byte, offset int, v uint64) int { + offset -= sovProofs(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ExistenceProof) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovProofs(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovProofs(uint64(l)) + } + if m.Leaf != nil { + l = m.Leaf.Size() + n += 1 + l + sovProofs(uint64(l)) + } + if len(m.Path) > 0 { + for _, e := range m.Path { + l = e.Size() + n += 1 + l + sovProofs(uint64(l)) + } + } + return n +} + +func (m *NonExistenceProof) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovProofs(uint64(l)) + } + if m.Left != nil { + l = m.Left.Size() + n += 1 + l + sovProofs(uint64(l)) + } + if m.Right != nil { + l = m.Right.Size() + n += 1 + l + sovProofs(uint64(l)) + } + return n +} + +func (m *CommitmentProof) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Proof != nil { + n += m.Proof.Size() + } + return n +} + +func (m *CommitmentProof_Exist) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Exist != nil { + l = m.Exist.Size() + n += 1 + l + sovProofs(uint64(l)) + } + return n +} +func (m *CommitmentProof_Nonexist) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Nonexist != nil { + l = m.Nonexist.Size() + n += 1 + l + sovProofs(uint64(l)) + } + return n +} +func (m *CommitmentProof_Batch) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Batch != nil { + l = m.Batch.Size() + n += 1 + l + sovProofs(uint64(l)) + } + return n +} +func (m *CommitmentProof_Compressed) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Compressed != nil { + l = m.Compressed.Size() + n += 1 + l + sovProofs(uint64(l)) + } + return n +} +func (m *LeafOp) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Hash != 0 { + n += 1 + sovProofs(uint64(m.Hash)) + } + if m.PrehashKey != 0 { + n += 1 + sovProofs(uint64(m.PrehashKey)) + } + if m.PrehashValue != 0 { + n += 1 + sovProofs(uint64(m.PrehashValue)) + } + if m.Length != 0 { + n += 1 + sovProofs(uint64(m.Length)) + } + l = len(m.Prefix) + if l > 0 { + n += 1 + l + sovProofs(uint64(l)) + } + return n +} + +func (m *InnerOp) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Hash != 0 { + n += 1 + sovProofs(uint64(m.Hash)) + } + l = len(m.Prefix) + if l > 0 { + n += 1 + l + sovProofs(uint64(l)) + } + l = len(m.Suffix) + if l > 0 { + n += 1 + l + sovProofs(uint64(l)) + } + return n 
+} + +func (m *ProofSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LeafSpec != nil { + l = m.LeafSpec.Size() + n += 1 + l + sovProofs(uint64(l)) + } + if m.InnerSpec != nil { + l = m.InnerSpec.Size() + n += 1 + l + sovProofs(uint64(l)) + } + if m.MaxDepth != 0 { + n += 1 + sovProofs(uint64(m.MaxDepth)) + } + if m.MinDepth != 0 { + n += 1 + sovProofs(uint64(m.MinDepth)) + } + return n +} + +func (m *InnerSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ChildOrder) > 0 { + l = 0 + for _, e := range m.ChildOrder { + l += sovProofs(uint64(e)) + } + n += 1 + sovProofs(uint64(l)) + l + } + if m.ChildSize != 0 { + n += 1 + sovProofs(uint64(m.ChildSize)) + } + if m.MinPrefixLength != 0 { + n += 1 + sovProofs(uint64(m.MinPrefixLength)) + } + if m.MaxPrefixLength != 0 { + n += 1 + sovProofs(uint64(m.MaxPrefixLength)) + } + l = len(m.EmptyChild) + if l > 0 { + n += 1 + l + sovProofs(uint64(l)) + } + if m.Hash != 0 { + n += 1 + sovProofs(uint64(m.Hash)) + } + return n +} + +func (m *BatchProof) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Entries) > 0 { + for _, e := range m.Entries { + l = e.Size() + n += 1 + l + sovProofs(uint64(l)) + } + } + return n +} + +func (m *BatchEntry) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Proof != nil { + n += m.Proof.Size() + } + return n +} + +func (m *BatchEntry_Exist) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Exist != nil { + l = m.Exist.Size() + n += 1 + l + sovProofs(uint64(l)) + } + return n +} +func (m *BatchEntry_Nonexist) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Nonexist != nil { + l = m.Nonexist.Size() + n += 1 + l + sovProofs(uint64(l)) + } + return n +} +func (m *CompressedBatchProof) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Entries) > 0 { + for _, e := range m.Entries { + l = e.Size() + n += 1 + l + sovProofs(uint64(l)) + } + } + if len(m.LookupInners) > 0 { + for _, e := range m.LookupInners { + l = e.Size() + n += 1 + l + sovProofs(uint64(l)) + } + } + return n +} + +func (m *CompressedBatchEntry) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Proof != nil { + n += m.Proof.Size() + } + return n +} + +func (m *CompressedBatchEntry_Exist) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Exist != nil { + l = m.Exist.Size() + n += 1 + l + sovProofs(uint64(l)) + } + return n +} +func (m *CompressedBatchEntry_Nonexist) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Nonexist != nil { + l = m.Nonexist.Size() + n += 1 + l + sovProofs(uint64(l)) + } + return n +} +func (m *CompressedExistenceProof) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovProofs(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovProofs(uint64(l)) + } + if m.Leaf != nil { + l = m.Leaf.Size() + n += 1 + l + sovProofs(uint64(l)) + } + if len(m.Path) > 0 { + l = 0 + for _, e := range m.Path { + l += sovProofs(uint64(e)) + } + n += 1 + sovProofs(uint64(l)) + l + } + return n +} + +func (m *CompressedNonExistenceProof) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovProofs(uint64(l)) + } + if m.Left != nil { + l = m.Left.Size() + n += 1 + l + sovProofs(uint64(l)) + } + if m.Right != nil { + l = m.Right.Size() + n += 1 + l + 
sovProofs(uint64(l)) + } + return n +} + +func sovProofs(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozProofs(x uint64) (n int) { + return sovProofs(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *ExistenceProof) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExistenceProof: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExistenceProof: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthProofs + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthProofs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthProofs + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthProofs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) 
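
The `sovProofs` helper defined above computes the encoded size of a varint as `(bits.Len64(x|1) + 6) / 7`, i.e. ceil(bitlen/7) with zero still costing one byte, and `encodeVarintProofs` emits the low 7 bits first with the continuation bit set on every byte but the last. A standalone sketch of that arithmetic, assumed equivalent for illustration (the generated helpers remain the authoritative versions):

```
// Standalone sketch of the varint arithmetic used by sovProofs and
// encodeVarintProofs.
package main

import (
	"fmt"
	"math/bits"
)

// Encoded size of x as a protobuf varint: ceil(bitlen/7), where x=0
// still takes one byte (hence the x|1).
func varintSize(x uint64) int { return (bits.Len64(x|1) + 6) / 7 }

func main() {
	fmt.Println(varintSize(300)) // 2: 300 needs 9 bits, ceil(9/7) = 2 bytes

	// Produce the same byte sequence encodeVarintProofs does (it writes
	// right-to-left into a presized buffer, but the bytes are identical):
	// low 7 bits first, MSB set on every byte except the last.
	v, buf := uint64(300), []byte{}
	for v >= 1<<7 {
		buf = append(buf, byte(v&0x7f|0x80))
		v >>= 7
	}
	buf = append(buf, byte(v))
	fmt.Printf("% x\n", buf) // ac 02
}
```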
+ if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Leaf", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProofs + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProofs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Leaf == nil { + m.Leaf = &LeafOp{} + } + if err := m.Leaf.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProofs + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProofs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = append(m.Path, &InnerOp{}) + if err := m.Path[len(m.Path)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipProofs(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthProofs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NonExistenceProof) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NonExistenceProof: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NonExistenceProof: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthProofs + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthProofs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) 
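
Each generated `Unmarshal` loop, like the `ExistenceProof` and `NonExistenceProof` ones here, begins by reading one varint `wire` and splitting it into a field number (`wire >> 3`) and a wire type (`wire & 0x7`). A small hand-worked sketch of that split, with tag bytes matching the ones the marshal code above writes:

```
// Sketch of the tag split performed at the top of every generated
// Unmarshal loop; values are hand-computed for illustration.
package main

import "fmt"

func main() {
	// ExistenceProof.Key is field 1 with wire type 2 (length-delimited),
	// so its tag is (1<<3)|2 = 0x0a -- the same 0x0a that
	// ExistenceProof.MarshalToSizedBuffer writes before Key.
	wire := uint64(0x0a)
	fmt.Println(int32(wire>>3), int(wire&0x7)) // 1 2

	// Path is field 4, also length-delimited: (4<<3)|2 = 0x22,
	// matching the 0x22 written for each Path element.
	fmt.Printf("%#x\n", (4<<3)|2) // 0x22
}
```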
+ if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Left", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProofs + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProofs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Left == nil { + m.Left = &ExistenceProof{} + } + if err := m.Left.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Right", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProofs + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProofs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Right == nil { + m.Right = &ExistenceProof{} + } + if err := m.Right.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipProofs(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthProofs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CommitmentProof) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CommitmentProof: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CommitmentProof: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exist", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProofs + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProofs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ExistenceProof{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Proof = &CommitmentProof_Exist{v} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Nonexist", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProofs + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProofs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &NonExistenceProof{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Proof = &CommitmentProof_Nonexist{v} + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Batch", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProofs + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProofs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &BatchProof{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Proof = &CommitmentProof_Batch{v} + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Compressed", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProofs + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProofs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &CompressedBatchProof{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Proof = &CommitmentProof_Compressed{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipProofs(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthProofs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeafOp) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeafOp: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeafOp: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + } + m.Hash = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Hash |= HashOp(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PrehashKey", wireType) + } + m.PrehashKey = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
m.PrehashKey |= HashOp(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PrehashValue", wireType) + } + m.PrehashValue = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PrehashValue |= HashOp(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Length", wireType) + } + m.Length = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Length |= LengthOp(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Prefix", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthProofs + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthProofs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Prefix = append(m.Prefix[:0], dAtA[iNdEx:postIndex]...) + if m.Prefix == nil { + m.Prefix = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipProofs(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthProofs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *InnerOp) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: InnerOp: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: InnerOp: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + } + m.Hash = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Hash |= HashOp(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Prefix", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthProofs + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthProofs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Prefix = append(m.Prefix[:0], dAtA[iNdEx:postIndex]...) 
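
The generated `Marshal`/`Unmarshal` pairs are inverses, so callers never touch this wire-level detail directly. A minimal round-trip sketch in the same package as the generated types, with arbitrary example field values and a hypothetical helper name:

```
// Round-trip sketch using the generated methods; field values are
// arbitrary examples.
func demoRoundTrip() (*LeafOp, error) {
	in := &LeafOp{
		Hash:   HashOp_SHA256,
		Length: LengthOp_VAR_PROTO,
		Prefix: []byte{0},
	}
	bz, err := in.Marshal() // Size() once, then MarshalToSizedBuffer
	if err != nil {
		return nil, err
	}
	out := &LeafOp{}
	// Unmarshal re-reads each tag and dispatches on the field number,
	// rebuilding an equal struct.
	if err := out.Unmarshal(bz); err != nil {
		return nil, err
	}
	return out, nil
}
```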
+ if m.Prefix == nil { + m.Prefix = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Suffix", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthProofs + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthProofs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Suffix = append(m.Suffix[:0], dAtA[iNdEx:postIndex]...) + if m.Suffix == nil { + m.Suffix = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipProofs(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthProofs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ProofSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProofSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProofSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LeafSpec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProofs + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProofs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LeafSpec == nil { + m.LeafSpec = &LeafOp{} + } + if err := m.LeafSpec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InnerSpec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProofs + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProofs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.InnerSpec == nil { + m.InnerSpec = &InnerSpec{} + } + if err := m.InnerSpec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxDepth", wireType) + } + m.MaxDepth = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxDepth |= 
int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinDepth", wireType) + } + m.MinDepth = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinDepth |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipProofs(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthProofs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *InnerSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: InnerSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: InnerSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType == 0 { + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ChildOrder = append(m.ChildOrder, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthProofs + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthProofs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.ChildOrder) == 0 { + m.ChildOrder = make([]int32, 0, elementCount) + } + for iNdEx < postIndex { + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ChildOrder = append(m.ChildOrder, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field ChildOrder", wireType) + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ChildSize", wireType) + } + m.ChildSize = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ChildSize |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinPrefixLength", wireType) + } + m.MinPrefixLength = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinPrefixLength |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxPrefixLength", wireType) + } + m.MaxPrefixLength = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxPrefixLength |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EmptyChild", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthProofs + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthProofs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EmptyChild = append(m.EmptyChild[:0], dAtA[iNdEx:postIndex]...) + if m.EmptyChild == nil { + m.EmptyChild = []byte{} + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + } + m.Hash = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Hash |= HashOp(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipProofs(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthProofs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BatchProof) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BatchProof: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BatchProof: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Entries", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProofs + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProofs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Entries = append(m.Entries, &BatchEntry{}) + if err := m.Entries[len(m.Entries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipProofs(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthProofs + } + if (iNdEx + 
skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BatchEntry) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BatchEntry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BatchEntry: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exist", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProofs + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProofs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ExistenceProof{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Proof = &BatchEntry_Exist{v} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Nonexist", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProofs + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProofs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &NonExistenceProof{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Proof = &BatchEntry_Nonexist{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipProofs(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthProofs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CompressedBatchProof) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CompressedBatchProof: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CompressedBatchProof: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Entries", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProofs + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProofs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Entries = append(m.Entries, &CompressedBatchEntry{}) + if err := m.Entries[len(m.Entries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LookupInners", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProofs + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProofs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LookupInners = append(m.LookupInners, &InnerOp{}) + if err := m.LookupInners[len(m.LookupInners)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipProofs(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthProofs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CompressedBatchEntry) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CompressedBatchEntry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CompressedBatchEntry: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exist", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProofs + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProofs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &CompressedExistenceProof{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Proof = &CompressedBatchEntry_Exist{v} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Nonexist", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProofs + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProofs + } + if postIndex > l 
{ + return io.ErrUnexpectedEOF + } + v := &CompressedNonExistenceProof{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Proof = &CompressedBatchEntry_Nonexist{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipProofs(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthProofs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CompressedExistenceProof) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CompressedExistenceProof: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CompressedExistenceProof: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthProofs + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthProofs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthProofs + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthProofs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Leaf", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProofs + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProofs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Leaf == nil { + m.Leaf = &LeafOp{} + } + if err := m.Leaf.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType == 0 { + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Path = append(m.Path, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthProofs + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthProofs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.Path) == 0 { + m.Path = make([]int32, 0, elementCount) + } + for iNdEx < postIndex { + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Path = append(m.Path, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + default: + iNdEx = preIndex + skippy, err := skipProofs(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthProofs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CompressedNonExistenceProof) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CompressedNonExistenceProof: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CompressedNonExistenceProof: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthProofs + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthProofs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Left", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProofs + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProofs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Left == nil { + m.Left = &CompressedExistenceProof{} + } + if err := m.Left.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Right", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProofs + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProofs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Right == nil { + m.Right = &CompressedExistenceProof{} + } + if err := m.Right.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipProofs(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthProofs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipProofs(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowProofs + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowProofs + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowProofs + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthProofs + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupProofs + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthProofs + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + 
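Every field in the generated Unmarshal functions above, and the skipProofs helper, decodes tags and lengths with the same inlined base-128 varint loop: each byte contributes its low 7 bits, the high bit marks continuation, and the loop guards against 64-bit overflow and truncated input. A minimal standalone sketch of that loop (the function name is illustrative, not part of the generated file):

```
package main

import (
	"errors"
	"fmt"
)

// decodeVarint reads an unsigned base-128 varint from buf, returning the
// value and the number of bytes consumed. It mirrors the inlined loops in
// the generated code, including the overflow and short-buffer guards.
func decodeVarint(buf []byte) (uint64, int, error) {
	var v uint64
	for i, shift := 0, uint(0); ; i, shift = i+1, shift+7 {
		if shift >= 64 {
			return 0, 0, errors.New("integer overflow") // cf. ErrIntOverflowProofs
		}
		if i >= len(buf) {
			return 0, 0, errors.New("unexpected EOF") // cf. io.ErrUnexpectedEOF
		}
		b := buf[i]
		v |= uint64(b&0x7F) << shift
		if b < 0x80 { // high bit clear: final byte of this varint
			return v, i + 1, nil
		}
	}
}

func main() {
	// 0xAC 0x02 encodes 300: 0x2C | (0x02 << 7).
	v, n, err := decodeVarint([]byte{0xAC, 0x02})
	fmt.Println(v, n, err) // 300 2 <nil>

	// A decoded tag packs field number and wire type, exactly as the
	// generated code computes fieldNum := int32(wire >> 3) and wire & 0x7.
	tag, _, _ := decodeVarint([]byte{0x2A}) // field 5, wire type 2
	fmt.Println(tag>>3, tag&0x7)            // 5 2
}
```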
+var ( + ErrInvalidLengthProofs = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowProofs = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupProofs = fmt.Errorf("proto: unexpected end of group") +) diff --git a/proto/cosmos/staking/v1beta1/tx.proto b/proto/cosmos/staking/v1beta1/tx.proto index 7b05d89eea..d074fe010e 100644 --- a/proto/cosmos/staking/v1beta1/tx.proto +++ b/proto/cosmos/staking/v1beta1/tx.proto @@ -81,8 +81,7 @@ message MsgEditValidatorResponse {} // MsgDelegate defines a SDK message for performing a delegation of coins // from a delegator to a validator. message MsgDelegate { - option (gogoproto.equal) = false; - option (gogoproto.goproto_getters) = false; + option (gogoproto.equal) = false; string delegator_address = 1 [(gogoproto.moretags) = "yaml:\"delegator_address\""]; string validator_address = 2 [(gogoproto.moretags) = "yaml:\"validator_address\""]; @@ -95,8 +94,7 @@ message MsgDelegateResponse {} // MsgBeginRedelegate defines a SDK message for performing a redelegation // of coins from a delegator and source validator to a destination validator. message MsgBeginRedelegate { - option (gogoproto.equal) = false; - option (gogoproto.goproto_getters) = false; + option (gogoproto.equal) = false; string delegator_address = 1 [(gogoproto.moretags) = "yaml:\"delegator_address\""]; string validator_src_address = 2 [(gogoproto.moretags) = "yaml:\"validator_src_address\""]; @@ -112,8 +110,7 @@ message MsgBeginRedelegateResponse { // MsgUndelegate defines a SDK message for performing an undelegation from a // delegate and a validator. message MsgUndelegate { - option (gogoproto.equal) = false; - option (gogoproto.goproto_getters) = false; + option (gogoproto.equal) = false; string delegator_address = 1 [(gogoproto.moretags) = "yaml:\"delegator_address\""]; string validator_address = 2 [(gogoproto.moretags) = "yaml:\"validator_address\""]; diff --git a/scripts/protocgen.sh b/scripts/protocgen.sh index 20b8d70200..f02f9cc81e 100755 --- a/scripts/protocgen.sh +++ b/scripts/protocgen.sh @@ -38,6 +38,9 @@ go mod tidy buf protoc -I "proto" -I "third_party/proto" -I "testutil/testdata" --gocosmos_out=plugins=interfacetype+grpc,\ Mgoogle/protobuf/any.proto=github.com/cosmos/cosmos-sdk/codec/types:. ./testutil/testdata/*.proto +# generate baseapp test messages +(cd baseapp/testutil; buf generate) + # move proto files to the right places cp -r github.com/cosmos/cosmos-sdk/* ./ rm -rf github.com diff --git a/server/config/config.go b/server/config/config.go index 56751552ea..e7dc56ae83 100644 --- a/server/config/config.go +++ b/server/config/config.go @@ -69,8 +69,12 @@ type BaseConfig struct { // IndexEvents defines the set of events in the form {eventType}.{attributeKey}, // which informs Tendermint what to index. If empty, all events will be indexed. IndexEvents []string `mapstructure:"index-events"` + // IavlCacheSize set the size of the iavl tree cache. IAVLCacheSize uint64 `mapstructure:"iavl-cache-size"` + + // IAVLDisableFastNode enables or disables the fast sync node. + IAVLDisableFastNode bool `mapstructure:"iavl-disable-fastnode"` } // APIConfig defines the API listener configuration. 
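The IAVLDisableFastNode field added to BaseConfig above is wired through DefaultConfig and GetConfig in the hunks that follow. A minimal sketch, assuming only the viper keys visible in this diff, of how the setting travels from configuration into the struct:

```
package main

import (
	"fmt"

	"github.com/spf13/viper"
)

// BaseConfig is trimmed here to the two IAVL fields this diff touches.
type BaseConfig struct {
	IAVLCacheSize       uint64 `mapstructure:"iavl-cache-size"`
	IAVLDisableFastNode bool   `mapstructure:"iavl-disable-fastnode"`
}

func main() {
	v := viper.New()
	// Defaults chosen to match DefaultConfig below: the fast node feature
	// stays disabled unless an operator explicitly opts in.
	v.SetDefault("iavl-cache-size", 781250) // ~50 MB
	v.SetDefault("iavl-disable-fastnode", true)

	cfg := BaseConfig{
		IAVLCacheSize:       v.GetUint64("iavl-cache-size"),
		IAVLDisableFastNode: v.GetBool("iavl-disable-fastnode"),
	}
	fmt.Printf("%+v\n", cfg) // {IAVLCacheSize:781250 IAVLDisableFastNode:true}
}
```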
@@ -203,15 +207,16 @@ func (c *Config) GetMinGasPrices() sdk.DecCoins { func DefaultConfig() *Config { return &Config{ BaseConfig: BaseConfig{ - MinGasPrices: defaultMinGasPrices, - InterBlockCache: true, - Pruning: storetypes.PruningOptionDefault, - PruningKeepRecent: "0", - PruningKeepEvery: "0", - PruningInterval: "0", - MinRetainBlocks: 0, - IndexEvents: make([]string, 0), - IAVLCacheSize: 781250, // 50 MB + MinGasPrices: defaultMinGasPrices, + InterBlockCache: true, + Pruning: storetypes.PruningOptionDefault, + PruningKeepRecent: "0", + PruningKeepEvery: "0", + PruningInterval: "0", + MinRetainBlocks: 0, + IndexEvents: make([]string, 0), + IAVLCacheSize: 781250, // 50 MB + IAVLDisableFastNode: true, }, Telemetry: telemetry.Config{ Enabled: false, @@ -268,17 +273,17 @@ func GetConfig(v *viper.Viper) (Config, error) { return Config{ BaseConfig: BaseConfig{ - MinGasPrices: v.GetString("minimum-gas-prices"), - InterBlockCache: v.GetBool("inter-block-cache"), - Pruning: v.GetString("pruning"), - PruningKeepRecent: v.GetString("pruning-keep-recent"), - PruningKeepEvery: v.GetString("pruning-keep-every"), - PruningInterval: v.GetString("pruning-interval"), - HaltHeight: v.GetUint64("halt-height"), - HaltTime: v.GetUint64("halt-time"), - IndexEvents: v.GetStringSlice("index-events"), - MinRetainBlocks: v.GetUint64("min-retain-blocks"), - IAVLCacheSize: v.GetUint64("iavl-cache-size"), + MinGasPrices: v.GetString("minimum-gas-prices"), + InterBlockCache: v.GetBool("inter-block-cache"), + Pruning: v.GetString("pruning"), + PruningKeepRecent: v.GetString("pruning-keep-recent"), + PruningInterval: v.GetString("pruning-interval"), + HaltHeight: v.GetUint64("halt-height"), + HaltTime: v.GetUint64("halt-time"), + IndexEvents: v.GetStringSlice("index-events"), + MinRetainBlocks: v.GetUint64("min-retain-blocks"), + IAVLCacheSize: v.GetUint64("iavl-cache-size"), + IAVLDisableFastNode: v.GetBool("iavl-disable-fastnode"), }, Telemetry: telemetry.Config{ ServiceName: v.GetString("telemetry.service-name"), diff --git a/server/config/toml.go b/server/config/toml.go index 351bd09db2..af7a09295a 100644 --- a/server/config/toml.go +++ b/server/config/toml.go @@ -74,6 +74,10 @@ index-events = {{ .BaseConfig.IndexEvents }} # Default cache size is 50mb. iavl-cache-size = {{ .BaseConfig.IAVLCacheSize }} +# IAVLDisableFastNode enables or disables the fast node feature of IAVL. +# Default is true. 
+iavl-disable-fastnode = {{ .BaseConfig.IAVLDisableFastNode }} + ############################################################################### ### Telemetry Configuration ### ############################################################################### diff --git a/server/export.go b/server/export.go index 150add98e7..4cbcf27d45 100644 --- a/server/export.go +++ b/server/export.go @@ -4,7 +4,6 @@ package server import ( "fmt" - "io/ioutil" "os" "github.com/spf13/cobra" @@ -49,7 +48,7 @@ func ExportCmd(appExporter types.AppExporter, defaultNodeHome string) *cobra.Com return err } - genesis, err := ioutil.ReadFile(config.GenesisFile()) + genesis, err := os.ReadFile(config.GenesisFile()) if err != nil { return err } diff --git a/server/grpc/gogoreflection/fix_registration.go b/server/grpc/gogoreflection/fix_registration.go index ab77505748..32fc0e8e34 100644 --- a/server/grpc/gogoreflection/fix_registration.go +++ b/server/grpc/gogoreflection/fix_registration.go @@ -9,7 +9,7 @@ import ( _ "github.com/gogo/protobuf/gogoproto" // required so it does register the gogoproto file descriptor gogoproto "github.com/gogo/protobuf/proto" - // nolint: staticcheck + //nolint: staticcheck "github.com/golang/protobuf/proto" dpb "github.com/golang/protobuf/protoc-gen-go/descriptor" _ "github.com/regen-network/cosmos-proto" // look above @@ -86,7 +86,7 @@ func getFileDescriptor(filePath string) []byte { if len(fd) != 0 { return fd } - // nolint: staticcheck + //nolint: staticcheck return proto.FileDescriptor(filePath) } @@ -95,7 +95,7 @@ func getMessageType(name string) reflect.Type { if typ != nil { return typ } - // nolint: staticcheck + //nolint: staticcheck return proto.MessageType(name) } @@ -107,7 +107,7 @@ func getExtension(extID int32, m proto.Message) *gogoproto.ExtensionDesc { } } // check into proto registry - // nolint: staticcheck + //nolint: staticcheck for id, desc := range proto.RegisteredExtensions(m) { if id == extID { return &gogoproto.ExtensionDesc{ @@ -133,7 +133,7 @@ func getExtensionsNumbers(m proto.Message) []int32 { if len(out) != 0 { return out } - // nolint: staticcheck + //nolint: staticcheck protoExts := proto.RegisteredExtensions(m) out = make([]int32, 0, len(protoExts)) for id := range protoExts { diff --git a/server/grpc/gogoreflection/serverreflection.go b/server/grpc/gogoreflection/serverreflection.go index c2a0828e3b..ac1e3c2d05 100644 --- a/server/grpc/gogoreflection/serverreflection.go +++ b/server/grpc/gogoreflection/serverreflection.go @@ -23,6 +23,7 @@ The service implemented is defined in: https://github.com/grpc/grpc/blob/master/src/proto/grpc/reflection/v1alpha/reflection.proto. 
To register server reflection on a gRPC server: + import "google.golang.org/grpc/reflection" s := grpc.NewServer() @@ -32,7 +33,6 @@ To register server reflection on a gRPC server: reflection.Register(s) s.Serve(lis) - */ package gogoreflection // import "google.golang.org/grpc/reflection" @@ -41,13 +41,12 @@ import ( "compress/gzip" "fmt" "io" - "io/ioutil" "log" "reflect" "sort" "sync" - // nolint: staticcheck + //nolint: staticcheck "github.com/golang/protobuf/proto" dpb "github.com/golang/protobuf/protoc-gen-go/descriptor" "google.golang.org/grpc" @@ -219,7 +218,7 @@ func decompress(b []byte) ([]byte, error) { if err != nil { return nil, fmt.Errorf("bad gzipped descriptor: %v", err) } - out, err := ioutil.ReadAll(r) + out, err := io.ReadAll(r) if err != nil { return nil, fmt.Errorf("bad gzipped descriptor: %v", err) } diff --git a/server/grpc/grpc_web.go b/server/grpc/grpc_web.go index e08f007c73..99040ae263 100644 --- a/server/grpc/grpc_web.go +++ b/server/grpc/grpc_web.go @@ -25,8 +25,9 @@ func StartGRPCWeb(grpcSrv *grpc.Server, config config.Config) (*http.Server, err wrappedServer := grpcweb.WrapServer(grpcSrv, options...) grpcWebSrv := &http.Server{ - Addr: config.GRPCWeb.Address, - Handler: wrappedServer, + Addr: config.GRPCWeb.Address, + Handler: wrappedServer, + ReadHeaderTimeout: 500 * time.Millisecond, } errCh := make(chan error) diff --git a/server/grpc/grpc_web_test.go b/server/grpc/grpc_web_test.go index fa657ae941..5fd666fd9d 100644 --- a/server/grpc/grpc_web_test.go +++ b/server/grpc/grpc_web_test.go @@ -7,7 +7,6 @@ import ( "encoding/binary" "fmt" "io" - "io/ioutil" "net/http" "net/textproto" "strconv" @@ -195,7 +194,7 @@ func (s *GRPCWebTestSuite) makeGrpcRequest( return nil, Trailer{}, nil, err } defer resp.Body.Close() - contents, err := ioutil.ReadAll(resp.Body) + contents, err := io.ReadAll(resp.Body) if err != nil { return nil, Trailer{}, nil, err } diff --git a/server/init.go b/server/init.go index 00b7b26af3..331b1b8786 100644 --- a/server/init.go +++ b/server/init.go @@ -59,5 +59,5 @@ func GenerateSaveCoinKey( return sdk.AccAddress{}, "", err } - return sdk.AccAddress(k.GetAddress()), mnemonic, nil + return k.GetAddress(), mnemonic, nil } diff --git a/server/mock/app.go b/server/mock/app.go index f31c6e948f..60ea1ebc94 100644 --- a/server/mock/app.go +++ b/server/mock/app.go @@ -1,18 +1,19 @@ package mock import ( + "context" "encoding/json" "errors" "fmt" "path/filepath" - "github.com/tendermint/tendermint/types" - abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/types" bam "github.com/cosmos/cosmos-sdk/baseapp" "github.com/cosmos/cosmos-sdk/codec" + storetypes "github.com/cosmos/cosmos-sdk/store/types" sdk "github.com/cosmos/cosmos-sdk/types" ) @@ -36,7 +37,6 @@ func NewApp(rootDir string, logger log.Logger) (abci.Application, error) { baseApp.SetInitChainer(InitChainer(capKeyMainStore)) - // Set a Route. baseApp.Router().AddRoute(sdk.NewRoute("kvstore", KVStoreHandler(capKeyMainStore))) // Load latest version. 
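The mock app above wires a single legacy route, "kvstore", whose handler persists a transaction's key/value pair into the main store; the next hunk adds a Msg-server wrapper around that same handler. A simplified stand-in for the routing pattern, using plain Go types rather than SDK ones:

```
package main

import "fmt"

// kvStore and handler are deliberately simplified stand-ins for the SDK's
// KVStore and sdk.Handler types; only the routing shape is illustrated.
type kvStore map[string]string

type handler func(store kvStore, key, value string) error

// kvStoreHandler returns a closure that writes the pair into the store,
// much as the mock KVStoreHandler does via ctx.KVStore(capKeyMainStore).
func kvStoreHandler() handler {
	return func(store kvStore, key, value string) error {
		store[key] = value
		return nil
	}
}

func main() {
	router := map[string]handler{"kvstore": kvStoreHandler()}
	store := kvStore{}
	if err := router["kvstore"](store, "hello", "world"); err != nil {
		panic(err)
	}
	fmt.Println(store) // map[hello:world]
}
```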
@@ -128,3 +128,16 @@ func AppGenStateEmpty(_ *codec.LegacyAmino, _ types.GenesisDoc, _ []json.RawMess appState = json.RawMessage(``) return } + +// Manually write the handlers for this custom message +type MsgServer interface { + Test(ctx context.Context, msg *kvstoreTx) (*sdk.Result, error) +} + +type MsgServerImpl struct { + capKeyMainStore *storetypes.KVStoreKey +} + +func (m MsgServerImpl) Test(ctx context.Context, msg *kvstoreTx) (*sdk.Result, error) { + return KVStoreHandler(m.capKeyMainStore)(sdk.UnwrapSDKContext(ctx), msg) +} diff --git a/server/mock/helpers.go b/server/mock/helpers.go index 88aacb4d8e..334a21d9fd 100644 --- a/server/mock/helpers.go +++ b/server/mock/helpers.go @@ -2,7 +2,6 @@ package mock import ( "fmt" - "io/ioutil" "os" abci "github.com/tendermint/tendermint/abci/types" @@ -14,7 +13,7 @@ import ( func SetupApp() (abci.Application, func(), error) { logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout)). With("module", "mock") - rootDir, err := ioutil.TempDir("", "mock-sdk") + rootDir, err := os.MkdirTemp("", "mock-sdk") if err != nil { return nil, nil, err } diff --git a/server/mock/store.go b/server/mock/store.go index 460f99faec..cf60c7f16a 100644 --- a/server/mock/store.go +++ b/server/mock/store.go @@ -121,6 +121,10 @@ func (ms multiStore) SetIAVLCacheSize(size int) { panic("not implemented") } +func (ms multiStore) SetIAVLDisableFastNode(disable bool) { + panic("not implemented") +} + func (ms multiStore) SetInitialVersion(version int64) error { panic("not implemented") } @@ -135,6 +139,10 @@ func (ms multiStore) Restore( panic("not implemented") } +func (ms multiStore) RollbackToVersion(version int64) error { + panic("not implemented") +} + var _ sdk.KVStore = kvStore{} type kvStore struct { diff --git a/server/mock/tx.go b/server/mock/tx.go index 3c93ffb834..6898e0937b 100644 --- a/server/mock/tx.go +++ b/server/mock/tx.go @@ -1,4 +1,4 @@ -//nolint +// nolint package mock import ( diff --git a/server/rollback.go b/server/rollback.go index 7378a1ff83..f39c40cac4 100644 --- a/server/rollback.go +++ b/server/rollback.go @@ -4,13 +4,13 @@ import ( "fmt" "github.com/cosmos/cosmos-sdk/client/flags" - "github.com/cosmos/cosmos-sdk/store/rootmulti" + "github.com/cosmos/cosmos-sdk/server/types" "github.com/spf13/cobra" tmcmd "github.com/tendermint/tendermint/cmd/tendermint/commands" ) // NewRollbackCmd creates a command to rollback tendermint and multistore state by one height. -func NewRollbackCmd(defaultNodeHome string) *cobra.Command { +func NewRollbackCmd(appCreator types.AppCreator, defaultNodeHome string) *cobra.Command { cmd := &cobra.Command{ Use: "rollback", Short: "rollback cosmos-sdk and tendermint state by one height", @@ -30,14 +30,17 @@ application. 
if err != nil { return err } + app := appCreator(ctx.Logger, db, nil, ctx.Viper) // rollback tendermint state height, hash, err := tmcmd.RollbackState(ctx.Config) if err != nil { return fmt.Errorf("failed to rollback tendermint state: %w", err) } // rollback the multistore - cms := rootmulti.NewStore(db, ctx.Logger) - cms.RollbackToVersion(height) + + if err := app.CommitMultiStore().RollbackToVersion(height); err != nil { + return fmt.Errorf("failed to rollback to version: %w", err) + } fmt.Printf("Rolled back state to height %d and hash %X", height, hash) return nil diff --git a/server/rosetta/client_online.go b/server/rosetta/client_online.go index d03a5998e8..76578c781a 100644 --- a/server/rosetta/client_online.go +++ b/server/rosetta/client_online.go @@ -250,7 +250,8 @@ func (c *Client) TxOperationsAndSignersAccountIdentifiers(signed bool, txBytes [ } // GetTx returns a transaction given its hash. For Rosetta we make a synthetic transaction for BeginBlock -// and EndBlock to adhere to balance tracking rules. +// +// and EndBlock to adhere to balance tracking rules. func (c *Client) GetTx(ctx context.Context, hash string) (*rosettatypes.Transaction, error) { hashBytes, err := hex.DecodeString(hash) if err != nil { diff --git a/server/rosetta/converter.go b/server/rosetta/converter.go index b057a32a6e..4abeb9e1ff 100644 --- a/server/rosetta/converter.go +++ b/server/rosetta/converter.go @@ -36,9 +36,9 @@ import ( // Converter is a utility that can be used to convert // back and forth from rosetta to sdk and tendermint types // IMPORTANT NOTES: -// - IT SHOULD BE USED ONLY TO DEAL WITH THINGS -// IN A STATELESS WAY! IT SHOULD NEVER INTERACT DIRECTLY -// WITH TENDERMINT RPC AND COSMOS GRPC +// - IT SHOULD BE USED ONLY TO DEAL WITH THINGS +// IN A STATELESS WAY! IT SHOULD NEVER INTERACT DIRECTLY +// WITH TENDERMINT RPC AND COSMOS GRPC // // - IT SHOULD RETURN cosmos rosetta gateway error types! type Converter interface { diff --git a/server/rosetta/lib/server/server.go b/server/rosetta/lib/server/server.go index 238583103a..dd60a086aa 100644 --- a/server/rosetta/lib/server/server.go +++ b/server/rosetta/lib/server/server.go @@ -41,7 +41,7 @@ type Server struct { } func (h Server) Start() error { - return http.ListenAndServe(h.addr, h.h) + return http.ListenAndServe(h.addr, h.h) //nolint:gosec } func NewServer(settings Settings) (Server, error) { diff --git a/server/start.go b/server/start.go index a20f73f329..ec536ba20e 100644 --- a/server/start.go +++ b/server/start.go @@ -53,6 +53,8 @@ const ( FlagPruningInterval = "pruning-interval" FlagIndexEvents = "index-events" FlagMinRetainBlocks = "min-retain-blocks" + FlagIAVLCacheSize = "iavl-cache-size" + FlagIAVLFastNode = "iavl-disable-fastnode" // state sync-related flags FlagStateSyncSnapshotInterval = "state-sync.snapshot-interval" @@ -163,6 +165,8 @@ is performed. Note, when enabled, gRPC will also be automatically enabled. 
cmd.Flags().Uint64(FlagStateSyncSnapshotInterval, 0, "State sync snapshot interval") cmd.Flags().Uint32(FlagStateSyncSnapshotKeepRecent, 2, "State sync snapshot to keep") + cmd.Flags().Bool(FlagIAVLFastNode, true, "Enable fast node for IAVL tree") + // add support for all Tendermint-specific command line options tcmd.AddNodeFlags(cmd) return cmd diff --git a/server/types/app.go b/server/types/app.go index 467f627c60..f4c59afea2 100644 --- a/server/types/app.go +++ b/server/types/app.go @@ -15,6 +15,7 @@ import ( "github.com/cosmos/cosmos-sdk/client" "github.com/cosmos/cosmos-sdk/server/api" "github.com/cosmos/cosmos-sdk/server/config" + sdk "github.com/cosmos/cosmos-sdk/types" ) // ServerStartTime defines the time duration that the server need to stay running after startup @@ -51,6 +52,9 @@ type ( // RegisterTendermintService registers the gRPC Query service for tendermint queries. RegisterTendermintService(clientCtx client.Context) + + // CommitMultiStore Returns the multistore instance + CommitMultiStore() sdk.CommitMultiStore } // AppCreator is a function that allows us to lazily initialize an diff --git a/server/util.go b/server/util.go index fd48f446b7..af77989c94 100644 --- a/server/util.go +++ b/server/util.go @@ -287,7 +287,7 @@ func AddCommands(rootCmd *cobra.Command, defaultNodeHome string, appCreator type tendermintCmd, ExportCmd(appExport, defaultNodeHome), version.NewVersionCommand(), - NewRollbackCmd(defaultNodeHome), + NewRollbackCmd(appCreator, defaultNodeHome), ) } diff --git a/simapp/app.go b/simapp/app.go index 53ab1ab8d8..16302ecc35 100644 --- a/simapp/app.go +++ b/simapp/app.go @@ -25,6 +25,7 @@ import ( "github.com/cosmos/cosmos-sdk/server/config" servertypes "github.com/cosmos/cosmos-sdk/server/types" simappparams "github.com/cosmos/cosmos-sdk/simapp/params" + "github.com/cosmos/cosmos-sdk/store/streaming" "github.com/cosmos/cosmos-sdk/testutil/testdata" sdk "github.com/cosmos/cosmos-sdk/types" "github.com/cosmos/cosmos-sdk/types/module" @@ -212,6 +213,12 @@ func NewSimApp( // not include this key. memKeys := sdk.NewMemoryStoreKeys(capabilitytypes.MemStoreKey, "testingkey") + // configure state listening capabilities using AppOptions + // we are doing nothing with the returned streamingServices and waitGroup in this case + if _, _, err := streaming.LoadStreamingServices(bApp, appOpts, appCodec, keys); err != nil { + tmos.Exit(err.Error()) + } + app := &SimApp{ BaseApp: bApp, legacyAmino: legacyAmino, diff --git a/simapp/export.go b/simapp/export.go index eb213aee24..e6a0525fba 100644 --- a/simapp/export.go +++ b/simapp/export.go @@ -46,7 +46,8 @@ func (app *SimApp) ExportAppStateAndValidators( // prepare for fresh start at zero height // NOTE zero height genesis is a temporary feature which will be deprecated -// in favour of export at a block height +// +// in favour of export at a block height func (app *SimApp) prepForZeroHeightGenesis(ctx sdk.Context, jailAllowedAddrs []string) { applyAllowedAddrs := false diff --git a/simapp/helpers/test_helpers.go b/simapp/helpers/test_helpers.go index d3465ab7c3..1eb496e57a 100644 --- a/simapp/helpers/test_helpers.go +++ b/simapp/helpers/test_helpers.go @@ -2,6 +2,7 @@ package helpers import ( "math/rand" + "time" "github.com/cosmos/cosmos-sdk/client" cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types" @@ -18,10 +19,12 @@ const ( ) // GenTx generates a signed mock transaction. 
-func GenTx(r *rand.Rand, gen client.TxConfig, msgs []sdk.Msg, feeAmt sdk.Coins, gas uint64, chainID string, accNums, accSeqs []uint64, priv ...cryptotypes.PrivKey) (sdk.Tx, error) { +func GenTx(gen client.TxConfig, msgs []sdk.Msg, feeAmt sdk.Coins, gas uint64, chainID string, accNums, accSeqs []uint64, priv ...cryptotypes.PrivKey) (sdk.Tx, error) { sigs := make([]signing.SignatureV2, len(priv)) // create a random length memo + r := rand.New(rand.NewSource(time.Now().UnixNano())) + memo := simulation.RandStringOfLength(r, simulation.RandIntBetween(r, 0, 100)) signMode := gen.SignModeHandler().DefaultMode() diff --git a/simapp/simd/cmd/root.go b/simapp/simd/cmd/root.go index 2313b1769d..9f8660a27f 100644 --- a/simapp/simd/cmd/root.go +++ b/simapp/simd/cmd/root.go @@ -19,6 +19,7 @@ import ( "github.com/cosmos/cosmos-sdk/client/debug" "github.com/cosmos/cosmos-sdk/client/flags" "github.com/cosmos/cosmos-sdk/client/keys" + "github.com/cosmos/cosmos-sdk/client/pruning" "github.com/cosmos/cosmos-sdk/client/rpc" "github.com/cosmos/cosmos-sdk/server" servertypes "github.com/cosmos/cosmos-sdk/server/types" @@ -117,6 +118,7 @@ func initAppConfig() (string, interface{}) { // // In simapp, we set the min gas prices to 0. srvCfg.MinGasPrices = "0stake" + // srvCfg.BaseConfig.IAVLDisableFastNode = true // disable fastnode by default customAppConfig := CustomAppConfig{ Config: *srvCfg, @@ -141,6 +143,7 @@ func initRootCmd(rootCmd *cobra.Command, encodingConfig params.EncodingConfig) { cfg := sdk.GetConfig() cfg.Seal() + a := appCreator{encodingConfig} rootCmd.AddCommand( genutilcli.InitCmd(simapp.ModuleBasics, simapp.DefaultNodeHome), genutilcli.CollectGenTxsCmd(banktypes.GenesisBalancesIterator{}, simapp.DefaultNodeHome), @@ -152,9 +155,9 @@ func initRootCmd(rootCmd *cobra.Command, encodingConfig params.EncodingConfig) { testnetCmd(simapp.ModuleBasics, banktypes.GenesisBalancesIterator{}), debug.Cmd(), config.Cmd(), + pruning.PruningCmd(a.newApp), ) - a := appCreator{encodingConfig} server.AddCommands(rootCmd, simapp.DefaultNodeHome, a.newApp, a.appExport, addModuleInitFlags) // add keybase, auxiliary RPC, query, and tx child commands @@ -272,6 +275,8 @@ func (a appCreator) newApp(logger log.Logger, db dbm.DB, traceStore io.Writer, a baseapp.SetSnapshotStore(snapshotStore), baseapp.SetSnapshotInterval(cast.ToUint64(appOpts.Get(server.FlagStateSyncSnapshotInterval))), baseapp.SetSnapshotKeepRecent(cast.ToUint32(appOpts.Get(server.FlagStateSyncSnapshotKeepRecent))), + baseapp.SetIAVLCacheSize(cast.ToInt(appOpts.Get(server.FlagIAVLCacheSize))), + baseapp.SetIAVLDisableFastNode(cast.ToBool(appOpts.Get(server.FlagIAVLFastNode))), ) } diff --git a/simapp/simd/cmd/testnet.go b/simapp/simd/cmd/testnet.go index fcc47fa7b1..150dd895ca 100644 --- a/simapp/simd/cmd/testnet.go +++ b/simapp/simd/cmd/testnet.go @@ -384,7 +384,7 @@ func calculateIP(ip string, i int) (string, error) { } func writeFile(name string, dir string, contents []byte) error { - writePath := filepath.Join(dir) + writePath := filepath.Join(dir) //nolint:gocritic file := filepath.Join(writePath, name) err := tmos.EnsureDir(writePath, 0o755) diff --git a/simapp/state.go b/simapp/state.go index 539964b58f..68f8589d7c 100644 --- a/simapp/state.go +++ b/simapp/state.go @@ -4,8 +4,8 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "math/rand" + "os" "time" tmjson "github.com/tendermint/tendermint/libs/json" @@ -54,7 +54,7 @@ func AppStateFn(cdc codec.JSONCodec, simManager *module.SimulationManager) simty case config.ParamsFile != "": appParams := 
make(simtypes.AppParams) - bz, err := ioutil.ReadFile(config.ParamsFile) + bz, err := os.ReadFile(config.ParamsFile) if err != nil { panic(err) } @@ -193,7 +193,7 @@ func AppStateRandomizedFn( // AppStateFromGenesisFileFn util function to generate the genesis AppState // from a genesis.json file. func AppStateFromGenesisFileFn(r io.Reader, cdc codec.JSONCodec, genesisFile string) (tmtypes.GenesisDoc, []simtypes.Account) { - bytes, err := ioutil.ReadFile(genesisFile) + bytes, err := os.ReadFile(genesisFile) if err != nil { panic(err) } diff --git a/simapp/test_helpers.go b/simapp/test_helpers.go index ff641f2c68..1e1bd249c1 100644 --- a/simapp/test_helpers.go +++ b/simapp/test_helpers.go @@ -5,7 +5,6 @@ import ( "encoding/hex" "encoding/json" "fmt" - "math/rand" "strconv" "testing" "time" @@ -327,7 +326,6 @@ func SignCheckDeliver( chainID string, accNums, accSeqs []uint64, expSimPass, expPass bool, priv ...cryptotypes.PrivKey, ) (sdk.GasInfo, *sdk.Result, error) { tx, err := helpers.GenTx( - rand.New(rand.NewSource(time.Now().UnixNano())), txCfg, msgs, sdk.Coins{sdk.NewInt64Coin(sdk.DefaultBondDenom, 0)}, @@ -378,7 +376,6 @@ func GenSequenceOfTxs(txGen client.TxConfig, msgs []sdk.Msg, accNums []uint64, i var err error for i := 0; i < numToGenerate; i++ { txs[i], err = helpers.GenTx( - rand.New(rand.NewSource(time.Now().UnixNano())), txGen, msgs, sdk.Coins{sdk.NewInt64Coin(sdk.DefaultBondDenom, 0)}, diff --git a/simapp/utils.go b/simapp/utils.go index b9d12dd03f..09edfcfdd5 100644 --- a/simapp/utils.go +++ b/simapp/utils.go @@ -3,7 +3,7 @@ package simapp import ( "encoding/json" "fmt" - "io/ioutil" + "os" "github.com/tendermint/tendermint/libs/log" dbm "github.com/tendermint/tm-db" @@ -34,7 +34,7 @@ func SetupSimulation(dirPrefix, dbName string) (simtypes.Config, dbm.DB, string, logger = log.NewNopLogger() } - dir, err := ioutil.TempDir("", dirPrefix) + dir, err := os.MkdirTemp("", dirPrefix) if err != nil { return simtypes.Config{}, nil, "", nil, false, err } @@ -56,7 +56,7 @@ func SimulationOperations(app App, cdc codec.JSONCodec, config simtypes.Config) } if config.ParamsFile != "" { - bz, err := ioutil.ReadFile(config.ParamsFile) + bz, err := os.ReadFile(config.ParamsFile) if err != nil { panic(err) } @@ -84,7 +84,7 @@ func CheckExportSimulation( return err } - if err := ioutil.WriteFile(config.ExportStatePath, []byte(exported.AppState), 0o600); err != nil { + if err := os.WriteFile(config.ExportStatePath, []byte(exported.AppState), 0o600); err != nil { return err } } @@ -96,7 +96,7 @@ func CheckExportSimulation( return err } - if err := ioutil.WriteFile(config.ExportParamsPath, paramsBz, 0o600); err != nil { + if err := os.WriteFile(config.ExportParamsPath, paramsBz, 0o600); err != nil { return err } } diff --git a/snapshots/chunk_test.go b/snapshots/chunk_test.go index 78c870b90a..6935c082ad 100644 --- a/snapshots/chunk_test.go +++ b/snapshots/chunk_test.go @@ -4,7 +4,6 @@ import ( "bytes" "errors" "io" - "io/ioutil" "testing" "github.com/stretchr/testify/assert" @@ -66,10 +65,10 @@ func TestChunkWriter(t *testing.T) { require.NoError(t, err) chunkWriter.CloseWithError(theErr) }() - chunk, err := ioutil.ReadAll(<-ch) + chunk, err := io.ReadAll(<-ch) require.NoError(t, err) assert.Equal(t, []byte{1, 2}, chunk) - _, err = ioutil.ReadAll(<-ch) + _, err = io.ReadAll(<-ch) require.Error(t, err) assert.Equal(t, theErr, err) assert.Empty(t, ch) @@ -144,7 +143,7 @@ func TestChunkReader(t *testing.T) { // Closing the reader should close the writer pr, pw = io.Pipe() pch = make(chan 
io.ReadCloser, 2) - pch <- ioutil.NopCloser(bytes.NewBuffer([]byte{1, 2, 3})) + pch <- io.NopCloser(bytes.NewBuffer([]byte{1, 2, 3})) pch <- pr close(pch) diff --git a/snapshots/helpers_test.go b/snapshots/helpers_test.go index e2ba90864b..219dfd93e9 100644 --- a/snapshots/helpers_test.go +++ b/snapshots/helpers_test.go @@ -7,7 +7,6 @@ import ( "crypto/sha256" "errors" "io" - "io/ioutil" "os" "testing" "time" @@ -44,7 +43,7 @@ func hash(chunks [][]byte) []byte { func makeChunks(chunks [][]byte) <-chan io.ReadCloser { ch := make(chan io.ReadCloser, len(chunks)) for _, chunk := range chunks { - ch <- ioutil.NopCloser(bytes.NewReader(chunk)) + ch <- io.NopCloser(bytes.NewReader(chunk)) } close(ch) return ch @@ -53,7 +52,7 @@ func makeChunks(chunks [][]byte) <-chan io.ReadCloser { func readChunks(chunks <-chan io.ReadCloser) [][]byte { bodies := [][]byte{} for chunk := range chunks { - body, err := ioutil.ReadAll(chunk) + body, err := io.ReadAll(chunk) if err != nil { panic(err) } @@ -147,10 +146,10 @@ func (m *mockSnapshotter) SupportedFormats() []uint32 { // setupBusyManager creates a manager with an empty store that is busy creating a snapshot at height 1. // The snapshot will complete when the returned closer is called. func setupBusyManager(t *testing.T) *snapshots.Manager { - // ioutil.TempDir() is used instead of testing.T.TempDir() + // os.MkdirTemp() is used instead of testing.T.TempDir() // see https://github.com/cosmos/cosmos-sdk/pull/8475 for // this change's rationale. - tempdir, err := ioutil.TempDir("", "") + tempdir, err := os.MkdirTemp("", "") require.NoError(t, err) t.Cleanup(func() { _ = os.RemoveAll(tempdir) }) diff --git a/snapshots/manager.go b/snapshots/manager.go index 02878963d4..74ea4f4d86 100644 --- a/snapshots/manager.go +++ b/snapshots/manager.go @@ -5,7 +5,6 @@ import ( "crypto/sha256" "fmt" "io" - "io/ioutil" "math" "sort" "sync" @@ -41,12 +40,12 @@ type restoreDone struct { // Although the ABCI interface (and this manager) passes chunks as byte slices, the internal // snapshot/restore APIs use IO streams (i.e. chan io.ReadCloser), for two reasons: // -// 1) In the future, ABCI should support streaming. Consider e.g. InitChain during chain -// upgrades, which currently passes the entire chain state as an in-memory byte slice. -// https://github.com/tendermint/tendermint/issues/5184 +// 1. In the future, ABCI should support streaming. Consider e.g. InitChain during chain +// upgrades, which currently passes the entire chain state as an in-memory byte slice. +// https://github.com/tendermint/tendermint/issues/5184 // -// 2) io.ReadCloser streams automatically propagate IO errors, and can pass arbitrary -// errors via io.Pipe.CloseWithError(). +// 2. io.ReadCloser streams automatically propagate IO errors, and can pass arbitrary +// errors via io.Pipe.CloseWithError(). type Manager struct { store *Store multistore types.Snapshotter @@ -220,7 +219,7 @@ func (m *Manager) LoadChunk(height uint64, format uint32, chunk uint32) ([]byte, } defer reader.Close() - return ioutil.ReadAll(reader) + return io.ReadAll(reader) } // Prune prunes snapshots, if no other operations are in progress. @@ -353,7 +352,7 @@ func (m *Manager) RestoreChunk(chunk []byte) (bool, error) { } // Pass the chunk to the restore, and wait for completion if it was the final one. 
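The change below is one of many in this diff migrating off the io/ioutil package, which Go 1.16 deprecated: io.NopCloser, io.ReadAll, os.ReadFile, os.WriteFile, and os.MkdirTemp replace their ioutil counterparts one for one. A minimal sketch of the replacement pair used here:

```
package main

import (
	"bytes"
	"fmt"
	"io"
)

func main() {
	// Was ioutil.NopCloser / ioutil.ReadAll before Go 1.16.
	rc := io.NopCloser(bytes.NewReader([]byte("chunk")))
	defer rc.Close() // NopCloser's Close is a no-op, but the habit is cheap

	body, err := io.ReadAll(rc)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", body) // chunk
}
```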
- m.chRestore <- ioutil.NopCloser(bytes.NewReader(chunk)) + m.chRestore <- io.NopCloser(bytes.NewReader(chunk)) m.restoreChunkIndex++ if int(m.restoreChunkIndex) >= len(m.restoreChunkHashes) { diff --git a/snapshots/store.go b/snapshots/store.go index a80c49c0c2..2500ee1422 100644 --- a/snapshots/store.go +++ b/snapshots/store.go @@ -259,7 +259,7 @@ func (s *Store) Save( snapshotHasher := sha256.New() chunkHasher := sha256.New() for chunkBody := range chunks { - defer chunkBody.Close() // nolint: staticcheck + defer chunkBody.Close() //nolint: staticcheck dir := s.pathSnapshot(height, format) err = os.MkdirAll(dir, 0o755) if err != nil { @@ -270,7 +270,7 @@ func (s *Store) Save( if err != nil { return nil, sdkerrors.Wrapf(err, "failed to create snapshot chunk file %q", path) } - defer file.Close() // nolint: staticcheck + defer file.Close() //nolint: staticcheck chunkHasher.Reset() _, err = io.Copy(io.MultiWriter(file, chunkHasher, snapshotHasher), chunkBody) diff --git a/snapshots/store_test.go b/snapshots/store_test.go index 3872525fb4..d3abb2d702 100644 --- a/snapshots/store_test.go +++ b/snapshots/store_test.go @@ -4,7 +4,6 @@ import ( "bytes" "errors" "io" - "io/ioutil" "os" "path/filepath" "testing" @@ -20,10 +19,10 @@ import ( ) func setupStore(t *testing.T) *snapshots.Store { - // ioutil.TempDir() is used instead of testing.T.TempDir() + // os.MkdirTemp() is used instead of testing.T.TempDir() // see https://github.com/cosmos/cosmos-sdk/pull/8475 for // this change's rationale. - tempdir, err := ioutil.TempDir("", "") + tempdir, err := os.MkdirTemp("", "") require.NoError(t, err) t.Cleanup(func() { _ = os.RemoveAll(tempdir) }) @@ -201,7 +200,7 @@ func TestStore_Load(t *testing.T) { for i := uint32(0); i < snapshot.Chunks; i++ { reader, ok := <-chunks require.True(t, ok) - chunk, err := ioutil.ReadAll(reader) + chunk, err := io.ReadAll(reader) require.NoError(t, err) err = reader.Close() require.NoError(t, err) @@ -226,7 +225,7 @@ func TestStore_LoadChunk(t *testing.T) { chunk, err = store.LoadChunk(2, 1, 0) require.NoError(t, err) require.NotNil(t, chunk) - body, err := ioutil.ReadAll(chunk) + body, err := io.ReadAll(chunk) require.NoError(t, err) assert.Equal(t, []byte{2, 1, 0}, body) err = chunk.Close() @@ -323,7 +322,7 @@ func TestStore_Save(t *testing.T) { ch := make(chan io.ReadCloser, 2) ch <- pr - ch <- ioutil.NopCloser(bytes.NewBuffer([]byte{0xff})) + ch <- io.NopCloser(bytes.NewBuffer([]byte{0xff})) close(ch) _, err = store.Save(6, 1, ch) diff --git a/snapshots/types/util.go b/snapshots/types/util.go index 125ea6fb46..348b505768 100644 --- a/snapshots/types/util.go +++ b/snapshots/types/util.go @@ -4,7 +4,7 @@ import ( protoio "github.com/gogo/protobuf/io" ) -// WriteExtensionItem writes an item payload for current extention snapshotter. +// WriteExtensionItem writes an item payload for current extension snapshotter. 
func WriteExtensionItem(protoWriter protoio.Writer, item []byte) error {
 return protoWriter.WriteMsg(&SnapshotItem{
 Item: &SnapshotItem_ExtensionPayload{
diff --git a/store/cache/cache_test.go b/store/cache/cache_test.go
index 45c5d147a4..1a38981adb 100644
--- a/store/cache/cache_test.go
+++ b/store/cache/cache_test.go
@@ -18,7 +18,7 @@ func TestGetOrSetStoreCache(t *testing.T) {
 mngr := cache.NewCommitKVStoreCacheManager(cache.DefaultCommitKVStoreCacheSize)
 sKey := types.NewKVStoreKey("test")
- tree, err := iavl.NewMutableTree(db, 100)
+ tree, err := iavl.NewMutableTree(db, 100, false)
 require.NoError(t, err)
 store := iavlstore.UnsafeNewStore(tree)
 store2 := mngr.GetStoreCache(sKey, store)
@@ -32,7 +32,7 @@ func TestUnwrap(t *testing.T) {
 mngr := cache.NewCommitKVStoreCacheManager(cache.DefaultCommitKVStoreCacheSize)
 sKey := types.NewKVStoreKey("test")
- tree, err := iavl.NewMutableTree(db, 100)
+ tree, err := iavl.NewMutableTree(db, 100, false)
 require.NoError(t, err)
 store := iavlstore.UnsafeNewStore(tree)
 _ = mngr.GetStoreCache(sKey, store)
@@ -46,7 +46,7 @@ func TestStoreCache(t *testing.T) {
 mngr := cache.NewCommitKVStoreCacheManager(cache.DefaultCommitKVStoreCacheSize)
 sKey := types.NewKVStoreKey("test")
- tree, err := iavl.NewMutableTree(db, 100)
+ tree, err := iavl.NewMutableTree(db, 100, false)
 require.NoError(t, err)
 store := iavlstore.UnsafeNewStore(tree)
 kvStore := mngr.GetStoreCache(sKey, store)
diff --git a/store/cachekv/mergeiterator.go b/store/cachekv/mergeiterator.go
index 25dfac8033..b8bbee6c5d 100644
--- a/store/cachekv/mergeiterator.go
+++ b/store/cachekv/mergeiterator.go
@@ -33,7 +33,8 @@ func newCacheMergeIterator(parent, cache types.Iterator, ascending bool) *cacheM
}
// Domain implements Iterator.
-// If the domains are different, returns the union.
+// It returns the union of the iter.parent domain and the iter.cache domain.
+// If the domains are disjoint, this includes the domain in between them as well.
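+// For example (hypothetical bounds): a parent domain of ["a", "f") and a
+// cache domain of ["k", "p") yield ["a", "p"), which also covers the gap
+// ["f", "k") between the two.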
func (iter *cacheMergeIterator) Domain() (start, end []byte) { startP, endP := iter.parent.Domain() startC, endC := iter.cache.Domain() diff --git a/store/cachekv/search_benchmark_test.go b/store/cachekv/search_benchmark_test.go index d7f1dcb8d4..921bff4e38 100644 --- a/store/cachekv/search_benchmark_test.go +++ b/store/cachekv/search_benchmark_test.go @@ -1,9 +1,10 @@ package cachekv import ( - db "github.com/tendermint/tm-db" "strconv" "testing" + + db "github.com/tendermint/tm-db" ) func BenchmarkLargeUnsortedMisses(b *testing.B) { diff --git a/store/cachekv/store.go b/store/cachekv/store.go index 7004073ea1..e596689877 100644 --- a/store/cachekv/store.go +++ b/store/cachekv/store.go @@ -313,17 +313,7 @@ func (store *Store) dirtyItems(start, end []byte) { } sort.Strings(strL) - // Now find the values within the domain - // [start, end) - startIndex := findStartIndex(strL, startStr) - endIndex := findEndIndex(strL, endStr) - - if endIndex < 0 { - endIndex = len(strL) - 1 - } - if startIndex < 0 { - startIndex = 0 - } + startIndex, endIndex := findStartEndIndex(strL, startStr, endStr) // Since we spent cycles to sort the values, we should process and remove a reasonable amount // ensure start to end is at least minSortSize in size @@ -347,17 +337,23 @@ func (store *Store) dirtyItems(start, end []byte) { store.clearUnsortedCacheSubset(kvL, stateAlreadySorted) } -func (store *Store) clearUnsortedCacheSubset(unsorted []*kv.Pair, sortState sortState) { - n := len(store.unsortedCache) - if len(unsorted) == n { // This pattern allows the Go compiler to emit the map clearing idiom for the entire map. - for key := range store.unsortedCache { - delete(store.unsortedCache, key) - } - } else { // Otherwise, normally delete the unsorted keys from the map. - for _, kv := range unsorted { - delete(store.unsortedCache, conv.UnsafeBytesToStr(kv.Key)) - } +func findStartEndIndex(strL []string, startStr, endStr string) (int, int) { + // Now find the values within the domain + // [start, end) + startIndex := findStartIndex(strL, startStr) + endIndex := findEndIndex(strL, endStr) + + if endIndex < 0 { + endIndex = len(strL) - 1 + } + if startIndex < 0 { + startIndex = 0 } + return startIndex, endIndex +} + +func (store *Store) clearUnsortedCacheSubset(unsorted []*kv.Pair, sortState sortState) { + store.deleteKeysFromUnsortedCache(unsorted) if sortState == stateUnsorted { sort.Slice(unsorted, func(i, j int) bool { @@ -369,6 +365,7 @@ func (store *Store) clearUnsortedCacheSubset(unsorted []*kv.Pair, sortState sort if item.Value == nil { // deleted element, tracked by store.deleted // setting arbitrary value + // TODO: Don't ignore this error. store.sortedCache.Set(item.Key, []byte{}) continue } @@ -379,11 +376,26 @@ func (store *Store) clearUnsortedCacheSubset(unsorted []*kv.Pair, sortState sort } } +func (store *Store) deleteKeysFromUnsortedCache(unsorted []*kv.Pair) { + n := len(store.unsortedCache) + if len(unsorted) == n { // This pattern allows the Go compiler to emit the map clearing idiom for the entire map. + for key := range store.unsortedCache { + delete(store.unsortedCache, key) + } + } else { // Otherwise, normally delete the unsorted keys from the map. + for _, kv := range unsorted { + delete(store.unsortedCache, conv.UnsafeBytesToStr(kv.Key)) + } + } +} + //---------------------------------------- // etc // Only entrypoint to mutate store.cache. 
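+// setCacheValue records the pair in store.cache, updates the deleted set for
+// the key, and, when dirty, tracks the key in unsortedCache for later sorting.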
func (store *Store) setCacheValue(key, value []byte, deleted bool, dirty bool) { + types.AssertValidKey(key) + keyStr := conv.UnsafeBytesToStr(key) store.cache[keyStr] = &cValue{ value: value, @@ -395,7 +407,7 @@ func (store *Store) setCacheValue(key, value []byte, deleted bool, dirty bool) { delete(store.deleted, keyStr) } if dirty { - store.unsortedCache[conv.UnsafeBytesToStr(key)] = struct{}{} + store.unsortedCache[keyStr] = struct{}{} } } diff --git a/store/cachemulti/store.go b/store/cachemulti/store.go index 203466d764..d64dc03aca 100644 --- a/store/cachemulti/store.go +++ b/store/cachemulti/store.go @@ -8,6 +8,8 @@ import ( "github.com/cosmos/cosmos-sdk/store/cachekv" "github.com/cosmos/cosmos-sdk/store/dbadapter" + "github.com/cosmos/cosmos-sdk/store/listenkv" + "github.com/cosmos/cosmos-sdk/store/tracekv" "github.com/cosmos/cosmos-sdk/store/types" ) @@ -39,6 +41,9 @@ func NewFromKVStore( keys map[string]types.StoreKey, traceWriter io.Writer, traceContext types.TraceContext, listeners map[types.StoreKey][]types.WriteListener, ) Store { + if listeners == nil { + listeners = make(map[types.StoreKey][]types.WriteListener) + } cms := Store{ db: cachekv.NewStore(store), stores: make(map[types.StoreKey]types.CacheWrap, len(stores)), @@ -49,17 +54,13 @@ func NewFromKVStore( } for key, store := range stores { - var cacheWrapped types.CacheWrap if cms.TracingEnabled() { - cacheWrapped = store.CacheWrapWithTrace(cms.traceWriter, cms.traceContext) - } else { - cacheWrapped = store.CacheWrap() + store = tracekv.NewStore(store.(types.KVStore), cms.traceWriter, cms.traceContext) } if cms.ListeningEnabled(key) { - cms.stores[key] = cacheWrapped.CacheWrapWithListeners(key, cms.listeners[key]) - } else { - cms.stores[key] = cacheWrapped + store = listenkv.NewStore(store.(types.KVStore), key, listeners[key]) } + cms.stores[key] = cachekv.NewStore(store.(types.KVStore)) } return cms @@ -80,7 +81,8 @@ func newCacheMultiStoreFromCMS(cms Store) Store { stores[k] = v } - return NewFromKVStore(cms.db, stores, nil, cms.traceWriter, cms.traceContext, cms.listeners) + // don't pass listeners to nested cache store. + return NewFromKVStore(cms.db, stores, nil, cms.traceWriter, cms.traceContext, nil) } // SetTracer sets the tracer for the MultiStore that the underlying diff --git a/store/iavl/store.go b/store/iavl/store.go index bae00d79aa..a44cc1d531 100644 --- a/store/iavl/store.go +++ b/store/iavl/store.go @@ -42,16 +42,16 @@ type Store struct { // LoadStore returns an IAVL Store as a CommitKVStore. Internally, it will load the // store's version (id) from the provided DB. An error is returned if the version // fails to load, or if called with a positive version on an empty tree. -func LoadStore(db dbm.DB, logger log.Logger, key types.StoreKey, id types.CommitID, lazyLoading bool, cacheSize int) (types.CommitKVStore, error) { - return LoadStoreWithInitialVersion(db, logger, key, id, lazyLoading, 0, cacheSize) +func LoadStore(db dbm.DB, logger log.Logger, key types.StoreKey, id types.CommitID, lazyLoading bool, cacheSize int, disableFastNode bool) (types.CommitKVStore, error) { + return LoadStoreWithInitialVersion(db, logger, key, id, lazyLoading, 0, cacheSize, disableFastNode) } // LoadStoreWithInitialVersion returns an IAVL Store as a CommitKVStore setting its initialVersion // to the one given. Internally, it will load the store's version (id) from the // provided DB. An error is returned if the version fails to load, or if called with a positive // version on an empty tree. 
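+// The new disableFastNode flag is forwarded to the underlying IAVL tree so
+// callers can opt out of fast-node storage and its migration.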
-func LoadStoreWithInitialVersion(db dbm.DB, logger log.Logger, key types.StoreKey, id types.CommitID, lazyLoading bool, initialVersion uint64, cacheSize int) (types.CommitKVStore, error) { - tree, err := iavl.NewMutableTreeWithOpts(db, cacheSize, &iavl.Options{InitialVersion: initialVersion}) +func LoadStoreWithInitialVersion(db dbm.DB, logger log.Logger, key types.StoreKey, id types.CommitID, lazyLoading bool, initialVersion uint64, cacheSize int, disableFastNode bool) (types.CommitKVStore, error) { + tree, err := iavl.NewMutableTreeWithOpts(db, cacheSize, &iavl.Options{InitialVersion: initialVersion}, disableFastNode) if err != nil { return nil, err } @@ -233,6 +233,12 @@ func (st *Store) DeleteVersions(versions ...int64) error { return st.tree.DeleteVersions(versions...) } +// LoadVersionForOverwriting attempts to load a tree at a previously committed +// version, or the latest version below it. Any versions greater than targetVersion will be deleted. +func (st *Store) LoadVersionForOverwriting(targetVersion int64) (int64, error) { + return st.tree.LoadVersionForOverwriting(targetVersion) +} + // Implements types.KVStore. func (st *Store) Iterator(start, end []byte) types.Iterator { iterator, err := st.tree.Iterator(start, end, true) @@ -416,7 +422,7 @@ var _ types.Iterator = (*iavlIterator)(nil) // newIAVLIterator will create a new iavlIterator. // CONTRACT: Caller must release the iavlIterator, as each one creates a new // goroutine. -func newIAVLIterator(tree *iavl.ImmutableTree, start, end []byte, ascending bool) *iavlIterator { +func newIAVLIterator(tree *iavl.ImmutableTree, start, end []byte, ascending bool) *iavlIterator { //nolint:unused iterator, err := tree.Iterator(start, end, ascending) if err != nil { panic(err) diff --git a/store/iavl/store_test.go b/store/iavl/store_test.go index c70bedfd7e..f1cd586e36 100644 --- a/store/iavl/store_test.go +++ b/store/iavl/store_test.go @@ -34,7 +34,7 @@ func randBytes(numBytes int) []byte { // make a tree with data from above and save it func newAlohaTree(t *testing.T, db dbm.DB) (*iavl.MutableTree, types.CommitID) { - tree, err := iavl.NewMutableTree(db, cacheSize) + tree, err := iavl.NewMutableTree(db, cacheSize, false) require.NoError(t, err) for k, v := range treeData { @@ -100,17 +100,17 @@ func TestLoadStore(t *testing.T) { require.Equal(t, string(hcStore.Get([]byte("hello"))), "ciao") // Querying a new store at some previous non-pruned height H - newHStore, err := LoadStore(db, log.NewNopLogger(), types.NewKVStoreKey("test"), cIDH, false, DefaultIAVLCacheSize) + newHStore, err := LoadStore(db, log.NewNopLogger(), types.NewKVStoreKey("test"), cIDH, false, DefaultIAVLCacheSize, false) require.NoError(t, err) require.Equal(t, string(newHStore.Get([]byte("hello"))), "hallo") // Querying a new store at some previous pruned height Hp - newHpStore, err := LoadStore(db, log.NewNopLogger(), types.NewKVStoreKey("test"), cIDHp, false, DefaultIAVLCacheSize) + newHpStore, err := LoadStore(db, log.NewNopLogger(), types.NewKVStoreKey("test"), cIDHp, false, DefaultIAVLCacheSize, false) require.NoError(t, err) require.Equal(t, string(newHpStore.Get([]byte("hello"))), "hola") // Querying a new store at current height H - newHcStore, err := LoadStore(db, log.NewNopLogger(), types.NewKVStoreKey("test"), cIDHc, false, DefaultIAVLCacheSize) + newHcStore, err := LoadStore(db, log.NewNopLogger(), types.NewKVStoreKey("test"), cIDHc, false, DefaultIAVLCacheSize, false) require.NoError(t, err) require.Equal(t, string(newHcStore.Get([]byte("hello"))), 
"ciao") } @@ -281,7 +281,7 @@ func TestIAVLIterator(t *testing.T) { func TestIAVLReverseIterator(t *testing.T) { db := dbm.NewMemDB() - tree, err := iavl.NewMutableTree(db, cacheSize) + tree, err := iavl.NewMutableTree(db, cacheSize, false) require.NoError(t, err) iavlStore := UnsafeNewStore(tree) @@ -314,7 +314,7 @@ func TestIAVLReverseIterator(t *testing.T) { func TestIAVLPrefixIterator(t *testing.T) { db := dbm.NewMemDB() - tree, err := iavl.NewMutableTree(db, cacheSize) + tree, err := iavl.NewMutableTree(db, cacheSize, false) require.NoError(t, err) iavlStore := UnsafeNewStore(tree) @@ -378,7 +378,7 @@ func TestIAVLPrefixIterator(t *testing.T) { func TestIAVLReversePrefixIterator(t *testing.T) { db := dbm.NewMemDB() - tree, err := iavl.NewMutableTree(db, cacheSize) + tree, err := iavl.NewMutableTree(db, cacheSize, false) require.NoError(t, err) iavlStore := UnsafeNewStore(tree) @@ -446,7 +446,7 @@ func nextVersion(iavl *Store) { func TestIAVLNoPrune(t *testing.T) { db := dbm.NewMemDB() - tree, err := iavl.NewMutableTree(db, cacheSize) + tree, err := iavl.NewMutableTree(db, cacheSize, false) require.NoError(t, err) iavlStore := UnsafeNewStore(tree) @@ -465,7 +465,7 @@ func TestIAVLNoPrune(t *testing.T) { func TestIAVLStoreQuery(t *testing.T) { db := dbm.NewMemDB() - tree, err := iavl.NewMutableTree(db, cacheSize) + tree, err := iavl.NewMutableTree(db, cacheSize, false) require.NoError(t, err) iavlStore := UnsafeNewStore(tree) @@ -569,7 +569,7 @@ func BenchmarkIAVLIteratorNext(b *testing.B) { b.ReportAllocs() db := dbm.NewMemDB() treeSize := 1000 - tree, err := iavl.NewMutableTree(db, cacheSize) + tree, err := iavl.NewMutableTree(db, cacheSize, false) require.NoError(b, err) for i := 0; i < treeSize; i++ { @@ -603,7 +603,7 @@ func TestSetInitialVersion(t *testing.T) { { "works with a mutable tree", func(db *dbm.MemDB) *Store { - tree, err := iavl.NewMutableTree(db, cacheSize) + tree, err := iavl.NewMutableTree(db, cacheSize, false) require.NoError(t, err) store := UnsafeNewStore(tree) @@ -613,7 +613,7 @@ func TestSetInitialVersion(t *testing.T) { { "throws error on immutable tree", func(db *dbm.MemDB) *Store { - tree, err := iavl.NewMutableTree(db, cacheSize) + tree, err := iavl.NewMutableTree(db, cacheSize, false) require.NoError(t, err) store := UnsafeNewStore(tree) _, version, err := store.tree.SaveVersion() diff --git a/store/iavl/tree.go b/store/iavl/tree.go index d1228c6822..04861e8406 100644 --- a/store/iavl/tree.go +++ b/store/iavl/tree.go @@ -33,6 +33,7 @@ type ( GetImmutable(version int64) (*iavl.ImmutableTree, error) SetInitialVersion(version uint64) Iterator(start, end []byte, ascending bool) (types.Iterator, error) + LoadVersionForOverwriting(targetVersion int64) (int64, error) } // immutableTree is a simple wrapper around a reference to an iavl.ImmutableTree @@ -94,3 +95,7 @@ func (it *immutableTree) GetImmutable(version int64) (*iavl.ImmutableTree, error return it.ImmutableTree, nil } + +func (it *immutableTree) LoadVersionForOverwriting(targetVersion int64) (int64, error) { + panic("cannot call 'LoadVersionForOverwriting' on an immutable IAVL tree") +} diff --git a/store/iavl/tree_test.go b/store/iavl/tree_test.go index 44ba844473..02d19a97bf 100644 --- a/store/iavl/tree_test.go +++ b/store/iavl/tree_test.go @@ -10,7 +10,7 @@ import ( func TestImmutableTreePanics(t *testing.T) { t.Parallel() - immTree := iavl.NewImmutableTree(dbm.NewMemDB(), 100) + immTree := iavl.NewImmutableTree(dbm.NewMemDB(), 100, false) it := &immutableTree{immTree} require.Panics(t, func() { 
it.Set([]byte{}, []byte{}) }) require.Panics(t, func() { it.Remove([]byte{}) }) diff --git a/store/mem/store.go b/store/mem/store.go index c8aa6dca59..b7eda8b129 100644 --- a/store/mem/store.go +++ b/store/mem/store.go @@ -27,7 +27,7 @@ func NewStore() *Store { return NewStoreWithDB(dbm.NewMemDB()) } -func NewStoreWithDB(db *dbm.MemDB) *Store { // nolint: interfacer +func NewStoreWithDB(db *dbm.MemDB) *Store { //nolint: interfacer return &Store{Store: dbadapter.Store{DB: db}} } diff --git a/store/prefix/store_test.go b/store/prefix/store_test.go index bf49e9cfe5..25e07cbb1a 100644 --- a/store/prefix/store_test.go +++ b/store/prefix/store_test.go @@ -90,7 +90,7 @@ func testPrefixStore(t *testing.T, baseStore types.KVStore, prefix []byte) { func TestIAVLStorePrefix(t *testing.T) { db := dbm.NewMemDB() - tree, err := tiavl.NewMutableTree(db, cacheSize) + tree, err := tiavl.NewMutableTree(db, cacheSize, false) require.NoError(t, err) iavlStore := iavl.UnsafeNewStore(tree) diff --git a/store/rootmulti/proof_test.go b/store/rootmulti/proof_test.go index 3e5d42067d..3769d884eb 100644 --- a/store/rootmulti/proof_test.go +++ b/store/rootmulti/proof_test.go @@ -15,7 +15,7 @@ import ( func TestVerifyIAVLStoreQueryProof(t *testing.T) { // Create main tree for testing. db := dbm.NewMemDB() - iStore, err := iavl.LoadStore(db, log.NewNopLogger(), types.NewKVStoreKey("test"), types.CommitID{}, false, iavl.DefaultIAVLCacheSize) + iStore, err := iavl.LoadStore(db, log.NewNopLogger(), types.NewKVStoreKey("test"), types.CommitID{}, false, iavl.DefaultIAVLCacheSize, false) store := iStore.(*iavl.Store) require.Nil(t, err) store.Set([]byte("MYKEY"), []byte("MYVALUE")) diff --git a/store/rootmulti/rollback_test.go b/store/rootmulti/rollback_test.go new file mode 100644 index 0000000000..c2e199606e --- /dev/null +++ b/store/rootmulti/rollback_test.go @@ -0,0 +1,98 @@ +package rootmulti_test + +import ( + "encoding/json" + "fmt" + "testing" + + "github.com/cosmos/cosmos-sdk/simapp" + "github.com/stretchr/testify/require" + abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/libs/log" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" + dbm "github.com/tendermint/tm-db" +) + +func setup(withGenesis bool, invCheckPeriod uint, db dbm.DB) (*simapp.SimApp, simapp.GenesisState) { + encCdc := simapp.MakeTestEncodingConfig() + app := simapp.NewSimApp(log.NewNopLogger(), db, nil, true, map[int64]bool{}, simapp.DefaultNodeHome, invCheckPeriod, encCdc, simapp.EmptyAppOptions{}) + if withGenesis { + return app, simapp.NewDefaultGenesisState(encCdc.Marshaler) + } + return app, simapp.GenesisState{} +} + +// Setup initializes a new SimApp. A Nop logger is set in SimApp. 
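+// When isCheckTx is false, InitChain is invoked with the default genesis state
+// so that deliverState is initialized before the first blocks are committed.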
+func SetupWithDB(isCheckTx bool, db dbm.DB) *simapp.SimApp { + app, genesisState := setup(!isCheckTx, 5, db) + if !isCheckTx { + // init chain must be called to stop deliverState from being nil + stateBytes, err := json.MarshalIndent(genesisState, "", " ") + if err != nil { + panic(err) + } + + // Initialize the chain + app.InitChain( + abci.RequestInitChain{ + Validators: []abci.ValidatorUpdate{}, + ConsensusParams: simapp.DefaultConsensusParams, + AppStateBytes: stateBytes, + }, + ) + } + + return app +} + +func TestRollback(t *testing.T) { + t.Skip() + db := dbm.NewMemDB() + app := SetupWithDB(false, db) + app.Commit() + ver0 := app.LastBlockHeight() + // commit 10 blocks + for i := int64(1); i <= 10; i++ { + header := tmproto.Header{ + Height: ver0 + i, + AppHash: app.LastCommitID().Hash, + } + app.BeginBlock(abci.RequestBeginBlock{Header: header}) + ctx := app.NewContext(false, header) + store := ctx.KVStore(app.GetKey("bank")) + store.Set([]byte("key"), []byte(fmt.Sprintf("value%d", i))) + app.Commit() + } + + require.Equal(t, ver0+10, app.LastBlockHeight()) + store := app.NewContext(true, tmproto.Header{}).KVStore(app.GetKey("bank")) + require.Equal(t, []byte("value10"), store.Get([]byte("key"))) + + // rollback 5 blocks + target := ver0 + 5 + require.NoError(t, app.CommitMultiStore().RollbackToVersion(target)) + require.Equal(t, target, app.LastBlockHeight()) + + // recreate app to have clean check state + encCdc := simapp.MakeTestEncodingConfig() + app = simapp.NewSimApp(log.NewNopLogger(), db, nil, true, map[int64]bool{}, simapp.DefaultNodeHome, 5, encCdc, simapp.EmptyAppOptions{}) + store = app.NewContext(true, tmproto.Header{}).KVStore(app.GetKey("bank")) + require.Equal(t, []byte("value5"), store.Get([]byte("key"))) + + // commit another 5 blocks with different values + for i := int64(6); i <= 10; i++ { + header := tmproto.Header{ + Height: ver0 + i, + AppHash: app.LastCommitID().Hash, + } + app.BeginBlock(abci.RequestBeginBlock{Header: header}) + ctx := app.NewContext(false, header) + store := ctx.KVStore(app.GetKey("bank")) + store.Set([]byte("key"), []byte(fmt.Sprintf("VALUE%d", i))) + app.Commit() + } + + require.Equal(t, ver0+10, app.LastBlockHeight()) + store = app.NewContext(true, tmproto.Header{}).KVStore(app.GetKey("bank")) + require.Equal(t, []byte("VALUE10"), store.Get([]byte("key"))) +} diff --git a/store/rootmulti/store.go b/store/rootmulti/store.go index 1f8ef097d0..7e5d032850 100644 --- a/store/rootmulti/store.go +++ b/store/rootmulti/store.go @@ -15,6 +15,7 @@ import ( "github.com/pkg/errors" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/proto/tendermint/crypto" dbm "github.com/tendermint/tm-db" snapshottypes "github.com/cosmos/cosmos-sdk/snapshots/types" @@ -33,23 +34,28 @@ const ( latestVersionKey = "s/latest" pruneHeightsKey = "s/pruneheights" commitInfoKeyFmt = "s/%d" // s/ + + proofsPath = "proofs" ) +const iavlDisablefastNodeDefault = true + // Store is composed of many CommitStores. Name contrasts with // cacheMultiStore which is used for branching other MultiStores. It implements // the CommitMultiStore interface. 
type Store struct { - db dbm.DB - logger log.Logger - lastCommitInfo *types.CommitInfo - pruningOpts types.PruningOptions - iavlCacheSize int - storesParams map[types.StoreKey]storeParams - stores map[types.StoreKey]types.CommitKVStore - keysByName map[string]types.StoreKey - lazyLoading bool - pruneHeights []int64 - initialVersion int64 + db dbm.DB + logger log.Logger + lastCommitInfo *types.CommitInfo + pruningOpts types.PruningOptions + iavlCacheSize int + iavlDisableFastNode bool + storesParams map[types.StoreKey]storeParams + stores map[types.StoreKey]types.CommitKVStore + keysByName map[string]types.StoreKey + lazyLoading bool + pruneHeights []int64 + initialVersion int64 traceWriter io.Writer traceContext types.TraceContext @@ -71,15 +77,16 @@ var ( // LoadVersion must be called. func NewStore(db dbm.DB, logger log.Logger) *Store { return &Store{ - db: db, - logger: logger, - pruningOpts: types.PruneNothing, - iavlCacheSize: iavl.DefaultIAVLCacheSize, - storesParams: make(map[types.StoreKey]storeParams), - stores: make(map[types.StoreKey]types.CommitKVStore), - keysByName: make(map[string]types.StoreKey), - pruneHeights: make([]int64, 0), - listeners: make(map[types.StoreKey][]types.WriteListener), + db: db, + logger: logger, + pruningOpts: types.PruneNothing, + iavlCacheSize: iavl.DefaultIAVLCacheSize, + iavlDisableFastNode: iavlDisablefastNodeDefault, + storesParams: make(map[types.StoreKey]storeParams), + stores: make(map[types.StoreKey]types.CommitKVStore), + keysByName: make(map[string]types.StoreKey), + pruneHeights: make([]int64, 0), + listeners: make(map[types.StoreKey][]types.WriteListener), } } @@ -99,6 +106,10 @@ func (rs *Store) SetIAVLCacheSize(cacheSize int) { rs.iavlCacheSize = cacheSize } +func (rs *Store) SetIAVLDisableFastNode(disableFastNode bool) { + rs.iavlDisableFastNode = disableFastNode +} + // SetLazyLoading sets if the iavl store should be loaded lazily or not func (rs *Store) SetLazyLoading(lazyLoading bool) { rs.lazyLoading = lazyLoading @@ -156,7 +167,7 @@ func (rs *Store) GetStores() map[types.StoreKey]types.CommitKVStore { // LoadLatestVersionAndUpgrade implements CommitMultiStore func (rs *Store) LoadLatestVersionAndUpgrade(upgrades *types.StoreUpgrades) error { - ver := getLatestVersion(rs.db) + ver := GetLatestVersion(rs.db) return rs.loadVersion(ver, upgrades) } @@ -167,7 +178,7 @@ func (rs *Store) LoadVersionAndUpgrade(ver int64, upgrades *types.StoreUpgrades) // LoadLatestVersion implements CommitMultiStore. 
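+// It reads the latest committed version with GetLatestVersion and loads it
+// without applying any store upgrades.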
func (rs *Store) LoadLatestVersion() error {
- ver := getLatestVersion(rs.db)
+ ver := GetLatestVersion(rs.db)
 return rs.loadVersion(ver, nil)
}
@@ -230,9 +241,7 @@ func (rs *Store) loadVersion(ver int64, upgrades *types.StoreUpgrades) error {
 // If it was deleted, remove all data
 if upgrades.IsDeleted(key.Name()) {
- if err := deleteKVStore(store.(types.KVStore)); err != nil {
- return errors.Wrapf(err, "failed to delete store %s", key.Name())
- }
+ deleteKVStore(store.(types.KVStore))
 } else if oldName := upgrades.RenamedFrom(key.Name()); oldName != "" {
 // handle renames specially
 // make an unregistered key to satisfy loadCommitStore params
@@ -247,9 +256,7 @@ }
 // move all data
- if err := moveKVStoreData(oldStore.(types.KVStore), store.(types.KVStore)); err != nil {
- return errors.Wrapf(err, "failed to move store %s -> %s", oldName, key.Name())
- }
+ moveKVStoreData(oldStore.(types.KVStore), store.(types.KVStore))
 }
 }
@@ -274,7 +281,7 @@ func (rs *Store) getCommitID(infos map[string]types.StoreInfo, name string) type
 return info.CommitId
}
-func deleteKVStore(kv types.KVStore) error {
+func deleteKVStore(kv types.KVStore) {
 // Note that we cannot write while iterating, so load all keys here, delete below
 var keys [][]byte
 itr := kv.Iterator(nil, nil)
@@ -287,11 +294,10 @@ for _, k := range keys {
 kv.Delete(k)
 }
- return nil
}
// we simulate move by a copy and delete
-func moveKVStoreData(oldDB types.KVStore, newDB types.KVStore) error {
+func moveKVStoreData(oldDB types.KVStore, newDB types.KVStore) {
 // we read from one and write to another
 itr := oldDB.Iterator(nil, nil)
 for itr.Valid() {
@@ -301,7 +307,7 @@ itr.Close()
 // then delete the old store
- return deleteKVStore(oldDB)
+ deleteKVStore(oldDB)
}
// SetInterBlockCache sets the Store's internal inter-block (persistent) cache.
@@ -378,7 +384,7 @@ func (rs *Store) ListeningEnabled(key types.StoreKey) bool {
func (rs *Store) LastCommitID() types.CommitID {
 if rs.lastCommitInfo == nil {
 return types.CommitID{
- Version: getLatestVersion(rs.db),
+ Version: GetLatestVersion(rs.db),
 }
 }
@@ -420,7 +426,7 @@ func (rs *Store) Commit() types.CommitID {
 // batch prune if the current height is a pruning interval height
 if rs.pruningOpts.Interval > 0 && version%int64(rs.pruningOpts.Interval) == 0 {
- rs.pruneStores()
+ rs.PruneStores(true, nil)
 }
 flushMetadata(rs.db, version, rs.lastCommitInfo, rs.pruneHeights)
@@ -431,9 +437,14 @@ }
 }
-// pruneStores will batch delete a list of heights from each mounted sub-store.
-// Afterwards, pruneHeights is reset.
-func (rs *Store) pruneStores() {
+// PruneStores will batch delete a list of heights from each mounted sub-store.
+// If clearStorePruningHeights is true, the store's pruneHeights are appended to
+// pruningHeights and reset after pruning finishes.
+func (rs *Store) PruneStores(clearStorePruningHeights bool, pruningHeights []int64) {
+ if clearStorePruningHeights {
+ pruningHeights = append(pruningHeights, rs.pruneHeights...)
+ }
+
 if len(rs.pruneHeights) == 0 {
 return
 }
@@ -444,7 +455,7 @@
 // it to get the underlying IAVL store.
store = rs.GetCommitKVStore(key)
- if err := store.(*iavl.Store).DeleteVersions(rs.pruneHeights...); err != nil {
+ if err := store.(*iavl.Store).DeleteVersions(pruningHeights...); err != nil {
 if errCause := errors.Cause(err); errCause != nil && errCause != iavltree.ErrVersionDoesNotExist {
 panic(err)
 }
 }
 }
- rs.pruneHeights = make([]int64, 0)
+ if clearStorePruningHeights {
+ rs.pruneHeights = make([]int64, 0)
+ }
}
// CacheWrap implements CacheWrapper/Store/CommitStore.
@@ -564,22 +577,28 @@ func (rs *Store) GetStoreByName(name string) types.Store {
// Query calls substore.Query with the same `req` where `req.Path` is
// modified to remove the substore prefix.
// Ie. `req.Path` here is `/<storeName>/<subpath>`, and trimmed to `/<subpath>` for the substore.
-// TODO: add proof for `multistore -> substore`.
+// Special case: if `req.Path` is `/proofs`, the commit hash is included
+// as response value. In addition, proofs of every store are appended to the response for
+// the requested height.
func (rs *Store) Query(req abci.RequestQuery) abci.ResponseQuery {
 path := req.Path
- storeName, subpath, err := parsePath(path)
+ firstPath, subpath, err := parsePath(path)
 if err != nil {
 return sdkerrors.QueryResult(err)
 }
- store := rs.GetStoreByName(storeName)
+ if firstPath == proofsPath {
+ return rs.doProofsQuery(req)
+ }
+
+ store := rs.GetStoreByName(firstPath)
 if store == nil {
- return sdkerrors.QueryResult(sdkerrors.Wrapf(sdkerrors.ErrUnknownRequest, "no such store: %s", storeName))
+ return sdkerrors.QueryResult(sdkerrors.Wrapf(sdkerrors.ErrUnknownRequest, "no such store: %s", firstPath))
 }
 queryable, ok := store.(types.Queryable)
 if !ok {
- return sdkerrors.QueryResult(sdkerrors.Wrapf(sdkerrors.ErrUnknownRequest, "store %s (type %T) doesn't support queries", storeName, store))
+ return sdkerrors.QueryResult(sdkerrors.Wrapf(sdkerrors.ErrUnknownRequest, "store %s (type %T) doesn't support queries", firstPath, store))
 }
 // trim the path and make the query
@@ -609,13 +628,14 @@ }
 // Restore origin path and append proof op.
- res.ProofOps.Ops = append(res.ProofOps.Ops, commitInfo.ProofOp(storeName))
+ res.ProofOps.Ops = append(res.ProofOps.Ops, commitInfo.ProofOp(firstPath))
 return res
}
// SetInitialVersion sets the initial version of the IAVL tree. It is used when
// starting a new chain at an arbitrary height.
+// NOTE: this never errors. Can we fix the function signature?
func (rs *Store) SetInitialVersion(version int64) error {
 rs.initialVersion = version
@@ -837,9 +857,9 @@ func (rs *Store) loadCommitStoreFromParams(key types.StoreKey, id types.CommitID
 var err error
 if params.initialVersion == 0 {
- store, err = iavl.LoadStore(db, rs.logger, key, id, rs.lazyLoading, rs.iavlCacheSize)
+ store, err = iavl.LoadStore(db, rs.logger, key, id, rs.lazyLoading, rs.iavlCacheSize, rs.iavlDisableFastNode)
 } else {
- store, err = iavl.LoadStoreWithInitialVersion(db, rs.logger, key, id, rs.lazyLoading, params.initialVersion, rs.iavlCacheSize)
+ store, err = iavl.LoadStoreWithInitialVersion(db, rs.logger, key, id, rs.lazyLoading, params.initialVersion, rs.iavlCacheSize, rs.iavlDisableFastNode)
 }
 if err != nil {
@@ -896,27 +916,26 @@ func (rs *Store) buildCommitInfo(version int64) *types.CommitInfo {
}
// RollbackToVersion deletes the versions after `target` and updates the latest version.
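+// It does this by loading each IAVL store at the target version for
+// overwriting, flushing fresh commit metadata, and reloading the latest version.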
-func (rs *Store) RollbackToVersion(target int64) int64 { - if target < 0 { - panic("Negative rollback target") - } - current := getLatestVersion(rs.db) - if target >= current { - return current +func (rs *Store) RollbackToVersion(target int64) error { + if target <= 0 { + return fmt.Errorf("invalid rollback height target: %d", target) } - for ; current > target; current-- { - rs.pruneHeights = append(rs.pruneHeights, current) - } - rs.pruneStores() - // update latest height - bz, err := gogotypes.StdInt64Marshal(current) - if err != nil { - panic(err) + for key, store := range rs.stores { + if store.GetStoreType() == types.StoreTypeIAVL { + // If the store is wrapped with an inter-block cache, we must first unwrap + // it to get the underlying IAVL store. + store = rs.GetCommitKVStore(key) + _, err := store.(*iavl.Store).LoadVersionForOverwriting(target) + if err != nil { + return err + } + } } - rs.db.Set([]byte(latestVersionKey), bz) - return current + flushMetadata(rs.db, target, rs.buildCommitInfo(target), []int64{}) + + return rs.LoadLatestVersion() } type storeParams struct { @@ -926,7 +945,7 @@ type storeParams struct { initialVersion uint64 } -func getLatestVersion(db dbm.DB) int64 { +func GetLatestVersion(db dbm.DB) int64 { bz, err := db.Get([]byte(latestVersionKey)) if err != nil { panic(err) @@ -966,6 +985,24 @@ func commitStores(version int64, storeMap map[types.StoreKey]types.CommitKVStore } } +func (rs *Store) doProofsQuery(req abci.RequestQuery) abci.ResponseQuery { + commitInfo, err := getCommitInfo(rs.db, req.Height) + if err != nil { + return sdkerrors.QueryResult(err) + } + res := abci.ResponseQuery{ + Height: req.Height, + Key: []byte(proofsPath), + Value: commitInfo.CommitID().Hash, + ProofOps: &crypto.ProofOps{Ops: make([]crypto.ProofOp, 0, len(commitInfo.StoreInfos))}, + } + + for _, storeInfo := range commitInfo.StoreInfos { + res.ProofOps.Ops = append(res.ProofOps.Ops, commitInfo.ProofOp(storeInfo.Name)) + } + return res +} + // Gets commitInfo from disk. func getCommitInfo(db dbm.DB, ver int64) (*types.CommitInfo, error) { cInfoKey := fmt.Sprintf(commitInfoKeyFmt, ver) diff --git a/store/rootmulti/store_test.go b/store/rootmulti/store_test.go index 428c243ab7..fd706dd3c9 100644 --- a/store/rootmulti/store_test.go +++ b/store/rootmulti/store_test.go @@ -410,7 +410,7 @@ func TestMultiStoreQuery(t *testing.T) { k2, v2 := []byte("water"), []byte("flows") // v3 := []byte("is cold") - cid := multi.Commit() + cid1 := multi.Commit() // Make sure we can get by name. garbage := multi.GetStoreByName("bad-name") @@ -425,8 +425,8 @@ func TestMultiStoreQuery(t *testing.T) { store2.Set(k2, v2) // Commit the multistore. 
- cid = multi.Commit()
- ver := cid.Version
+ cid2 := multi.Commit()
+ ver := cid2.Version
 // Reload multistore from database
 multi = newMultiStoreWithMounts(db, types.PruneNothing)
@@ -468,6 +468,26 @@
 qres = multi.Query(query)
 require.EqualValues(t, 0, qres.Code)
 require.Equal(t, v2, qres.Value)
+
+ // Test proofs latest height
+ query.Path = fmt.Sprintf("/%s", proofsPath)
+ qres = multi.Query(query)
+ require.EqualValues(t, 0, qres.Code)
+ require.NotNil(t, qres.ProofOps)
+ require.Equal(t, []byte(proofsPath), qres.Key)
+ require.Equal(t, cid2.Hash, qres.Value)
+ require.Equal(t, cid2.Version, qres.Height)
+ require.Equal(t, 3, len(qres.ProofOps.Ops)) // 3 mounted stores
+
+ // Test proofs second latest height
+ query.Height = query.Height - 1
+ qres = multi.Query(query)
+ require.EqualValues(t, 0, qres.Code)
+ require.NotNil(t, qres.ProofOps)
+ require.Equal(t, []byte(proofsPath), qres.Key)
+ require.Equal(t, cid1.Hash, qres.Value)
+ require.Equal(t, cid1.Version, qres.Height)
+ require.Equal(t, 3, len(qres.ProofOps.Ops)) // 3 mounted stores
}
func TestMultiStore_Pruning(t *testing.T) {
@@ -813,3 +833,52 @@ func hashStores(stores map[types.StoreKey]types.CommitKVStore) []byte {
 }
 return sdkmaps.HashFromMap(m)
}
+
+type MockListener struct {
+ stateCache []types.StoreKVPair
+}
+
+func (tl *MockListener) OnWrite(storeKey types.StoreKey, key []byte, value []byte, delete bool) error {
+ tl.stateCache = append(tl.stateCache, types.StoreKVPair{
+ StoreKey: storeKey.Name(),
+ Key: key,
+ Value: value,
+ Delete: delete,
+ })
+ return nil
+}
+
+func TestStateListeners(t *testing.T) {
+ var db dbm.DB = dbm.NewMemDB()
+ ms := newMultiStoreWithMounts(db, types.NewPruningOptionsFromString(types.PruningOptionNothing))
+
+ listener := &MockListener{}
+ ms.AddListeners(testStoreKey1, []types.WriteListener{listener})
+
+ require.NoError(t, ms.LoadLatestVersion())
+ cacheMulti := ms.CacheMultiStore()
+
+ store1 := cacheMulti.GetKVStore(testStoreKey1)
+ store1.Set([]byte{1}, []byte{1})
+ require.Empty(t, listener.stateCache)
+
+ // writes are observed when cache store commit.
+ cacheMulti.Write()
+ require.Equal(t, 1, len(listener.stateCache))
+
+ // test nested cache store
+ listener.stateCache = []types.StoreKVPair{}
+ nested := cacheMulti.CacheMultiStore()
+
+ store1 = nested.GetKVStore(testStoreKey1)
+ store1.Set([]byte{1}, []byte{1})
+ require.Empty(t, listener.stateCache)
+
+ // writes are not observed when nested cache store commit
+ nested.Write()
+ require.Empty(t, listener.stateCache)
+
+ // writes are observed when inner cache store commit
+ cacheMulti.Write()
+ require.Equal(t, 1, len(listener.stateCache))
+}
diff --git a/store/streaming/README.md b/store/streaming/README.md
new file mode 100644
index 0000000000..819514aef7
--- /dev/null
+++ b/store/streaming/README.md
@@ -0,0 +1,67 @@
+# State Streaming Service
+This package contains the constructors for the `StreamingService`s used to write state changes out from individual KVStores to a
+file or stream, as described in [ADR-038](../../docs/architecture/adr-038-state-listening.md) and defined in [baseapp/streaming.go](../../baseapp/streaming.go).
+The child directories contain the implementations for specific output destinations.
+
+Currently, a `StreamingService` implementation that writes state changes out to files is supported; in the future, support for additional
+output destinations can be added.
+
+The `StreamingService` is configured from within an App using the `AppOptions` loaded from the app.toml file:
+
+```toml
+[store]
+ streamers = [ # if len(streamers) > 0 we are streaming
+ "file", # name of the streaming service, used by constructor
+ ]
+
+[streamers]
+ [streamers.file]
+ keys = ["list", "of", "store", "keys", "we", "want", "to", "expose", "for", "this", "streaming", "service"]
+ write_dir = "path to the write directory"
+ prefix = "optional prefix to prepend to the generated file names"
+```
+
+`store.streamers` contains a list of the names of the `StreamingService` implementations to employ, which are used by `NewServiceConstructor`
+to return the `ServiceConstructor` for that particular implementation:
+
+```go
+listeners := cast.ToStringSlice(appOpts.Get("store.streamers"))
+for _, listenerName := range listeners {
+ constructor, err := NewServiceConstructor(listenerName)
+ if err != nil {
+ // handle error
+ }
+}
+```
+
+`streamers` contains a mapping of the specific `StreamingService` implementation name to the configuration parameters for that specific service.
+`streamers.x.keys` contains the list of `StoreKey` names for the KVStores to expose using this service and is required by every type of `StreamingService`.
+In order to expose *all* KVStores, we can include `*` in this list. An empty list is equivalent to turning the service off.
+
+Additional configuration parameters are optional and specific to the implementation.
+In the case of the file streaming service, `streamers.file.write_dir` contains the path to the
+directory to write the files to, and `streamers.file.prefix` contains an optional prefix to prepend to the output files to prevent potential collisions
+with other App `StreamingService` output files.
+
+The `ServiceConstructor` accepts `AppOptions`, the store keys collected using `streamers.x.keys`, and a `BinaryMarshaller`, and
+returns a `StreamingService` implementation. The `AppOptions` are passed in to provide access to any implementation-specific configuration options,
+e.g. in the case of the file streaming service, the `streamers.file.write_dir` and `streamers.file.prefix`.
+
+```go
+streamingService, err := constructor(appOpts, exposeStoreKeys, appCodec)
+if err != nil {
+ // handle error
+}
+```
+
+The returned `StreamingService` is loaded into the BaseApp using the BaseApp's `SetStreamingService` method.
+The `Stream` method is called on the service to begin the streaming process. Depending on the implementation, this process
+may be synchronous or asynchronous with the message processing of the state machine.
+
+```go
+bApp.SetStreamingService(streamingService)
+wg := new(sync.WaitGroup)
+streamingService.Stream(wg)
+```
diff --git a/store/streaming/constructor.go b/store/streaming/constructor.go
new file mode 100644
index 0000000000..e576f84b83
--- /dev/null
+++ b/store/streaming/constructor.go
@@ -0,0 +1,137 @@
+package streaming
+
+import (
+ "fmt"
+ "strings"
+ "sync"
+
+ "github.com/cosmos/cosmos-sdk/baseapp"
+ "github.com/cosmos/cosmos-sdk/codec"
+ serverTypes "github.com/cosmos/cosmos-sdk/server/types"
+ "github.com/cosmos/cosmos-sdk/store/streaming/file"
+ "github.com/cosmos/cosmos-sdk/store/types"
+
+ "github.com/spf13/cast"
+)
+
+// ServiceConstructor is used to construct a streaming service
+type ServiceConstructor func(opts serverTypes.AppOptions, keys []types.StoreKey, marshaller codec.BinaryCodec) (baseapp.StreamingService, error)
+
+// ServiceType enum for specifying the type of StreamingService
+type ServiceType int
+
+const (
+ Unknown ServiceType = iota
+ File
+ // add more in the future
+)
+
+// ServiceTypeFromString returns the streaming.ServiceType corresponding to the provided name
+func ServiceTypeFromString(name string) ServiceType {
+ switch strings.ToLower(name) {
+ case "file", "f":
+ return File
+ default:
+ return Unknown
+ }
+}
+
+// String returns the string name of a streaming.ServiceType
+func (sst ServiceType) String() string {
+ switch sst {
+ case File:
+ return "file"
+ default:
+ return "unknown"
+ }
+}
+
+// ServiceConstructorLookupTable is a mapping of streaming.ServiceTypes to streaming.ServiceConstructors
+var ServiceConstructorLookupTable = map[ServiceType]ServiceConstructor{
+ File: NewFileStreamingService,
+}
+
+// NewServiceConstructor returns the streaming.ServiceConstructor corresponding to the provided name
+func NewServiceConstructor(name string) (ServiceConstructor, error) {
+ ssType := ServiceTypeFromString(name)
+ if ssType == Unknown {
+ return nil, fmt.Errorf("unrecognized streaming service name %s", name)
+ }
+ if constructor, ok := ServiceConstructorLookupTable[ssType]; ok && constructor != nil {
+ return constructor, nil
+ }
+ return nil, fmt.Errorf("streaming service constructor of type %s not found", ssType.String())
+}
+
+// NewFileStreamingService is the streaming.ServiceConstructor function for creating a FileStreamingService
+func NewFileStreamingService(opts serverTypes.AppOptions, keys []types.StoreKey, marshaller codec.BinaryCodec) (baseapp.StreamingService, error) {
+ filePrefix := cast.ToString(opts.Get("streamers.file.prefix"))
+ fileDir := cast.ToString(opts.Get("streamers.file.write_dir"))
+ return file.NewStreamingService(fileDir, filePrefix, keys, marshaller)
+}
+
+// LoadStreamingServices is a function for loading StreamingServices onto the BaseApp using the provided AppOptions, codec, and keys
+// It returns the active streaming services and the WaitGroup used to synchronize with them, and any error that occurs during the setup
+func LoadStreamingServices(bApp *baseapp.BaseApp, appOpts serverTypes.AppOptions, appCodec codec.BinaryCodec, keys map[string]*types.KVStoreKey) ([]baseapp.StreamingService, *sync.WaitGroup, error) {
+ // waitgroup for optional shutdown coordination of the streaming service(s)
+ wg := new(sync.WaitGroup)
+ // configure state listening capabilities using AppOptions
+ streamers := cast.ToStringSlice(appOpts.Get("store.streamers"))
+ activeStreamers := make([]baseapp.StreamingService, 0, len(streamers))
+ for _, streamerName := range
streamers { + // get the store keys allowed to be exposed for this streaming service + exposeKeyStrs := cast.ToStringSlice(appOpts.Get(fmt.Sprintf("streamers.%s.keys", streamerName))) + var exposeStoreKeys []types.StoreKey + if exposeAll(exposeKeyStrs) { // if list contains `*`, expose all StoreKeys + exposeStoreKeys = make([]types.StoreKey, 0, len(keys)) + for _, storeKey := range keys { + exposeStoreKeys = append(exposeStoreKeys, storeKey) + } + } else { + exposeStoreKeys = make([]types.StoreKey, 0, len(exposeKeyStrs)) + for _, keyStr := range exposeKeyStrs { + if storeKey, ok := keys[keyStr]; ok { + exposeStoreKeys = append(exposeStoreKeys, storeKey) + } + } + } + if len(exposeStoreKeys) == 0 { // short circuit if we are not exposing anything + continue + } + // get the constructor for this streamer name + constructor, err := NewServiceConstructor(streamerName) + if err != nil { + // close any services we may have already spun up before hitting the error on this one + for _, activeStreamer := range activeStreamers { + activeStreamer.Close() + } + return nil, nil, err + } + // generate the streaming service using the constructor, appOptions, and the StoreKeys we want to expose + streamingService, err := constructor(appOpts, exposeStoreKeys, appCodec) + if err != nil { + // close any services we may have already spun up before hitting the error on this one + for _, activeStreamer := range activeStreamers { + activeStreamer.Close() + } + return nil, nil, err + } + // register the streaming service with the BaseApp + bApp.SetStreamingService(streamingService) + // kick off the background streaming service loop + streamingService.Stream(wg) + // add to the list of active streamers + activeStreamers = append(activeStreamers, streamingService) + } + // if there are no active streamers, activeStreamers is empty (len == 0) and the waitGroup is not waiting on anything + return activeStreamers, wg, nil +} + +func exposeAll(list []string) bool { + for _, ele := range list { + if ele == "*" { + return true + } + } + return false +} diff --git a/store/streaming/constructor_test.go b/store/streaming/constructor_test.go new file mode 100644 index 0000000000..5f9d58016f --- /dev/null +++ b/store/streaming/constructor_test.go @@ -0,0 +1,43 @@ +package streaming + +import ( + "testing" + + "github.com/cosmos/cosmos-sdk/codec" + codecTypes "github.com/cosmos/cosmos-sdk/codec/types" + "github.com/cosmos/cosmos-sdk/store/streaming/file" + "github.com/cosmos/cosmos-sdk/store/types" + sdk "github.com/cosmos/cosmos-sdk/types" + + "github.com/stretchr/testify/require" +) + +type fakeOptions struct{} + +func (f *fakeOptions) Get(string) interface{} { return nil } + +var ( + mockOptions = new(fakeOptions) + mockKeys = []types.StoreKey{sdk.NewKVStoreKey("mockKey1"), sdk.NewKVStoreKey("mockKey2")} + interfaceRegistry = codecTypes.NewInterfaceRegistry() + testMarshaller = codec.NewProtoCodec(interfaceRegistry) +) + +func TestStreamingServiceConstructor(t *testing.T) { + _, err := NewServiceConstructor("unexpectedName") + require.NotNil(t, err) + + constructor, err := NewServiceConstructor("file") + require.Nil(t, err) + var expectedType ServiceConstructor + require.IsType(t, expectedType, constructor) + + serv, err := constructor(mockOptions, mockKeys, testMarshaller) + require.Nil(t, err) + require.IsType(t, &file.StreamingService{}, serv) + listeners := serv.Listeners() + for _, key := range mockKeys { + _, ok := listeners[key] + require.True(t, ok) + } +} diff --git a/store/streaming/file/README.md 
b/store/streaming/file/README.md
new file mode 100644
index 0000000000..7243f15dd0
--- /dev/null
+++ b/store/streaming/file/README.md
@@ -0,0 +1,64 @@
+# File Streaming Service
+This package contains an implementation of the [StreamingService](../../../baseapp/streaming.go) that writes
+the data stream out to files on the local filesystem. This process is performed synchronously with the message processing
+of the state machine.
+
+## Configuration
+
+The `file.StreamingService` is configured from within an App using the `AppOptions` loaded from the app.toml file:
+
+```toml
+[store]
+ streamers = [ # if len(streamers) > 0 we are streaming
+ "file", # name of the streaming service, used by constructor
+ ]
+
+[streamers]
+ [streamers.file]
+ keys = ["list", "of", "store", "keys", "we", "want", "to", "expose", "for", "this", "streaming", "service"]
+ write_dir = "path to the write directory"
+ prefix = "optional prefix to prepend to the generated file names"
+```
+
+We turn the service on by adding its name, "file", to `store.streamers`, the list of streaming services for this App to employ.
+
+In `streamers.file` we include three configuration parameters for the file streaming service:
+1. `streamers.file.keys` contains the list of `StoreKey` names for the KVStores to expose using this service.
+In order to expose *all* KVStores, we can include `*` in this list. An empty list is equivalent to turning the service off.
+2. `streamers.file.write_dir` contains the path to the directory to write the files to.
+3. `streamers.file.prefix` contains an optional prefix to prepend to the output files to prevent potential collisions
+with other App `StreamingService` output files.
+
+##### Encoding
+
+For each pair of `BeginBlock` requests and responses, a file is created and named `block-{N}-begin`, where N is the block number.
+At the head of this file the length-prefixed protobuf encoded `BeginBlock` request is written.
+At the tail of this file the length-prefixed protobuf encoded `BeginBlock` response is written.
+In between these two encoded messages, the state changes that occurred due to the `BeginBlock` request are written chronologically as
+a series of length-prefixed protobuf encoded `StoreKVPair`s representing `Set` and `Delete` operations within the KVStores the service
+is configured to listen to.
+
+For each pair of `DeliverTx` requests and responses, a file is created and named `block-{N}-tx-{M}` where N is the block number and M
+is the tx number in the block (i.e. 0, 1, 2...).
+At the head of this file the length-prefixed protobuf encoded `DeliverTx` request is written.
+At the tail of this file the length-prefixed protobuf encoded `DeliverTx` response is written.
+In between these two encoded messages, the state changes that occurred due to the `DeliverTx` request are written chronologically as
+a series of length-prefixed protobuf encoded `StoreKVPair`s representing `Set` and `Delete` operations within the KVStores the service
+is configured to listen to.
+
+For each pair of `EndBlock` requests and responses, a file is created and named `block-{N}-end`, where N is the block number.
+At the head of this file the length-prefixed protobuf encoded `EndBlock` request is written.
+At the tail of this file the length-prefixed protobuf encoded `EndBlock` response is written.
+In between these two encoded messages, the state changes that occurred due to the `EndBlock` request are written chronologically as
+a series of length-prefixed protobuf encoded `StoreKVPair`s representing `Set` and `Delete` operations within the KVStores the service
+is configured to listen to.
+
+##### Decoding
+
+To decode the files written in the above format, we read all the bytes from a given file into memory and segment them into proto
+messages based on the length-prefixing of each message. Once segmented, it is known that the first message is the ABCI request,
+the last message is the ABCI response, and that every message in between is a `StoreKVPair`. This enables us to decode each segment into
+the appropriate message type.
+
+The type of ABCI req/res, the block height, and the transaction index (where relevant) are known
+from the file name, and the KVStore each `StoreKVPair` originates from is known since the `StoreKey` is included as a field in the proto message.
diff --git a/store/streaming/file/example_config.toml b/store/streaming/file/example_config.toml
new file mode 100644
index 0000000000..8202bd8ef5
--- /dev/null
+++ b/store/streaming/file/example_config.toml
@@ -0,0 +1,10 @@
+[store]
+ streamers = [ # if len(streamers) > 0 we are streaming
+ "file", # name of the streaming service, used by constructor
+ ]
+
+[streamers]
+ [streamers.file]
+ keys = ["list", "of", "store", "keys", "we", "want", "to", "expose", "for", "this", "streaming", "service"]
+ write_dir = "path to the write directory"
+ prefix = "optional prefix to prepend to the generated file names"
diff --git a/store/streaming/file/service.go b/store/streaming/file/service.go
new file mode 100644
index 0000000000..685ba7d9d7
--- /dev/null
+++ b/store/streaming/file/service.go
@@ -0,0 +1,278 @@
+package file
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "path"
+ "path/filepath"
+ "sync"
+
+ abci "github.com/tendermint/tendermint/abci/types"
+
+ "github.com/cosmos/cosmos-sdk/baseapp"
+ "github.com/cosmos/cosmos-sdk/codec"
+ "github.com/cosmos/cosmos-sdk/store/types"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+)
+
+var _ baseapp.StreamingService = &StreamingService{}
+
+// StreamingService is a concrete implementation of StreamingService that writes state changes out to files
+type StreamingService struct {
+ listeners map[types.StoreKey][]types.WriteListener // the listeners that will be initialized with BaseApp
+ srcChan <-chan []byte // the channel that all the WriteListeners write their data out to
+ filePrefix string // optional prefix for each of the generated files
+ writeDir string // directory to write files into
+ codec codec.BinaryCodec // marshaller used for re-marshalling the ABCI messages to write them out to the destination files
+ stateCache [][]byte // cache the protobuf binary encoded StoreKVPairs in the order they are received
+ stateCacheLock *sync.Mutex // mutex for the state cache
+ currentBlockNumber int64 // the current block number
+ currentTxIndex int64 // the index of the current tx
+ quitChan chan struct{} // channel to synchronize closure
+}
+
+// IntermediateWriter is used so that we do not need to update the underlying io.Writer
+// inside the StoreKVPairWriteListener every time we begin writing to a new file
+type IntermediateWriter struct {
+ outChan chan<- []byte
+}
+
+// NewIntermediateWriter creates an instance of an IntermediateWriter that sends to the provided channel
+func NewIntermediateWriter(outChan chan<- []byte) *IntermediateWriter {
+ return &IntermediateWriter{
+ outChan:
outChan,
+ }
+}
+
+// Write satisfies io.Writer
+func (iw *IntermediateWriter) Write(b []byte) (int, error) {
+ iw.outChan <- b
+ return len(b), nil
+}
+
+// NewStreamingService creates a new StreamingService for the provided writeDir, (optional) filePrefix, and storeKeys
+func NewStreamingService(writeDir, filePrefix string, storeKeys []types.StoreKey, c codec.BinaryCodec) (*StreamingService, error) {
+ listenChan := make(chan []byte)
+ iw := NewIntermediateWriter(listenChan)
+ listener := types.NewStoreKVPairWriteListener(iw, c)
+ listeners := make(map[types.StoreKey][]types.WriteListener, len(storeKeys))
+ // in this case, we are using the same listener for each Store
+ for _, key := range storeKeys {
+ listeners[key] = append(listeners[key], listener)
+ }
+ // check that the writeDir exists and is writeable so that we can catch the error here at initialization if it is not
+ // we don't open a dstFile until we receive our first ABCI message
+ if err := isDirWriteable(writeDir); err != nil {
+ return nil, err
+ }
+ return &StreamingService{
+ listeners: listeners,
+ srcChan: listenChan,
+ filePrefix: filePrefix,
+ writeDir: writeDir,
+ codec: c,
+ stateCache: make([][]byte, 0),
+ stateCacheLock: new(sync.Mutex),
+ }, nil
+}
+
+// Listeners satisfies the baseapp.StreamingService interface
+// It returns the StreamingService's underlying WriteListeners
+// Use for registering the underlying WriteListeners with the BaseApp
+func (fss *StreamingService) Listeners() map[types.StoreKey][]types.WriteListener {
+ return fss.listeners
+}
+
+// ListenBeginBlock satisfies the baseapp.ABCIListener interface
+// It writes the received BeginBlock request and response and the resulting state changes
+// out to a file as described in the naming schema above
+func (fss *StreamingService) ListenBeginBlock(ctx sdk.Context, req abci.RequestBeginBlock, res abci.ResponseBeginBlock) error {
+ // generate the new file
+ dstFile, err := fss.openBeginBlockFile(req)
+ if err != nil {
+ return err
+ }
+ // write req to file
+ lengthPrefixedReqBytes, err := fss.codec.MarshalLengthPrefixed(&req)
+ if err != nil {
+ return err
+ }
+ if _, err = dstFile.Write(lengthPrefixedReqBytes); err != nil {
+ return err
+ }
+ // write all state changes cached for this stage to file
+ fss.stateCacheLock.Lock()
+ for _, stateChange := range fss.stateCache {
+ if _, err = dstFile.Write(stateChange); err != nil {
+ fss.stateCache = nil
+ fss.stateCacheLock.Unlock()
+ return err
+ }
+ }
+ // reset cache
+ fss.stateCache = nil
+ fss.stateCacheLock.Unlock()
+ // write res to file
+ lengthPrefixedResBytes, err := fss.codec.MarshalLengthPrefixed(&res)
+ if err != nil {
+ return err
+ }
+ if _, err = dstFile.Write(lengthPrefixedResBytes); err != nil {
+ return err
+ }
+ // close file
+ return dstFile.Close()
+}
+
+func (fss *StreamingService) openBeginBlockFile(req abci.RequestBeginBlock) (*os.File, error) {
+ fss.currentBlockNumber = req.GetHeader().Height
+ fss.currentTxIndex = 0
+ fileName := fmt.Sprintf("block-%d-begin", fss.currentBlockNumber)
+ if fss.filePrefix != "" {
+ fileName = fmt.Sprintf("%s-%s", fss.filePrefix, fileName)
+ }
+ return os.OpenFile(filepath.Join(fss.writeDir, fileName), os.O_CREATE|os.O_WRONLY, 0o600)
+}
+
+// ListenDeliverTx satisfies the baseapp.ABCIListener interface
+// It writes the received DeliverTx request and response and the resulting state changes
+// out to a file as described in the naming schema above
+func (fss *StreamingService) ListenDeliverTx(ctx sdk.Context, req abci.RequestDeliverTx,
+
+// ListenDeliverTx satisfies the baseapp.ABCIListener interface
+// It writes the received DeliverTx request and response and the resulting state changes
+// out to a file as described in the naming schema above
+func (fss *StreamingService) ListenDeliverTx(ctx sdk.Context, req abci.RequestDeliverTx, res abci.ResponseDeliverTx) error {
+	// generate the new file
+	dstFile, err := fss.openDeliverTxFile()
+	if err != nil {
+		return err
+	}
+	// write req to file
+	lengthPrefixedReqBytes, err := fss.codec.MarshalLengthPrefixed(&req)
+	if err != nil {
+		return err
+	}
+	if _, err = dstFile.Write(lengthPrefixedReqBytes); err != nil {
+		return err
+	}
+	// write all state changes cached for this stage to file
+	fss.stateCacheLock.Lock()
+	for _, stateChange := range fss.stateCache {
+		if _, err = dstFile.Write(stateChange); err != nil {
+			fss.stateCache = nil
+			fss.stateCacheLock.Unlock()
+			return err
+		}
+	}
+	// reset cache
+	fss.stateCache = nil
+	fss.stateCacheLock.Unlock()
+	// write res to file
+	lengthPrefixedResBytes, err := fss.codec.MarshalLengthPrefixed(&res)
+	if err != nil {
+		return err
+	}
+	if _, err = dstFile.Write(lengthPrefixedResBytes); err != nil {
+		return err
+	}
+	// close file
+	return dstFile.Close()
+}
+
+func (fss *StreamingService) openDeliverTxFile() (*os.File, error) {
+	fileName := fmt.Sprintf("block-%d-tx-%d", fss.currentBlockNumber, fss.currentTxIndex)
+	if fss.filePrefix != "" {
+		fileName = fmt.Sprintf("%s-%s", fss.filePrefix, fileName)
+	}
+	fss.currentTxIndex++
+	return os.OpenFile(filepath.Join(fss.writeDir, fileName), os.O_CREATE|os.O_WRONLY, 0o600)
+}
+
+// ListenEndBlock satisfies the baseapp.ABCIListener interface
+// It writes the received EndBlock request and response and the resulting state changes
+// out to a file as described in the naming schema above
+func (fss *StreamingService) ListenEndBlock(ctx sdk.Context, req abci.RequestEndBlock, res abci.ResponseEndBlock) error {
+	// generate the new file
+	dstFile, err := fss.openEndBlockFile()
+	if err != nil {
+		return err
+	}
+	// write req to file
+	lengthPrefixedReqBytes, err := fss.codec.MarshalLengthPrefixed(&req)
+	if err != nil {
+		return err
+	}
+	if _, err = dstFile.Write(lengthPrefixedReqBytes); err != nil {
+		return err
+	}
+	// write all state changes cached for this stage to file
+	fss.stateCacheLock.Lock()
+	for _, stateChange := range fss.stateCache {
+		if _, err = dstFile.Write(stateChange); err != nil {
+			fss.stateCache = nil
+			fss.stateCacheLock.Unlock()
+			return err
+		}
+	}
+	// reset cache
+	fss.stateCache = nil
+	fss.stateCacheLock.Unlock()
+	// write res to file
+	lengthPrefixedResBytes, err := fss.codec.MarshalLengthPrefixed(&res)
+	if err != nil {
+		return err
+	}
+	if _, err = dstFile.Write(lengthPrefixedResBytes); err != nil {
+		return err
+	}
+	// close file
+	return dstFile.Close()
+}
+
+func (fss *StreamingService) openEndBlockFile() (*os.File, error) {
+	fileName := fmt.Sprintf("block-%d-end", fss.currentBlockNumber)
+	if fss.filePrefix != "" {
+		fileName = fmt.Sprintf("%s-%s", fss.filePrefix, fileName)
+	}
+	return os.OpenFile(filepath.Join(fss.writeDir, fileName), os.O_CREATE|os.O_WRONLY, 0o600)
+}
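+
+// Note: the three Listen* methods above share one flush pattern. State changes are
+// cached by the Stream goroutine below, and whichever ABCI stage completes next drains
+// that cache into its own file, between the length-prefixed request and response.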
+
+// Stream satisfies the baseapp.StreamingService interface
+// It spins up a goroutine select loop which awaits length-prefixed binary encoded KV pairs
+// and caches them in the order they were received
+// It returns an error if it is called twice
+func (fss *StreamingService) Stream(wg *sync.WaitGroup) error {
+	if fss.quitChan != nil {
+		return errors.New("`Stream` has already been called. The stream needs to be closed before it can be started again")
+	}
+	fss.quitChan = make(chan struct{})
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		for {
+			select {
+			case <-fss.quitChan:
+				fss.quitChan = nil
+				return
+			case by := <-fss.srcChan:
+				fss.stateCacheLock.Lock()
+				fss.stateCache = append(fss.stateCache, by)
+				fss.stateCacheLock.Unlock()
+			}
+		}
+	}()
+	return nil
+}
+
+// Close satisfies the io.Closer interface, which satisfies the baseapp.StreamingService interface
+func (fss *StreamingService) Close() error {
+	close(fss.quitChan)
+	return nil
+}
+
+// isDirWriteable checks if dir is writable by writing a file to dir and then
+// removing it. It returns nil if dir is writable.
+func isDirWriteable(dir string) error {
+	f := path.Join(dir, ".touch")
+	if err := os.WriteFile(f, []byte(""), 0o600); err != nil {
+		return err
+	}
+	return os.Remove(f)
+}
diff --git a/store/streaming/file/service_test.go b/store/streaming/file/service_test.go
new file mode 100644
index 0000000000..6ecad254b5
--- /dev/null
+++ b/store/streaming/file/service_test.go
@@ -0,0 +1,400 @@
+package file
+
+import (
+	"encoding/binary"
+	"fmt"
+	"os"
+	"path/filepath"
+	"sync"
+	"testing"
+
+	"github.com/cosmos/cosmos-sdk/codec"
+	codecTypes "github.com/cosmos/cosmos-sdk/codec/types"
+	"github.com/cosmos/cosmos-sdk/store/types"
+	sdk "github.com/cosmos/cosmos-sdk/types"
+
+	"github.com/stretchr/testify/require"
+	abci "github.com/tendermint/tendermint/abci/types"
+	types1 "github.com/tendermint/tendermint/proto/tendermint/types"
+)
+
+var (
+	interfaceRegistry            = codecTypes.NewInterfaceRegistry()
+	testMarshaller               = codec.NewProtoCodec(interfaceRegistry)
+	testStreamingService         *StreamingService
+	testListener1, testListener2 types.WriteListener
+	emptyContext                 = sdk.Context{}
+
+	// test abci message types
+	mockHash          = []byte{1, 2, 3, 4, 5, 6, 7, 8, 9}
+	testBeginBlockReq = abci.RequestBeginBlock{
+		Header: types1.Header{
+			Height: 1,
+		},
+		ByzantineValidators: []abci.Evidence{},
+		Hash:                mockHash,
+		LastCommitInfo: abci.LastCommitInfo{
+			Round: 1,
+			Votes: []abci.VoteInfo{},
+		},
+	}
+	testBeginBlockRes = abci.ResponseBeginBlock{
+		Events: []abci.Event{
+			{
+				Type: "testEventType1",
+			},
+			{
+				Type: "testEventType2",
+			},
+		},
+	}
+	testEndBlockReq = abci.RequestEndBlock{
+		Height: 1,
+	}
+	testEndBlockRes = abci.ResponseEndBlock{
+		Events:                []abci.Event{},
+		ConsensusParamUpdates: &abci.ConsensusParams{},
+		ValidatorUpdates:      []abci.ValidatorUpdate{},
+	}
+	mockTxBytes1      = []byte{9, 8, 7, 6, 5, 4, 3, 2, 1}
+	testDeliverTxReq1 = abci.RequestDeliverTx{
+		Tx: mockTxBytes1,
+	}
+	mockTxBytes2      = []byte{8, 7, 6, 5, 4, 3, 2}
+	testDeliverTxReq2 = abci.RequestDeliverTx{
+		Tx: mockTxBytes2,
+	}
+	mockTxResponseData1 = []byte{1, 3, 5, 7, 9}
+	testDeliverTxRes1   = abci.ResponseDeliverTx{
+		Events:    []abci.Event{},
+		Code:      1,
+		Codespace: "mockCodeSpace",
+		Data:      mockTxResponseData1,
+		GasUsed:   2,
+		GasWanted: 3,
+		Info:      "mockInfo",
+		Log:       "mockLog",
+	}
+	mockTxResponseData2 = []byte{1, 3, 5, 7, 9}
+	testDeliverTxRes2   = abci.ResponseDeliverTx{
+		Events:    []abci.Event{},
+		Code:      1,
+		Codespace: "mockCodeSpace",
+		Data:      mockTxResponseData2,
+		GasUsed:   2,
+		GasWanted: 3,
+		Info:      "mockInfo",
+		Log:       "mockLog",
+	}
+
+	// mock store keys
+	mockStoreKey1 = sdk.NewKVStoreKey("mockStore1")
+	mockStoreKey2 = sdk.NewKVStoreKey("mockStore2")
+
+	// file stuff
+	testPrefix = "testPrefix"
+	testDir    = "./.test"
+
+	// mock state changes
+	mockKey1   = []byte{1, 2, 3}
+	mockValue1 = []byte{3, 2, 1}
+	mockKey2   = []byte{2, 3, 4}
+	mockValue2 = []byte{4, 3, 2}
+	mockKey3   = []byte{3, 4, 5}
+	mockValue3 = []byte{5, 4, 3}
+)
+
+func TestIntermediateWriter(t *testing.T) {
+	outChan := make(chan []byte, 0)
+	iw := NewIntermediateWriter(outChan)
+	require.IsType(t, &IntermediateWriter{}, iw)
+	testBytes := []byte{1, 2, 3, 4, 5}
+	var length int
+	var err error
+	waitChan := make(chan struct{}, 0)
+	go func() {
+		length, err = iw.Write(testBytes)
+		waitChan <- struct{}{}
+	}()
+	receivedBytes := <-outChan
+	<-waitChan
+	require.Equal(t, len(testBytes), length)
+	require.Equal(t, testBytes, receivedBytes)
+	require.Nil(t, err)
+}
+
+func TestFileStreamingService(t *testing.T) {
+	if os.Getenv("CI") != "" {
+		t.Skip("Skipping TestFileStreamingService in CI environment")
+	}
+	err := os.Mkdir(testDir, 0o700)
+	require.Nil(t, err)
+	defer os.RemoveAll(testDir)
+
+	testKeys := []types.StoreKey{mockStoreKey1, mockStoreKey2}
+	testStreamingService, err = NewStreamingService(testDir, testPrefix, testKeys, testMarshaller)
+	require.Nil(t, err)
+	require.IsType(t, &StreamingService{}, testStreamingService)
+	require.Equal(t, testPrefix, testStreamingService.filePrefix)
+	require.Equal(t, testDir, testStreamingService.writeDir)
+	require.Equal(t, testMarshaller, testStreamingService.codec)
+	testListener1 = testStreamingService.listeners[mockStoreKey1][0]
+	testListener2 = testStreamingService.listeners[mockStoreKey2][0]
+	wg := new(sync.WaitGroup)
+	testStreamingService.Stream(wg)
+	testListenBeginBlock(t)
+	testListenDeliverTx1(t)
+	testListenDeliverTx2(t)
+	testListenEndBlock(t)
+	testStreamingService.Close()
+	wg.Wait()
+}
+
+func testListenBeginBlock(t *testing.T) {
+	expectedBeginBlockReqBytes, err := testMarshaller.Marshal(&testBeginBlockReq)
+	require.Nil(t, err)
+	expectedBeginBlockResBytes, err := testMarshaller.Marshal(&testBeginBlockRes)
+	require.Nil(t, err)
+
+	// write state changes
+	testListener1.OnWrite(mockStoreKey1, mockKey1, mockValue1, false)
+	testListener2.OnWrite(mockStoreKey2, mockKey2, mockValue2, false)
+	testListener1.OnWrite(mockStoreKey1, mockKey3, mockValue3, false)
+
+	// expected KV pairs
+	expectedKVPair1, err := testMarshaller.Marshal(&types.StoreKVPair{
+		StoreKey: mockStoreKey1.Name(),
+		Key:      mockKey1,
+		Value:    mockValue1,
+		Delete:   false,
+	})
+	require.Nil(t, err)
+	expectedKVPair2, err := testMarshaller.Marshal(&types.StoreKVPair{
+		StoreKey: mockStoreKey2.Name(),
+		Key:      mockKey2,
+		Value:    mockValue2,
+		Delete:   false,
+	})
+	require.Nil(t, err)
+	expectedKVPair3, err := testMarshaller.Marshal(&types.StoreKVPair{
+		StoreKey: mockStoreKey1.Name(),
+		Key:      mockKey3,
+		Value:    mockValue3,
+		Delete:   false,
+	})
+	require.Nil(t, err)
+
+	// send the ABCI messages
+	err = testStreamingService.ListenBeginBlock(emptyContext, testBeginBlockReq, testBeginBlockRes)
+	require.Nil(t, err)
+
+	// load the file, checking that it was created with the expected name
+	fileName := fmt.Sprintf("%s-block-%d-begin", testPrefix, testBeginBlockReq.GetHeader().Height)
+	fileBytes, err := readInFile(fileName)
+	require.Nil(t, err)
+
+	// segment the file into the separate protobuf messages and check the correctness of each
+	segments, err := segmentBytes(fileBytes)
+	require.Nil(t, err)
+	require.Equal(t, 5, len(segments))
+	require.Equal(t, expectedBeginBlockReqBytes, segments[0])
+	require.Equal(t, expectedKVPair1, segments[1])
+	require.Equal(t, expectedKVPair2, segments[2])
+	require.Equal(t, expectedKVPair3, segments[3])
+	require.Equal(t, expectedBeginBlockResBytes, segments[4])
+}
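+
+// The five segments asserted above (and in the helpers below) follow directly from the
+// service's write order: the length-prefixed request, the three cached KV pairs in
+// write order, then the length-prefixed response.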
+
+func testListenDeliverTx1(t *testing.T) {
+	expectedDeliverTxReq1Bytes, err := testMarshaller.Marshal(&testDeliverTxReq1)
+	require.Nil(t, err)
+	expectedDeliverTxRes1Bytes, err := testMarshaller.Marshal(&testDeliverTxRes1)
+	require.Nil(t, err)
+
+	// write state changes
+	testListener1.OnWrite(mockStoreKey1, mockKey1, mockValue1, false)
+	testListener2.OnWrite(mockStoreKey2, mockKey2, mockValue2, false)
+	testListener1.OnWrite(mockStoreKey2, mockKey3, mockValue3, false)
+
+	// expected KV pairs
+	expectedKVPair1, err := testMarshaller.Marshal(&types.StoreKVPair{
+		StoreKey: mockStoreKey1.Name(),
+		Key:      mockKey1,
+		Value:    mockValue1,
+		Delete:   false,
+	})
+	require.Nil(t, err)
+	expectedKVPair2, err := testMarshaller.Marshal(&types.StoreKVPair{
+		StoreKey: mockStoreKey2.Name(),
+		Key:      mockKey2,
+		Value:    mockValue2,
+		Delete:   false,
+	})
+	require.Nil(t, err)
+	expectedKVPair3, err := testMarshaller.Marshal(&types.StoreKVPair{
+		StoreKey: mockStoreKey2.Name(),
+		Key:      mockKey3,
+		Value:    mockValue3,
+		Delete:   false,
+	})
+	require.Nil(t, err)
+
+	// send the ABCI messages
+	err = testStreamingService.ListenDeliverTx(emptyContext, testDeliverTxReq1, testDeliverTxRes1)
+	require.Nil(t, err)
+
+	// load the file, checking that it was created with the expected name
+	fileName := fmt.Sprintf("%s-block-%d-tx-%d", testPrefix, testBeginBlockReq.GetHeader().Height, 0)
+	fileBytes, err := readInFile(fileName)
+	require.Nil(t, err)
+
+	// segment the file into the separate protobuf messages and check the correctness of each
+	segments, err := segmentBytes(fileBytes)
+	require.Nil(t, err)
+	require.Equal(t, 5, len(segments))
+	require.Equal(t, expectedDeliverTxReq1Bytes, segments[0])
+	require.Equal(t, expectedKVPair1, segments[1])
+	require.Equal(t, expectedKVPair2, segments[2])
+	require.Equal(t, expectedKVPair3, segments[3])
+	require.Equal(t, expectedDeliverTxRes1Bytes, segments[4])
+}
+
+func testListenDeliverTx2(t *testing.T) {
+	expectedDeliverTxReq2Bytes, err := testMarshaller.Marshal(&testDeliverTxReq2)
+	require.Nil(t, err)
+	expectedDeliverTxRes2Bytes, err := testMarshaller.Marshal(&testDeliverTxRes2)
+	require.Nil(t, err)
+
+	// write state changes
+	testListener1.OnWrite(mockStoreKey2, mockKey1, mockValue1, false)
+	testListener2.OnWrite(mockStoreKey1, mockKey2, mockValue2, false)
+	testListener1.OnWrite(mockStoreKey2, mockKey3, mockValue3, false)
+
+	// expected KV pairs
+	expectedKVPair1, err := testMarshaller.Marshal(&types.StoreKVPair{
+		StoreKey: mockStoreKey2.Name(),
+		Key:      mockKey1,
+		Value:    mockValue1,
+		Delete:   false,
+	})
+	require.Nil(t, err)
+	expectedKVPair2, err := testMarshaller.Marshal(&types.StoreKVPair{
+		StoreKey: mockStoreKey1.Name(),
+		Key:      mockKey2,
+		Value:    mockValue2,
+		Delete:   false,
+	})
+	require.Nil(t, err)
+	expectedKVPair3, err := testMarshaller.Marshal(&types.StoreKVPair{
+		StoreKey: mockStoreKey2.Name(),
+		Key:      mockKey3,
+		Value:    mockValue3,
+		Delete:   false,
+	})
+	require.Nil(t, err)
+
+	// send the ABCI messages
+	err = testStreamingService.ListenDeliverTx(emptyContext, testDeliverTxReq2, testDeliverTxRes2)
+	require.Nil(t, err)
+
+	// load the file, checking that it was created with the expected name
+	fileName := fmt.Sprintf("%s-block-%d-tx-%d", testPrefix, testBeginBlockReq.GetHeader().Height, 1)
+	fileBytes, err := readInFile(fileName)
+	require.Nil(t, err)
+
+	// segment the file into the separate protobuf messages and check the correctness of each
+	segments, err := segmentBytes(fileBytes)
+	require.Nil(t, err)
+	require.Equal(t, 5, len(segments))
+	require.Equal(t, expectedDeliverTxReq2Bytes, segments[0])
+	require.Equal(t, expectedKVPair1, segments[1])
+	require.Equal(t, expectedKVPair2, segments[2])
+	require.Equal(t, expectedKVPair3, segments[3])
+	require.Equal(t, expectedDeliverTxRes2Bytes, segments[4])
+}
+
+func testListenEndBlock(t *testing.T) {
+	expectedEndBlockReqBytes, err := testMarshaller.Marshal(&testEndBlockReq)
+	require.Nil(t, err)
+	expectedEndBlockResBytes, err := testMarshaller.Marshal(&testEndBlockRes)
+	require.Nil(t, err)
+
+	// write state changes
+	testListener1.OnWrite(mockStoreKey1, mockKey1, mockValue1, false)
+	testListener2.OnWrite(mockStoreKey1, mockKey2, mockValue2, false)
+	testListener1.OnWrite(mockStoreKey2, mockKey3, mockValue3, false)
+
+	// expected KV pairs
+	expectedKVPair1, err := testMarshaller.Marshal(&types.StoreKVPair{
+		StoreKey: mockStoreKey1.Name(),
+		Key:      mockKey1,
+		Value:    mockValue1,
+		Delete:   false,
+	})
+	require.Nil(t, err)
+	expectedKVPair2, err := testMarshaller.Marshal(&types.StoreKVPair{
+		StoreKey: mockStoreKey1.Name(),
+		Key:      mockKey2,
+		Value:    mockValue2,
+		Delete:   false,
+	})
+	require.Nil(t, err)
+	expectedKVPair3, err := testMarshaller.Marshal(&types.StoreKVPair{
+		StoreKey: mockStoreKey2.Name(),
+		Key:      mockKey3,
+		Value:    mockValue3,
+		Delete:   false,
+	})
+	require.Nil(t, err)
+
+	// send the ABCI messages
+	err = testStreamingService.ListenEndBlock(emptyContext, testEndBlockReq, testEndBlockRes)
+	require.Nil(t, err)
+
+	// load the file, checking that it was created with the expected name
+	fileName := fmt.Sprintf("%s-block-%d-end", testPrefix, testEndBlockReq.Height)
+	fileBytes, err := readInFile(fileName)
+	require.Nil(t, err)
+
+	// segment the file into the separate protobuf messages and check the correctness of each
+	segments, err := segmentBytes(fileBytes)
+	require.Nil(t, err)
+	require.Equal(t, 5, len(segments))
+	require.Equal(t, expectedEndBlockReqBytes, segments[0])
+	require.Equal(t, expectedKVPair1, segments[1])
+	require.Equal(t, expectedKVPair2, segments[2])
+	require.Equal(t, expectedKVPair3, segments[3])
+	require.Equal(t, expectedEndBlockResBytes, segments[4])
+}
+
+func readInFile(name string) ([]byte, error) {
+	path := filepath.Join(testDir, name)
+	return os.ReadFile(path)
+}
+
+// Returns all of the protobuf messages contained in the byte array as an array of byte arrays
+// The messages have their length prefix removed
+func segmentBytes(bz []byte) ([][]byte, error) {
+	var err error
+	segments := make([][]byte, 0)
+	for len(bz) > 0 {
+		var segment []byte
+		segment, bz, err = getHeadSegment(bz)
+		if err != nil {
+			return nil, err
+		}
+		segments = append(segments, segment)
+	}
+	return segments, nil
+}
+
+// Returns the bytes for the leading protobuf message in the byte array (with the length
+// prefix removed), along with the remainder of the byte array
+func getHeadSegment(bz []byte) ([]byte, []byte, error) {
+	size, prefixSize := binary.Uvarint(bz)
+	if prefixSize < 0 {
+		return nil, nil, fmt.Errorf("invalid number of bytes read from length-prefixed encoding: %d", prefixSize)
+	}
+	if size > uint64(len(bz)-prefixSize) {
+		return nil, nil, fmt.Errorf("not enough bytes to read; want: %v, got: %v", size, len(bz)-prefixSize)
+	}
+	return bz[prefixSize:(uint64(prefixSize) + size)], bz[uint64(prefixSize)+size:], nil
+}
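+
+// For illustration: a buffer {0x02, 0xAA, 0xBB, 0x01, 0xCC} holds two length-prefixed
+// messages; Uvarint reads the one-byte prefix (2), so segmentBytes yields
+// [][]byte{{0xAA, 0xBB}, {0xCC}}.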
diff --git a/store/types/iterator_test.go b/store/types/iterator_test.go
index 57b072475e..7f0ef7234e 100644
--- a/store/types/iterator_test.go
+++ b/store/types/iterator_test.go
@@ -13,7 +13,7 @@ import (
 
 func newMemTestKVStore(t *testing.T) types.KVStore {
 	db := dbm.NewMemDB()
-	store, err := iavl.LoadStore(db, log.NewNopLogger(), types.NewKVStoreKey("test"), types.CommitID{}, false, iavl.DefaultIAVLCacheSize)
+	store, err := iavl.LoadStore(db, log.NewNopLogger(), types.NewKVStoreKey("test"), types.CommitID{}, false, iavl.DefaultIAVLCacheSize, false)
 	require.NoError(t, err)
 	return store
 }
diff --git a/store/types/proof.go b/store/types/proof.go
index db8f673f46..ac0897ac10 100644
--- a/store/types/proof.go
+++ b/store/types/proof.go
@@ -79,15 +79,15 @@ func (op CommitmentOp) GetKey() []byte {
 	return op.Key
 }
 
-// Run takes in a list of arguments and attempts to run the proof op against these arguments
+// Run takes in a list of arguments and attempts to run the proof op against these arguments.
 // Returns the root wrapped in [][]byte if the proof op succeeds with given args. If not,
 // it will return an error.
 //
 // CommitmentOp will accept args of length 1 or length 0
 // If length 1 args is passed in, then CommitmentOp will attempt to prove the existence of the key
-// with the value provided by args[0] using the embedded CommitmentProof and return the CommitmentRoot of the proof
+// with the value provided by args[0] using the embedded CommitmentProof and return the CommitmentRoot of the proof.
 // If length 0 args is passed in, then CommitmentOp will attempt to prove the absence of the key
-// in the CommitmentOp and return the CommitmentRoot of the proof
+// in the CommitmentOp and return the CommitmentRoot of the proof.
 func (op CommitmentOp) Run(args [][]byte) ([][]byte, error) {
 	// calculate root from proof
 	root, err := op.Proof.Calculate()
diff --git a/store/types/store.go b/store/types/store.go
index 6766011b8f..8bb7037ea9 100644
--- a/store/types/store.go
+++ b/store/types/store.go
@@ -190,6 +190,12 @@ type CommitMultiStore interface {
 
 	// SetIAVLCacheSize sets the cache size of the IAVL tree.
 	SetIAVLCacheSize(size int)
+
+	// SetIAVLDisableFastNode enables/disables the fastnode feature on iavl.
+	SetIAVLDisableFastNode(disable bool)
+
+	// RollbackToVersion rolls back the db to a specific version (height).
+ RollbackToVersion(version int64) error } //---------subsp------------------------------- diff --git a/testutil/ioutil.go b/testutil/ioutil.go index 6a7e4fff9c..6ff54d24ec 100644 --- a/testutil/ioutil.go +++ b/testutil/ioutil.go @@ -3,7 +3,6 @@ package testutil import ( "bytes" "io" - "io/ioutil" "os" "strings" "testing" @@ -45,8 +44,8 @@ func ApplyMockIODiscardOutErr(c *cobra.Command) BufferReader { mockIn := strings.NewReader("") c.SetIn(mockIn) - c.SetOut(ioutil.Discard) - c.SetErr(ioutil.Discard) + c.SetOut(io.Discard) + c.SetErr(io.Discard) return mockIn } @@ -68,7 +67,7 @@ func WriteToNewTempFile(t testing.TB, s string) *os.File { func TempFile(t testing.TB) *os.File { t.Helper() - fp, err := ioutil.TempFile(t.TempDir(), "") + fp, err := os.CreateTemp(t.TempDir(), "") require.NoError(t, err) return fp diff --git a/testutil/ioutil_test.go b/testutil/ioutil_test.go index 415e7842c1..03250fbab6 100644 --- a/testutil/ioutil_test.go +++ b/testutil/ioutil_test.go @@ -1,7 +1,8 @@ package testutil_test import ( - "io/ioutil" + "io" + "os" "testing" "github.com/spf13/cobra" @@ -28,7 +29,7 @@ func TestWriteToNewTempFile(t *testing.T) { tempfile := testutil.WriteToNewTempFile(t, "test string") tempfile.Close() - bs, err := ioutil.ReadFile(tempfile.Name()) + bs, err := os.ReadFile(tempfile.Name()) require.NoError(t, err) require.Equal(t, "test string", string(bs)) } @@ -39,6 +40,6 @@ func TestApplyMockIODiscardOutErr(t *testing.T) { testutil.ApplyMockIODiscardOutErr(cmd) require.NotEqual(t, cmd.InOrStdin(), oldStdin) - require.Equal(t, cmd.OutOrStdout(), ioutil.Discard) - require.Equal(t, cmd.ErrOrStderr(), ioutil.Discard) + require.Equal(t, cmd.OutOrStdout(), io.Discard) + require.Equal(t, cmd.ErrOrStderr(), io.Discard) } diff --git a/testutil/key.go b/testutil/key.go index 142f53c24e..fc5cfae43c 100644 --- a/testutil/key.go +++ b/testutil/key.go @@ -78,5 +78,5 @@ func GenerateSaveCoinKey( return sdk.AccAddress{}, "", err } - return sdk.AccAddress(info.GetAddress()), secret, nil + return info.GetAddress(), secret, nil } diff --git a/testutil/network/network.go b/testutil/network/network.go index 2aff9b5b13..d4214644c4 100644 --- a/testutil/network/network.go +++ b/testutil/network/network.go @@ -6,7 +6,6 @@ import ( "encoding/json" "errors" "fmt" - "io/ioutil" "net/http" "net/url" "os" @@ -173,7 +172,7 @@ func New(t *testing.T, cfg Config) *Network { t.Log("acquiring test network lock") lock.Lock() - baseDir, err := ioutil.TempDir(t.TempDir(), cfg.ChainID) + baseDir, err := os.MkdirTemp(t.TempDir(), cfg.ChainID) require.NoError(t, err) t.Logf("created temporary directory: %s", baseDir) diff --git a/testutil/network/util.go b/testutil/network/util.go index 49c34b1fdf..535253bee4 100644 --- a/testutil/network/util.go +++ b/testutil/network/util.go @@ -193,7 +193,7 @@ func initGenFiles(cfg Config, genAccounts []authtypes.GenesisAccount, genBalance } func writeFile(name string, dir string, contents []byte) error { - writePath := filepath.Join(dir) + writePath := filepath.Join(dir) //nolint:gocritic file := filepath.Join(writePath, name) err := tmos.EnsureDir(writePath, 0o755) diff --git a/testutil/rest.go b/testutil/rest.go index b468b16bed..58e6c39b19 100644 --- a/testutil/rest.go +++ b/testutil/rest.go @@ -1,7 +1,7 @@ package testutil import ( - "io/ioutil" + "io" "net/http" ) @@ -25,7 +25,7 @@ func GetRequestWithHeaders(url string, headers map[string]string) ([]byte, error return nil, err } - body, err := ioutil.ReadAll(res.Body) + body, err := io.ReadAll(res.Body) if err != nil { return nil, 
	err
}
diff --git a/types/address.go b/types/address.go
index f3dcfa5be9..d95209a0c3 100644
--- a/types/address.go
+++ b/types/address.go
@@ -201,7 +201,7 @@ func (aa AccAddress) Equals(aa2 Address) bool {
 
 // Returns boolean for whether an AccAddress is empty
 func (aa AccAddress) Empty() bool {
-	return aa == nil || len(aa) == 0
+	return len(aa) == 0
 }
 
 // Marshal returns the raw address bytes. It is needed for protobuf
@@ -349,7 +349,7 @@ func (va ValAddress) Equals(va2 Address) bool {
 
 // Returns boolean for whether an AccAddress is empty
 func (va ValAddress) Empty() bool {
-	return va == nil || len(va) == 0
+	return len(va) == 0
 }
 
 // Marshal returns the raw address bytes. It is needed for protobuf
@@ -504,7 +504,7 @@ func (ca ConsAddress) Equals(ca2 Address) bool {
 
 // Returns boolean for whether an ConsAddress is empty
 func (ca ConsAddress) Empty() bool {
-	return ca == nil || len(ca) == 0
+	return len(ca) == 0
 }
 
 // Marshal returns the raw address bytes. It is needed for protobuf
diff --git a/types/coin.go b/types/coin.go
index 20ae56fead..01358f414b 100644
--- a/types/coin.go
+++ b/types/coin.go
@@ -395,10 +395,11 @@ func (coins Coins) SafeSub(coinsB Coins) (Coins, bool) {
 // of AmountOf(D) of the inputs. Note that the result might not
 // be equal to either input. For any valid Coins a, b, and c, the
 // following are always true:
-//   a.IsAllLTE(a.Max(b))
-//   b.IsAllLTE(a.Max(b))
-//   a.IsAllLTE(c) && b.IsAllLTE(c) == a.Max(b).IsAllLTE(c)
-//   a.Add(b...).IsEqual(a.Min(b).Add(a.Max(b)...))
+//
+//	a.IsAllLTE(a.Max(b))
+//	b.IsAllLTE(a.Max(b))
+//	a.IsAllLTE(c) && b.IsAllLTE(c) == a.Max(b).IsAllLTE(c)
+//	a.Add(b...).IsEqual(a.Min(b).Add(a.Max(b)...))
 //
 // E.g.
 // {1A, 3B, 2C}.Max({4A, 2B, 2C} == {4A, 3B, 2C})
@@ -440,10 +441,11 @@ func (coins Coins) Max(coinsB Coins) Coins {
 // of AmountOf(D) of the inputs. Note that the result might not
 // be equal to either input. For any valid Coins a, b, and c, the
 // following are always true:
-//   a.Min(b).IsAllLTE(a)
-//   a.Min(b).IsAllLTE(b)
-//   c.IsAllLTE(a) && c.IsAllLTE(b) == c.IsAllLTE(a.Min(b))
-//   a.Add(b...).IsEqual(a.Min(b).Add(a.Max(b)...))
+//
+//	a.Min(b).IsAllLTE(a)
+//	a.Min(b).IsAllLTE(b)
+//	c.IsAllLTE(a) && c.IsAllLTE(b) == c.IsAllLTE(a.Min(b))
+//	a.Add(b...).IsEqual(a.Min(b).Add(a.Max(b)...))
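+//	(e.g., with hypothetical inputs a = {2A, 3B} and b = {1A, 5B}: a.Min(b) = {1A, 3B},
+//	a.Max(b) = {2A, 5B}, and a.Add(b...) = {3A, 8B} = a.Min(b).Add(a.Max(b)...))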
 //
 // E.g.
 // {1A, 3B, 2C}.Min({4A, 2B, 2C} == {1A, 2B, 2C})
diff --git a/types/config.go b/types/config.go
index 00b701a025..fce6c73526 100644
--- a/types/config.go
+++ b/types/config.go
@@ -91,7 +91,8 @@ func (config *Config) SetBech32PrefixForAccount(addressPrefix, pubKeyPrefix stri
 }
 
 // SetBech32PrefixForValidator builds the Config with Bech32 addressPrefix and pubKeyPrefix for validators
-// and returns the config instance
+//
+// and returns the config instance
 func (config *Config) SetBech32PrefixForValidator(addressPrefix, pubKeyPrefix string) {
 	config.assertNotSealed()
 	config.bech32AddressPrefix["validator_addr"] = addressPrefix
diff --git a/types/context.go b/types/context.go
index 6bfd9dabd5..7440bea9b3 100644
--- a/types/context.go
+++ b/types/context.go
@@ -221,9 +221,12 @@ func (c Context) IsZero() bool {
 
 // WithValue is deprecated, provided for backwards compatibility
 // Please use
-//	ctx = ctx.WithContext(context.WithValue(ctx.Context(), key, false))
+//
+//	ctx = ctx.WithContext(context.WithValue(ctx.Context(), key, false))
+//
 // instead of
-//	ctx = ctx.WithValue(key, false)
+//
+//	ctx = ctx.WithValue(key, false)
 func (c Context) WithValue(key, value interface{}) Context {
 	c.ctx = context.WithValue(c.ctx, key, value)
 	return c
@@ -231,9 +234,12 @@ func (c Context) WithValue(key, value interface{}) Context {
 
 // Value is deprecated, provided for backwards compatibility
 // Please use
-//	ctx.Context().Value(key)
+//
+//	ctx.Context().Value(key)
+//
 // instead of
-//	ctx.Value(key)
+//
+//	ctx.Value(key)
 func (c Context) Value(key interface{}) interface{} {
 	return c.ctx.Value(key)
 }
diff --git a/types/decimal.go b/types/decimal.go
index 951f51c307..3ebdb5c727 100644
--- a/types/decimal.go
+++ b/types/decimal.go
@@ -130,12 +130,15 @@ func NewDecFromIntWithPrec(i Int, prec int64) Dec {
 
 // create a decimal from an input decimal string.
 // valid input must come in the form:
-//	(-) whole integers (.) decimal integers
+//
+//	(-) whole integers (.) decimal integers
+//
 // examples of acceptable input include:
-//	-123.456
-//	456.7890
-//	345
-//	-456789
+//
+//	-123.456
+//	456.7890
+//	345
+//	-456789
 //
 // NOTE - An error will be returned if more decimal places
 // are provided in the string than the constant Precision.
diff --git a/types/errors/doc.go b/types/errors/doc.go
index 6cca61580d..ed6b9a69bf 100644
--- a/types/errors/doc.go
+++ b/types/errors/doc.go
@@ -11,24 +11,23 @@ of the errors package.
 If it will be needed by many extensions, please consider
 registering it in the errors package. To create a new error instance use the Register
 function. You must provide a unique, non-zero error code and a short description, for example:
 
-	var ErrZeroDivision = errors.Register(9241, "zero division")
+	var ErrZeroDivision = errors.Register(9241, "zero division")
 
 When returning an error, you can attach additional context information
 to it by using the Wrap function, for example:
 
-	func safeDiv(val, div int) (int, err) {
-		if div == 0 {
-			return 0, errors.Wrapf(ErrZeroDivision, "cannot divide %d", val)
+	func safeDiv(val, div int) (int, err) {
+		if div == 0 {
+			return 0, errors.Wrapf(ErrZeroDivision, "cannot divide %d", val)
+		}
+		return val / div, nil
 	}
-		return val / div, nil
-	}
 
 The first time an error instance is wrapped a stacktrace is attached as well.
 Stacktrace information can be printed using %+v and %v formats.
- %s is just the error message - %+v is the full stack trace - %v appends a compressed [filename:line] where the error was created - + %s is just the error message + %+v is the full stack trace + %v appends a compressed [filename:line] where the error was created */ package errors diff --git a/types/errors/errors.go b/types/errors/errors.go index 3160f506b0..2c211f93d6 100644 --- a/types/errors/errors.go +++ b/types/errors/errors.go @@ -15,7 +15,7 @@ const UndefinedCodespace = "undefined" var ( // errInternal should never be exposed, but we reserve this code for non-specified errors - errInternal = Register(UndefinedCodespace, 1, "internal") + errInternal = Register(UndefinedCodespace, 1, "internal") //nolint:unused // ErrTxDecode is returned if we cannot parse a transaction ErrTxDecode = Register(RootCodespace, 2, "tx parse error") diff --git a/types/errors/stacktrace.go b/types/errors/stacktrace.go index f7079c56d8..8b2edc5f4d 100644 --- a/types/errors/stacktrace.go +++ b/types/errors/stacktrace.go @@ -80,7 +80,8 @@ func writeSimpleFrame(s io.Writer, f errors.Frame) { // %s is just the error message // %+v is the full stack trace // %v appends a compressed [filename:line] where the error -// was created +// +// was created // // Inspired by https://github.com/pkg/errors/blob/v0.8.1/errors.go#L162-L176 func (e *wrappedError) Format(s fmt.State, verb rune) { diff --git a/types/events.go b/types/events.go index dd049077a6..2eee87591e 100644 --- a/types/events.go +++ b/types/events.go @@ -3,12 +3,13 @@ package types import ( "encoding/json" "fmt" - "golang.org/x/exp/maps" - "golang.org/x/exp/slices" "reflect" "sort" "strings" + "golang.org/x/exp/maps" + "golang.org/x/exp/slices" + "github.com/gogo/protobuf/jsonpb" proto "github.com/gogo/protobuf/proto" abci "github.com/tendermint/tendermint/abci/types" diff --git a/types/handler.go b/types/handler.go index 03d1f02d58..87762744ee 100644 --- a/types/handler.go +++ b/types/handler.go @@ -43,21 +43,22 @@ func ChainAnteDecorators(chain ...AnteDecorator) AnteHandler { // Terminator AnteDecorator will get added to the chain to simplify decorator code // Don't need to check if next == nil further up the chain -// ______ -// <((((((\\\ -// / . }\ -// ;--..--._|} -// (\ '--/\--' ) -// \\ | '-' :'| -// \\ . -==- .-| -// \\ \.__.' \--._ -// [\\ __.--| // _/'--. -// \ \\ .'-._ ('-----'/ __/ \ -// \ \\ / __>| | '--. | -// \ \\ | \ | / / / -// \ '\ / \ | | _/ / -// \ \ \ | | / / -// snd \ \ \ / +// +// ______ +// <((((((\\\ +// / . }\ +// ;--..--._|} +// (\ '--/\--' ) +// \\ | '-' :'| +// \\ . -==- .-| +// \\ \.__.' \--._ +// [\\ __.--| // _/'--. +// \ \\ .'-._ ('-----'/ __/ \ +// \ \\ / __>| | '--. | +// \ \\ | \ | / / / +// \ '\ / \ | | _/ / +// \ \ \ | | / / +// snd \ \ \ / type Terminator struct{} // Simply return provided Context and nil error diff --git a/types/module/module.go b/types/module/module.go index e674f90d81..795553e78c 100644 --- a/types/module/module.go +++ b/types/module/module.go @@ -1,10 +1,10 @@ /* Package module contains application module patterns and associated "manager" functionality. 
 The module pattern has been broken down by:
- - independent module functionality (AppModuleBasic)
- - inter-dependent module genesis functionality (AppModuleGenesis)
- - inter-dependent module simulation functionality (AppModuleSimulation)
- - inter-dependent module full functionality (AppModule)
+  - independent module functionality (AppModuleBasic)
+  - inter-dependent module genesis functionality (AppModuleGenesis)
+  - inter-dependent module simulation functionality (AppModuleSimulation)
+  - inter-dependent module full functionality (AppModule)
 
 inter-dependent module functionality is module functionality which somehow
 depends on other modules, typically through the module keeper. Many of the
@@ -385,19 +385,21 @@ type VersionMap map[string]uint64
 // returning RunMigrations should be enough:
 //
 // Example:
-//   cfg := module.NewConfigurator(...)
-//   app.UpgradeKeeper.SetUpgradeHandler("my-plan", func(ctx sdk.Context, plan upgradetypes.Plan, fromVM module.VersionMap) (module.VersionMap, error) {
-//       return app.mm.RunMigrations(ctx, cfg, fromVM)
-//   })
+//
+//	cfg := module.NewConfigurator(...)
+//	app.UpgradeKeeper.SetUpgradeHandler("my-plan", func(ctx sdk.Context, plan upgradetypes.Plan, fromVM module.VersionMap) (module.VersionMap, error) {
+//	    return app.mm.RunMigrations(ctx, cfg, fromVM)
+//	})
 //
 // Internally, RunMigrations will perform the following steps:
 // - create an `updatedVM` VersionMap of modules with their latest ConsensusVersion
 // - make a diff of `fromVM` and `updatedVM`, and for each module:
-//   - if the module's `fromVM` version is less than its `updatedVM` version,
-//     then run in-place store migrations for that module between those versions.
-//   - if the module does not exist in the `fromVM` (which means that it's a new module,
-//     because it was not in the previous x/upgrade's store), then run
-//     `InitGenesis` on that module.
+//   - if the module's `fromVM` version is less than its `updatedVM` version,
+//     then run in-place store migrations for that module between those versions.
+//   - if the module does not exist in the `fromVM` (which means that it's a new module,
+//     because it was not in the previous x/upgrade's store), then run
+//     `InitGenesis` on that module.
+//
 // - return the `updatedVM` to be persisted in the x/upgrade's store.
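+// For instance (hypothetical versions): if fromVM = {"bank": 1} while bank's current
+// ConsensusVersion is 2, and a new module "foo" is absent from fromVM, RunMigrations
+// runs bank's v1->v2 in-place migration and foo's InitGenesis before returning updatedVM.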
 //
 // Migrations are run in an order defined by `Manager.OrderMigrations` or (if not set) defined by
@@ -410,18 +412,19 @@ type VersionMap map[string]uint64
 // running anything for foo.
 //
 // Example:
-//   cfg := module.NewConfigurator(...)
-//   app.UpgradeKeeper.SetUpgradeHandler("my-plan", func(ctx sdk.Context, plan upgradetypes.Plan, fromVM module.VersionMap) (module.VersionMap, error) {
-//       // Assume "foo" is a new module.
-//       // `fromVM` is fetched from existing x/upgrade store. Since foo didn't exist
-//       // before this upgrade, `v, exists := fromVM["foo"]; exists == false`, and RunMigration will by default
-//       // run InitGenesis on foo.
-//       // To skip running foo's InitGenesis, you need set `fromVM`'s foo to its latest
-//       // consensus version:
-//       fromVM["foo"] = foo.AppModule{}.ConsensusVersion()
-//
-//       return app.mm.RunMigrations(ctx, cfg, fromVM)
-//   })
+//
+//	cfg := module.NewConfigurator(...)
+//	app.UpgradeKeeper.SetUpgradeHandler("my-plan", func(ctx sdk.Context, plan upgradetypes.Plan, fromVM module.VersionMap) (module.VersionMap, error) {
+//	    // Assume "foo" is a new module.
+//	    // `fromVM` is fetched from existing x/upgrade store. Since foo didn't exist
+//	    // before this upgrade, `v, exists := fromVM["foo"]; exists == false`, and RunMigration will by default
+//	    // run InitGenesis on foo.
+//	    // To skip running foo's InitGenesis, you need to set `fromVM`'s foo to its latest
+//	    // consensus version:
+//	    fromVM["foo"] = foo.AppModule{}.ConsensusVersion()
+//
+//	    return app.mm.RunMigrations(ctx, cfg, fromVM)
+//	})
 //
 // Please also refer to docs/core/upgrade.md for more information.
 func (m Manager) RunMigrations(ctx sdk.Context, cfg Configurator, fromVM VersionMap) (VersionMap, error) {
diff --git a/types/rest/rest.go b/types/rest/rest.go
index 1f4d1f50c4..79ed92fe12 100644
--- a/types/rest/rest.go
+++ b/types/rest/rest.go
@@ -7,7 +7,7 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
-	"io/ioutil"
+	"io"
 	"net/http"
 	"net/url"
 	"strconv"
@@ -135,7 +135,7 @@ func (br BaseReq) ValidateBasic(w http.ResponseWriter) bool {
 // ReadRESTReq reads and unmarshals a Request's body to the BaseReq struct.
 // Writes an error response to ResponseWriter and returns false if errors occurred.
 func ReadRESTReq(w http.ResponseWriter, r *http.Request, cdc *codec.LegacyAmino, req interface{}) bool {
-	body, err := ioutil.ReadAll(r.Body)
+	body, err := io.ReadAll(r.Body)
 	if CheckBadRequestError(w, err) {
 		return false
 	}
@@ -411,12 +411,12 @@ func ParseQueryParamBool(r *http.Request, param string) bool {
 // GetRequest defines a wrapper around an HTTP GET request with a provided URL.
 // An error is returned if the request or reading the body fails.
 func GetRequest(url string) ([]byte, error) {
-	res, err := http.Get(url) // nolint:gosec
+	res, err := http.Get(url) //nolint:gosec
 	if err != nil {
 		return nil, err
 	}
 
-	body, err := ioutil.ReadAll(res.Body)
+	body, err := io.ReadAll(res.Body)
 	if err != nil {
 		return nil, err
 	}
@@ -431,12 +431,12 @@ func GetRequest(url string) ([]byte, error) {
 // PostRequest defines a wrapper around an HTTP POST request with a provided URL and data.
 // An error is returned if the request or reading the body fails.
func PostRequest(url string, contentType string, data []byte) ([]byte, error) { - res, err := http.Post(url, contentType, bytes.NewBuffer(data)) // nolint:gosec + res, err := http.Post(url, contentType, bytes.NewBuffer(data)) //nolint:gosec if err != nil { return nil, fmt.Errorf("error while sending post request: %w", err) } - bz, err := ioutil.ReadAll(res.Body) + bz, err := io.ReadAll(res.Body) if err != nil { return nil, fmt.Errorf("error reading response body: %w", err) } diff --git a/types/rest/rest_test.go b/types/rest/rest_test.go index c9fe5bbeb4..a4a0adf355 100644 --- a/types/rest/rest_test.go +++ b/types/rest/rest_test.go @@ -3,7 +3,6 @@ package rest_test import ( "errors" "io" - "io/ioutil" "net/http" "net/http/httptest" "sort" @@ -228,7 +227,7 @@ func TestProcessPostResponse(t *testing.T) { func TestReadRESTReq(t *testing.T) { t.Parallel() - reqBody := ioutil.NopCloser(strings.NewReader(`{"chain_id":"alessio","memo":"text"}`)) + reqBody := io.NopCloser(strings.NewReader(`{"chain_id":"alessio","memo":"text"}`)) req := &http.Request{Body: reqBody} w := httptest.NewRecorder() var br rest.BaseReq @@ -241,7 +240,7 @@ func TestReadRESTReq(t *testing.T) { require.Equal(t, http.StatusOK, res.StatusCode) // test non valid JSON - reqBody = ioutil.NopCloser(strings.NewReader(`MALFORMED`)) + reqBody = io.NopCloser(strings.NewReader(`MALFORMED`)) req = &http.Request{Body: reqBody} br = rest.BaseReq{} w = httptest.NewRecorder() @@ -259,7 +258,7 @@ func TestWriteSimulationResponse(t *testing.T) { res := w.Result() //nolint:bodyclose t.Cleanup(func() { res.Body.Close() }) require.Equal(t, http.StatusOK, res.StatusCode) - bs, err := ioutil.ReadAll(res.Body) + bs, err := io.ReadAll(res.Body) require.NoError(t, err) t.Cleanup(func() { res.Body.Close() }) require.Equal(t, `{"gas_estimate":"10"}`, string(bs)) @@ -322,7 +321,7 @@ func TestPostProcessResponseBare(t *testing.T) { res := w.Result() //nolint:bodyclose require.Equal(t, http.StatusOK, res.StatusCode) - got, err := ioutil.ReadAll(res.Body) + got, err := io.ReadAll(res.Body) require.NoError(t, err) t.Cleanup(func() { res.Body.Close() }) @@ -340,7 +339,7 @@ func TestPostProcessResponseBare(t *testing.T) { res = w.Result() //nolint:bodyclose require.Equal(t, http.StatusOK, res.StatusCode) - got, err = ioutil.ReadAll(res.Body) + got, err = io.ReadAll(res.Body) require.NoError(t, err) t.Cleanup(func() { res.Body.Close() }) @@ -358,7 +357,7 @@ func TestPostProcessResponseBare(t *testing.T) { res = w.Result() //nolint:bodyclose require.Equal(t, http.StatusOK, res.StatusCode) - got, err = ioutil.ReadAll(res.Body) + got, err = io.ReadAll(res.Body) require.NoError(t, err) t.Cleanup(func() { res.Body.Close() }) @@ -373,7 +372,7 @@ func TestPostProcessResponseBare(t *testing.T) { res = w.Result() //nolint:bodyclose require.Equal(t, http.StatusInternalServerError, res.StatusCode) - got, err = ioutil.ReadAll(res.Body) + got, err = io.ReadAll(res.Body) require.NoError(t, err) t.Cleanup(func() { res.Body.Close() }) @@ -400,7 +399,7 @@ func runPostProcessResponse(t *testing.T, ctx client.Context, obj interface{}, e resp := w.Result() //nolint:bodyclose t.Cleanup(func() { resp.Body.Close() }) - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) require.Nil(t, err) require.Equal(t, expectedBody, body) @@ -415,7 +414,7 @@ func runPostProcessResponse(t *testing.T, ctx client.Context, obj interface{}, e resp = w.Result() //nolint:bodyclose t.Cleanup(func() { resp.Body.Close() }) - body, err = ioutil.ReadAll(resp.Body) + body, err = 
io.ReadAll(resp.Body) require.Nil(t, err) require.Equal(t, string(expectedBody), string(body)) diff --git a/types/tx/types.go b/types/tx/types.go index 804f70210b..8c10b56992 100644 --- a/types/tx/types.go +++ b/types/tx/types.go @@ -13,8 +13,10 @@ import ( const MaxGasWanted = uint64((1 << 63) - 1) // Interface implementation checks. -var _, _, _, _ codectypes.UnpackInterfacesMessage = &Tx{}, &TxBody{}, &AuthInfo{}, &SignerInfo{} -var _ sdk.Tx = &Tx{} +var ( + _, _, _, _ codectypes.UnpackInterfacesMessage = &Tx{}, &TxBody{}, &AuthInfo{}, &SignerInfo{} + _ sdk.Tx = &Tx{} +) // GetMsgs implements the GetMsgs method on sdk.Tx. func (t *Tx) GetMsgs() []sdk.Msg { diff --git a/version/version.go b/version/version.go index cfb37683f1..a41834e336 100644 --- a/version/version.go +++ b/version/version.go @@ -3,17 +3,17 @@ // produces apps versioning information based on flags // passed at compile time. // -// Configure the version command +// # Configure the version command // // The version command can be just added to your cobra root command. // At build time, the variables Name, Version, Commit, and BuildTags // can be passed as build flags as shown in the following example: // -// go build -X github.com/cosmos/cosmos-sdk/version.Name=gaia \ -// -X github.com/cosmos/cosmos-sdk/version.AppName=gaiad \ -// -X github.com/cosmos/cosmos-sdk/version.Version=1.0 \ -// -X github.com/cosmos/cosmos-sdk/version.Commit=f0f7b7dab7e36c20b757cebce0e8f4fc5b95de60 \ -// -X "github.com/cosmos/cosmos-sdk/version.BuildTags=linux darwin amd64" +// go build -X github.com/cosmos/cosmos-sdk/version.Name=gaia \ +// -X github.com/cosmos/cosmos-sdk/version.AppName=gaiad \ +// -X github.com/cosmos/cosmos-sdk/version.Version=1.0 \ +// -X github.com/cosmos/cosmos-sdk/version.Commit=f0f7b7dab7e36c20b757cebce0e8f4fc5b95de60 \ +// -X "github.com/cosmos/cosmos-sdk/version.BuildTags=linux darwin amd64" package version import ( diff --git a/x/auth/ante/fee.go b/x/auth/ante/fee.go index 7c8fc510f9..84bade0186 100644 --- a/x/auth/ante/fee.go +++ b/x/auth/ante/fee.go @@ -79,7 +79,7 @@ func (dfd DeductFeeDecorator) AnteHandle(ctx sdk.Context, tx sdk.Tx, simulate bo } if addr := dfd.ak.GetModuleAddress(types.FeeCollectorName); addr == nil { - return ctx, fmt.Errorf("Fee collector module account (%s) has not been set", types.FeeCollectorName) + return ctx, fmt.Errorf("fee collector module account (%s) has not been set", types.FeeCollectorName) } fee := feeTx.GetFee() diff --git a/x/auth/client/cli/tx_multisign.go b/x/auth/client/cli/tx_multisign.go index d15dfa5451..5c8d6a1de1 100644 --- a/x/auth/client/cli/tx_multisign.go +++ b/x/auth/client/cli/tx_multisign.go @@ -2,7 +2,6 @@ package cli import ( "fmt" - "io/ioutil" "os" "strings" @@ -376,14 +375,14 @@ func makeBatchMultisignCmd() func(cmd *cobra.Command, args []string) error { func unmarshalSignatureJSON(clientCtx client.Context, filename string) (sigs []signingtypes.SignatureV2, err error) { var bytes []byte - if bytes, err = ioutil.ReadFile(filename); err != nil { + if bytes, err = os.ReadFile(filename); err != nil { return } return clientCtx.TxConfig.UnmarshalSignatureJSON(bytes) } func readSignaturesFromFile(ctx client.Context, filename string) (sigs []signingtypes.SignatureV2, err error) { - bz, err := ioutil.ReadFile(filename) + bz, err := os.ReadFile(filename) if err != nil { return nil, err } diff --git a/x/auth/client/rest/decode.go b/x/auth/client/rest/decode.go index 5b732fa0a1..725991fd61 100644 --- a/x/auth/client/rest/decode.go +++ b/x/auth/client/rest/decode.go @@ -3,7 
+3,7 @@ package rest import ( "encoding/base64" "fmt" - "io/ioutil" + "io" "net/http" "github.com/cosmos/cosmos-sdk/client" @@ -31,7 +31,7 @@ func DecodeTxRequestHandlerFn(clientCtx client.Context) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { var req DecodeReq - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) if rest.CheckBadRequestError(w, err) { return } diff --git a/x/auth/client/testutil/suite.go b/x/auth/client/testutil/suite.go index c916ef7589..9e6fd2f034 100644 --- a/x/auth/client/testutil/suite.go +++ b/x/auth/client/testutil/suite.go @@ -5,7 +5,7 @@ import ( "encoding/base64" "encoding/json" "fmt" - "io/ioutil" + "os" "path/filepath" "strings" "testing" @@ -199,7 +199,7 @@ func (s *IntegrationTestSuite) TestCLISignAminoJSON() { fileFlag := fmt.Sprintf("--%s=%s", flags.FlagOutputDocument, filenameSigned) _, err = TxSignExec(val1.ClientCtx, val1.Address, fileUnsigned.Name(), chainFlag, fileFlag, signModeAminoFlag) require.NoError(err) - fContent, err := ioutil.ReadFile(filenameSigned) + fContent, err := os.ReadFile(filenameSigned) require.NoError(err) require.Equal(res.String(), string(fContent)) @@ -1133,7 +1133,7 @@ func (s *IntegrationTestSuite) TestGetAccountsCmd() { func TestGetBroadcastCommandOfflineFlag(t *testing.T) { clientCtx := client.Context{}.WithOffline(true) - clientCtx = clientCtx.WithTxConfig(simapp.MakeTestEncodingConfig().TxConfig) + clientCtx = clientCtx.WithTxConfig(simapp.MakeTestEncodingConfig().TxConfig) //nolint:staticcheck cmd := authcli.GetBroadcastCommand() _ = testutil.ApplyMockIODiscardOutErr(cmd) diff --git a/x/auth/client/tx.go b/x/auth/client/tx.go index 5d94d5be7e..970db7bd23 100644 --- a/x/auth/client/tx.go +++ b/x/auth/client/tx.go @@ -5,7 +5,6 @@ import ( "bytes" "fmt" "io" - "io/ioutil" "os" "strings" @@ -98,9 +97,9 @@ func ReadTxFromFile(ctx client.Context, filename string) (tx sdk.Tx, err error) var bytes []byte if filename == "-" { - bytes, err = ioutil.ReadAll(os.Stdin) + bytes, err = io.ReadAll(os.Stdin) } else { - bytes, err = ioutil.ReadFile(filename) + bytes, err = os.ReadFile(filename) } if err != nil { diff --git a/x/auth/keeper/keeper.go b/x/auth/keeper/keeper.go index 8c41d08e74..811d9febe6 100644 --- a/x/auth/keeper/keeper.go +++ b/x/auth/keeper/keeper.go @@ -222,7 +222,7 @@ func (ak AccountKeeper) decodeAccount(bz []byte) types.AccountI { } // MarshalAccount protobuf serializes an Account interface -func (ak AccountKeeper) MarshalAccount(accountI types.AccountI) ([]byte, error) { // nolint:interfacer +func (ak AccountKeeper) MarshalAccount(accountI types.AccountI) ([]byte, error) { //nolint:interfacer return ak.cdc.MarshalInterface(accountI) } diff --git a/x/auth/legacy/v038/types.go b/x/auth/legacy/v038/types.go index a46c9d26e8..c00f9d2770 100644 --- a/x/auth/legacy/v038/types.go +++ b/x/auth/legacy/v038/types.go @@ -19,7 +19,7 @@ import ( cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec" cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types" sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/cosmos/cosmos-sdk/types/bech32/legacybech32" + "github.com/cosmos/cosmos-sdk/types/bech32/legacybech32" //nolint:staticcheck v034auth "github.com/cosmos/cosmos-sdk/x/auth/legacy/v034" ) @@ -178,7 +178,7 @@ func (acc BaseAccount) MarshalJSON() ([]byte, error) { } if acc.PubKey != nil { - pks, err := legacybech32.MarshalPubKey(legacybech32.AccPK, acc.PubKey) + pks, err := legacybech32.MarshalPubKey(legacybech32.AccPK, acc.PubKey) //nolint:staticcheck if err != nil { return nil, err 
} @@ -197,7 +197,7 @@ func (acc *BaseAccount) UnmarshalJSON(bz []byte) error { } if alias.PubKey != "" { - pk, err := legacybech32.UnmarshalPubKey(legacybech32.AccPK, alias.PubKey) + pk, err := legacybech32.UnmarshalPubKey(legacybech32.AccPK, alias.PubKey) //nolint:staticcheck if err != nil { return err } @@ -243,7 +243,7 @@ func (bva BaseVestingAccount) MarshalJSON() ([]byte, error) { } if bva.PubKey != nil { - pks, err := legacybech32.MarshalPubKey(legacybech32.AccPK, bva.PubKey) + pks, err := legacybech32.MarshalPubKey(legacybech32.AccPK, bva.PubKey) //nolint:staticcheck if err != nil { return nil, err } @@ -267,7 +267,7 @@ func (bva *BaseVestingAccount) UnmarshalJSON(bz []byte) error { ) if alias.PubKey != "" { - pk, err = legacybech32.UnmarshalPubKey(legacybech32.AccPK, alias.PubKey) + pk, err = legacybech32.UnmarshalPubKey(legacybech32.AccPK, alias.PubKey) //nolint:staticcheck if err != nil { return err } @@ -312,7 +312,7 @@ func (cva ContinuousVestingAccount) MarshalJSON() ([]byte, error) { } if cva.PubKey != nil { - pks, err := legacybech32.MarshalPubKey(legacybech32.AccPK, cva.PubKey) + pks, err := legacybech32.MarshalPubKey(legacybech32.AccPK, cva.PubKey) //nolint:staticcheck if err != nil { return nil, err } @@ -336,7 +336,7 @@ func (cva *ContinuousVestingAccount) UnmarshalJSON(bz []byte) error { ) if alias.PubKey != "" { - pk, err = legacybech32.UnmarshalPubKey(legacybech32.AccPK, alias.PubKey) + pk, err = legacybech32.UnmarshalPubKey(legacybech32.AccPK, alias.PubKey) //nolint:staticcheck if err != nil { return err } @@ -378,7 +378,7 @@ func (dva DelayedVestingAccount) MarshalJSON() ([]byte, error) { } if dva.PubKey != nil { - pks, err := legacybech32.MarshalPubKey(legacybech32.AccPK, dva.PubKey) + pks, err := legacybech32.MarshalPubKey(legacybech32.AccPK, dva.PubKey) //nolint:staticcheck if err != nil { return nil, err } @@ -402,7 +402,7 @@ func (dva *DelayedVestingAccount) UnmarshalJSON(bz []byte) error { ) if alias.PubKey != "" { - pk, err = legacybech32.UnmarshalPubKey(legacybech32.AccPK, alias.PubKey) + pk, err = legacybech32.UnmarshalPubKey(legacybech32.AccPK, alias.PubKey) //nolint:staticcheck if err != nil { return err } diff --git a/x/auth/tx/service.go b/x/auth/tx/service.go index e00b7a519a..6c2a9c364e 100644 --- a/x/auth/tx/service.go +++ b/x/auth/tx/service.go @@ -9,7 +9,7 @@ import ( sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" gogogrpc "github.com/gogo/protobuf/grpc" - "github.com/golang/protobuf/proto" // nolint: staticcheck + "github.com/golang/protobuf/proto" //nolint: staticcheck "github.com/grpc-ecosystem/grpc-gateway/runtime" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -119,7 +119,7 @@ func (s txServer) Simulate(ctx context.Context, req *txtypes.SimulateRequest) (* gasInfo, result, err := s.simulate(txBytes) if err != nil { - return nil, err + return nil, status.Errorf(codes.Unknown, "%v With gas wanted: '%d' and gas used: '%d' ", err, gasInfo.GasWanted, gasInfo.GasUsed) } return &txtypes.SimulateResponse{ diff --git a/x/auth/types/account.go b/x/auth/types/account.go index c0ef8e5da6..e93a80e795 100644 --- a/x/auth/types/account.go +++ b/x/auth/types/account.go @@ -26,6 +26,7 @@ var ( ) // NewBaseAccount creates a new BaseAccount object +// //nolint:interfacer func NewBaseAccount(address sdk.AccAddress, pubKey cryptotypes.PubKey, accountNumber, sequence uint64) *BaseAccount { acc := &BaseAccount{ diff --git a/x/auth/types/params.go b/x/auth/types/params.go index 3b9dbe9592..9551e0c847 100644 --- a/x/auth/types/params.go +++ 
b/x/auth/types/params.go
@@ -71,8 +71,10 @@ func DefaultParams() Params {
 
 // SigVerifyCostSecp256r1 returns gas fee of secp256r1 signature verification.
 // Set by benchmarking current implementation:
-//   BenchmarkSig/secp256k1     4334   277167 ns/op   4128 B/op   79 allocs/op
-//   BenchmarkSig/secp256r1    10000   108769 ns/op   1672 B/op   33 allocs/op
+//
+//	BenchmarkSig/secp256k1     4334   277167 ns/op   4128 B/op   79 allocs/op
+//	BenchmarkSig/secp256r1    10000   108769 ns/op   1672 B/op   33 allocs/op
+//
 // Based on the results above secp256k1 is 2.7x slower. However, we propose to discount it
 // because we don't compare the cgo implementation of secp256k1, which is faster.
 func (p Params) SigVerifyCostSecp256r1() uint64 {
diff --git a/x/auth/vesting/types/msgs.go b/x/auth/vesting/types/msgs.go
index 3308e03048..a00f881906 100644
--- a/x/auth/vesting/types/msgs.go
+++ b/x/auth/vesting/types/msgs.go
@@ -11,6 +11,7 @@ const TypeMsgCreateVestingAccount = "msg_create_vesting_account"
 var _ sdk.Msg = &MsgCreateVestingAccount{}
 
 // NewMsgCreateVestingAccount returns a reference to a new MsgCreateVestingAccount.
+//
 //nolint:interfacer
 func NewMsgCreateVestingAccount(fromAddr, toAddr sdk.AccAddress, amount sdk.Coins, endTime int64, delayed bool) *MsgCreateVestingAccount {
 	return &MsgCreateVestingAccount{
diff --git a/x/authz/client/rest/grpc_query_test.go b/x/authz/client/rest/grpc_query_test.go
index 5ab0d1de32..8c94031fc1 100644
--- a/x/authz/client/rest/grpc_query_test.go
+++ b/x/authz/client/rest/grpc_query_test.go
@@ -5,7 +5,6 @@ package rest_test
 
 import (
 	"fmt"
-	"testing"
 	"time"
 
 	"github.com/stretchr/testify/suite"
@@ -257,7 +256,7 @@ func (s *IntegrationTestSuite) TestQueryGranterGrantsGRPC() {
 		},
 		{
 			"no authorizations found",
-			fmt.Sprintf("%s/cosmos/authz/v1beta1/grants/granter/%s", val.APIAddress, grantee.String()),
+			fmt.Sprintf("%s/cosmos/authz/v1beta1/grants/granter/%s", val.APIAddress, string(grantee)),
 			false,
 			"",
 			0,
@@ -316,7 +315,7 @@ func (s *IntegrationTestSuite) TestQueryGranteeGrantsGRPC() {
 		},
 		{
 			"valid query",
-			fmt.Sprintf("%s/cosmos/authz/v1beta1/grants/grantee/%s", val.APIAddress, grantee.String()),
+			fmt.Sprintf("%s/cosmos/authz/v1beta1/grants/grantee/%s", val.APIAddress, string(grantee)),
 			false,
 			"",
 			1,
diff --git a/x/authz/msgs.go b/x/authz/msgs.go
index e8ecde0553..0cf0a31275 100644
--- a/x/authz/msgs.go
+++ b/x/authz/msgs.go
@@ -27,6 +27,7 @@ var (
 )
 
 // NewMsgGrant creates a new MsgGrant
+//
 //nolint:interfacer
 func NewMsgGrant(granter sdk.AccAddress, grantee sdk.AccAddress, a Authorization, expiration time.Time) (*MsgGrant, error) {
 	m := &MsgGrant{
@@ -120,6 +121,7 @@ func (msg MsgGrant) UnpackInterfaces(unpacker cdctypes.AnyUnpacker) error {
 }
 
 // NewMsgRevoke creates a new MsgRevoke
+//
 //nolint:interfacer
 func NewMsgRevoke(granter sdk.AccAddress, grantee sdk.AccAddress, msgTypeURL string) MsgRevoke {
 	return MsgRevoke{
@@ -176,6 +178,7 @@ func (msg MsgRevoke) GetSignBytes() []byte {
 }
 
 // NewMsgExec creates a new MsgExecAuthorized
+//
 //nolint:interfacer
 func NewMsgExec(grantee sdk.AccAddress, msgs []sdk.Msg) MsgExec {
 	msgsAny := make([]*cdctypes.Any, len(msgs))
diff --git a/x/authz/simulation/operations.go b/x/authz/simulation/operations.go
index 470090b554..3d57041fed 100644
--- a/x/authz/simulation/operations.go
+++ b/x/authz/simulation/operations.go
@@ -27,6 +27,8 @@ var (
 )
 
 // Simulation operation weights constants
+//
+//nolint:gosec
 const (
 	OpWeightMsgGrant = "op_weight_msg_grant"
 	OpWeightRevoke   = "op_weight_msg_revoke"
@@ -115,7 +117,6 @@ func SimulateMsgGrant(ak
authz.AccountKeeper, bk authz.BankKeeper, _ keeper.Keep } txCfg := simappparams.MakeTestEncodingConfig().TxConfig tx, err := helpers.GenTx( - r, txCfg, []sdk.Msg{msg}, fees, @@ -182,7 +183,6 @@ func SimulateMsgRevoke(ak authz.AccountKeeper, bk authz.BankKeeper, k keeper.Kee txCfg := simappparams.MakeTestEncodingConfig().TxConfig account := ak.GetAccount(ctx, granterAddr) tx, err := helpers.GenTx( - r, txCfg, []sdk.Msg{&msg}, fees, @@ -241,10 +241,6 @@ func SimulateMsgExec(ak authz.AccountKeeper, bk authz.BankKeeper, k keeper.Keepe granterspendableCoins := bk.SpendableCoins(ctx, granterAddr) coins := simtypes.RandSubsetCoins(r, granterspendableCoins) - // if coins slice is empty, we can not create valid banktype.MsgSend - if len(coins) == 0 { - return simtypes.NoOpMsg(authz.ModuleName, TypeMsgExec, "empty coins slice"), nil, nil - } // Check send_enabled status of each sent coin denom if err := bk.IsSendEnabledCoins(ctx, coins...); err != nil { return simtypes.NoOpMsg(authz.ModuleName, TypeMsgExec, err.Error()), nil, nil @@ -277,7 +273,6 @@ func SimulateMsgExec(ak authz.AccountKeeper, bk authz.BankKeeper, k keeper.Keepe txCfg := simappparams.MakeTestEncodingConfig().TxConfig granteeAcc := ak.GetAccount(ctx, granteeAddr) tx, err := helpers.GenTx( - r, txCfg, []sdk.Msg{&msgExec}, fees, diff --git a/x/bank/client/cli/tx.go b/x/bank/client/cli/tx.go index 4fee9ffd1e..6d2f51d486 100644 --- a/x/bank/client/cli/tx.go +++ b/x/bank/client/cli/tx.go @@ -38,17 +38,20 @@ ignored as it is implied from [from_key_or_address].`, if err != nil { return err } - toAddr, err := sdk.AccAddressFromBech32(args[1]) - if err != nil { - return err - } coins, err := sdk.ParseCoinsNormalized(args[2]) if err != nil { return err } - msg := types.NewMsgSend(clientCtx.GetFromAddress(), toAddr, coins) + msg := &types.MsgSend{ + FromAddress: clientCtx.GetFromAddress().String(), + ToAddress: args[1], + Amount: coins, + } + if err := msg.ValidateBasic(); err != nil { + return err + } return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), msg) }, diff --git a/x/bank/keeper/keeper.go b/x/bank/keeper/keeper.go index ac4ac8c76f..6c314029d6 100644 --- a/x/bank/keeper/keeper.go +++ b/x/bank/keeper/keeper.go @@ -115,7 +115,8 @@ func NewBaseKeeper( // WithMintCoinsRestriction restricts the bank Keeper used within a specific module to // have restricted permissions on minting via function passed in parameter. 
// Previous restriction functions can be nested as such: -// bankKeeper.WithMintCoinsRestriction(restriction1).WithMintCoinsRestriction(restriction2) +// +// bankKeeper.WithMintCoinsRestriction(restriction1).WithMintCoinsRestriction(restriction2) func (k BaseKeeper) WithMintCoinsRestriction(check MintingRestrictionFn) BaseKeeper { oldRestrictionFn := k.mintCoinsRestrictionFn k.mintCoinsRestrictionFn = func(ctx sdk.Context, coins sdk.Coins) error { diff --git a/x/bank/legacy/v040/types.go b/x/bank/legacy/v040/types.go index 11fdf1dfe8..227a43fd90 100644 --- a/x/bank/legacy/v040/types.go +++ b/x/bank/legacy/v040/types.go @@ -1,7 +1,7 @@ package v040 import ( - "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/proto" //nolint:staticcheck codectypes "github.com/cosmos/cosmos-sdk/codec/types" "github.com/cosmos/cosmos-sdk/x/bank/types" @@ -26,6 +26,6 @@ func RegisterInterfaces(registry codectypes.InterfaceRegistry) { registry.RegisterInterface( "cosmos.bank.v1beta1.SupplyI", (*SupplyI)(nil), - &types.Supply{}, + &types.Supply{}, //nolint:staticcheck ) } diff --git a/x/bank/legacy/v043/store.go b/x/bank/legacy/v043/store.go index aa8f863298..342b900d41 100644 --- a/x/bank/legacy/v043/store.go +++ b/x/bank/legacy/v043/store.go @@ -32,7 +32,7 @@ func migrateSupply(store sdk.KVStore, cdc codec.BinaryCodec) error { // We're sure that SupplyI is a Supply struct, there's no other // implementation. - oldSupply := oldSupplyI.(*types.Supply) + oldSupply := oldSupplyI.(*types.Supply) //nolint:staticcheck for i := range oldSupply.Total { coin := oldSupply.Total[i] coinBz, err := coin.Amount.Marshal() diff --git a/x/bank/module.go b/x/bank/module.go index 8035240795..000c72070a 100644 --- a/x/bank/module.go +++ b/x/bank/module.go @@ -38,6 +38,10 @@ type AppModuleBasic struct { cdc codec.Codec } +func NewAppModuleBasic(cdc codec.Codec) AppModuleBasic { + return AppModuleBasic{cdc} +} + // Name returns the bank module's name. 
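The `WithMintCoinsRestriction` hunk above documents that restrictions can be chained, each new function wrapping the previously installed one. A minimal sketch of a single restriction in use; the wrapper function and the denom literal are illustrative, not part of this diff:

```go
import (
	"fmt"

	sdk "github.com/cosmos/cosmos-sdk/types"
	bankkeeper "github.com/cosmos/cosmos-sdk/x/bank/keeper"
)

// restrictToBondDenom returns a keeper that refuses to mint any denom other
// than "stake", in addition to whatever restriction the keeper already carries.
func restrictToBondDenom(k bankkeeper.BaseKeeper) bankkeeper.BaseKeeper {
	return k.WithMintCoinsRestriction(func(ctx sdk.Context, coins sdk.Coins) error {
		for _, coin := range coins {
			if coin.Denom != "stake" {
				return fmt.Errorf("module cannot mint %q", coin.Denom)
			}
		}
		return nil
	})
}
```

Chaining a second `WithMintCoinsRestriction` call, as the comment above shows, keeps both checks active on every `MintCoins`.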
func (AppModuleBasic) Name() string { return types.ModuleName } diff --git a/x/bank/simulation/operations.go b/x/bank/simulation/operations.go index 9eba149941..aa12c0e047 100644 --- a/x/bank/simulation/operations.go +++ b/x/bank/simulation/operations.go @@ -18,8 +18,8 @@ import ( // Simulation operation weights constants const ( - OpWeightMsgSend = "op_weight_msg_send" - OpWeightMsgMultiSend = "op_weight_msg_multisend" + OpWeightMsgSend = "op_weight_msg_send" //nolint:gosec + OpWeightMsgMultiSend = "op_weight_msg_multisend" //nolint:gosec ) // WeightedOperations returns all the operations from the module with their respective weights @@ -59,10 +59,6 @@ func SimulateMsgSend(ak types.AccountKeeper, bk keeper.Keeper) simtypes.Operatio accs []simtypes.Account, chainID string, ) (simtypes.OperationMsg, []simtypes.FutureOperation, error) { from, to, coins, skip := randomSendFields(r, ctx, accs, bk, ak) - // if coins slice is empty, we can not create valid types.MsgSend - if len(coins) == 0 { - return simtypes.NoOpMsg(types.ModuleName, types.TypeMsgSend, "empty coins slice"), nil, nil - } // Check send_enabled status of each coin denom if err := bk.IsSendEnabledCoins(ctx, coins...); err != nil { @@ -97,10 +93,6 @@ func SimulateMsgSendToModuleAccount(ak types.AccountKeeper, bk keeper.Keeper, mo spendable := bk.SpendableCoins(ctx, from.Address) coins := simtypes.RandSubsetCoins(r, spendable) - // if coins slice is empty, we can not create valid types.MsgSend - if len(coins) == 0 { - return simtypes.NoOpMsg(types.ModuleName, types.TypeMsgSend, "empty coins slice"), nil, nil - } // Check send_enabled status of each coin denom if err := bk.IsSendEnabledCoins(ctx, coins...); err != nil { @@ -145,7 +137,6 @@ func sendMsgSend( } txGen := simappparams.MakeTestEncodingConfig().TxConfig tx, err := helpers.GenTx( - r, txGen, []sdk.Msg{msg}, fees, @@ -359,7 +350,6 @@ func sendMsgMultiSend( txGen := simappparams.MakeTestEncodingConfig().TxConfig tx, err := helpers.GenTx( - r, txGen, []sdk.Msg{msg}, fees, diff --git a/x/bank/types/metadata.go b/x/bank/types/metadata.go index 9583b85ce4..a6df1bf426 100644 --- a/x/bank/types/metadata.go +++ b/x/bank/types/metadata.go @@ -9,12 +9,12 @@ import ( ) // Validate performs a basic validation of the coin metadata fields. It checks: -// - Name and Symbol are not blank -// - Base and Display denominations are valid coin denominations -// - Base and Display denominations are present in the DenomUnit slice -// - Base denomination has exponent 0 -// - Denomination units are sorted in ascending order -// - Denomination units not duplicated +// - Name and Symbol are not blank +// - Base and Display denominations are valid coin denominations +// - Base and Display denominations are present in the DenomUnit slice +// - Base denomination has exponent 0 +// - Denomination units are sorted in ascending order +// - Denomination units not duplicated func (m Metadata) Validate() error { if strings.TrimSpace(m.Name) == "" { return errors.New("name field cannot be blank") diff --git a/x/bank/types/msgs.go b/x/bank/types/msgs.go index 22fdc30b38..f5ce141724 100644 --- a/x/bank/types/msgs.go +++ b/x/bank/types/msgs.go @@ -14,6 +14,7 @@ const ( var _ sdk.Msg = &MsgSend{} // NewMsgSend - construct a msg to send coins from one account to another. 
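The `+//` insertion in the `NewMsgSend` hunk below repeats the mechanical fix applied to every constructor in this diff: the lint directive must start its line as `//nolint` with no space after the slashes, and a bare `//` line keeps it out of the rendered doc comment. The resulting shape, assembled from this hunk:

```go
// NewMsgSend - construct a msg to send coins from one account to another.
//
//nolint:interfacer
func NewMsgSend(fromAddr, toAddr sdk.AccAddress, amount sdk.Coins) *MsgSend {
	return &MsgSend{FromAddress: fromAddr.String(), ToAddress: toAddr.String(), Amount: amount}
}
```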
+// //nolint:interfacer func NewMsgSend(fromAddr, toAddr sdk.AccAddress, amount sdk.Coins) *MsgSend { return &MsgSend{FromAddress: fromAddr.String(), ToAddress: toAddr.String(), Amount: amount} @@ -125,6 +126,7 @@ func (in Input) ValidateBasic() error { } // NewInput - create a transaction input, used with MsgMultiSend +// //nolint:interfacer func NewInput(addr sdk.AccAddress, coins sdk.Coins) Input { return Input{ @@ -152,6 +154,7 @@ func (out Output) ValidateBasic() error { } // NewOutput - create a transaction output, used with MsgMultiSend +// //nolint:interfacer func NewOutput(addr sdk.AccAddress, coins sdk.Coins) Output { return Output{ diff --git a/x/bank/types/querier.go b/x/bank/types/querier.go index 117b87f17c..3ffe7a320f 100644 --- a/x/bank/types/querier.go +++ b/x/bank/types/querier.go @@ -14,12 +14,14 @@ const ( ) // NewQueryBalanceRequest creates a new instance of QueryBalanceRequest. +// //nolint:interfacer func NewQueryBalanceRequest(addr sdk.AccAddress, denom string) *QueryBalanceRequest { return &QueryBalanceRequest{Address: addr.String(), Denom: denom} } // NewQueryAllBalancesRequest creates a new instance of QueryAllBalancesRequest. +// //nolint:interfacer func NewQueryAllBalancesRequest(addr sdk.AccAddress, req *query.PageRequest) *QueryAllBalancesRequest { return &QueryAllBalancesRequest{Address: addr.String(), Pagination: req} @@ -27,7 +29,8 @@ func NewQueryAllBalancesRequest(addr sdk.AccAddress, req *query.PageRequest) *Qu // NewQuerySpendableBalancesRequest creates a new instance of a // QuerySpendableBalancesRequest. -// nolint:interfacer +// +//nolint:interfacer func NewQuerySpendableBalancesRequest(addr sdk.AccAddress, req *query.PageRequest) *QuerySpendableBalancesRequest { return &QuerySpendableBalancesRequest{Address: addr.String(), Pagination: req} } diff --git a/x/crisis/types/msgs.go b/x/crisis/types/msgs.go index 6161a1d4b7..fcb7dadd11 100644 --- a/x/crisis/types/msgs.go +++ b/x/crisis/types/msgs.go @@ -8,6 +8,7 @@ import ( var _ sdk.Msg = &MsgVerifyInvariant{} // NewMsgVerifyInvariant creates a new MsgVerifyInvariant object +// //nolint:interfacer func NewMsgVerifyInvariant(sender sdk.AccAddress, invModeName, invRoute string) *MsgVerifyInvariant { return &MsgVerifyInvariant{ diff --git a/x/crisis/types/params.go b/x/crisis/types/params.go index 03c8dd8288..b51259ee21 100644 --- a/x/crisis/types/params.go +++ b/x/crisis/types/params.go @@ -7,7 +7,7 @@ import ( paramtypes "github.com/cosmos/cosmos-sdk/x/params/types" ) -// key for constant fee parameter +// ParamStoreKeyConstantFee sets the key for constant fee parameter var ParamStoreKeyConstantFee = []byte("ConstantFee") // type declaration for parameters diff --git a/x/distribution/client/cli/utils.go b/x/distribution/client/cli/utils.go index 45f941f089..ce7024dd2e 100644 --- a/x/distribution/client/cli/utils.go +++ b/x/distribution/client/cli/utils.go @@ -1,7 +1,7 @@ package cli import ( - "io/ioutil" + "os" "github.com/cosmos/cosmos-sdk/codec" "github.com/cosmos/cosmos-sdk/x/distribution/types" @@ -11,7 +11,7 @@ import ( func ParseCommunityPoolSpendProposalWithDeposit(cdc codec.JSONCodec, proposalFile string) (types.CommunityPoolSpendProposalWithDeposit, error) { proposal := types.CommunityPoolSpendProposalWithDeposit{} - contents, err := ioutil.ReadFile(proposalFile) + contents, err := os.ReadFile(proposalFile) if err != nil { return proposal, err } diff --git a/x/distribution/keeper/allocation.go b/x/distribution/keeper/allocation.go index 273db1314e..02be99db4e 100644 --- 
a/x/distribution/keeper/allocation.go
+++ b/x/distribution/keeper/allocation.go
@@ -81,16 +81,22 @@ func (k Keeper) AllocateTokens(
// calculate fraction allocated to validators
communityTax := k.GetCommunityTax(ctx)
voteMultiplier := sdk.OneDec().Sub(proposerMultiplier).Sub(communityTax)
+ feeMultiplier := feesCollected.MulDecTruncate(voteMultiplier)
// allocate tokens proportionally to voting power
- // TODO consider parallelizing later, ref https://github.com/cosmos/cosmos-sdk/pull/3099#discussion_r246276376
+ //
+ // TODO: Consider parallelizing later
+ //
+ // Ref: https://github.com/cosmos/cosmos-sdk/pull/3099#discussion_r246276376
for _, vote := range bondedVotes {
validator := k.stakingKeeper.ValidatorByConsAddr(ctx, vote.Validator.Address)
- // TODO consider microslashing for missing votes.
- // ref https://github.com/cosmos/cosmos-sdk/issues/2525#issuecomment-430838701
+ // TODO: Consider micro-slashing for missing votes.
+ //
+ // Ref: https://github.com/cosmos/cosmos-sdk/issues/2525#issuecomment-430838701
powerFraction := sdk.NewDec(vote.Validator.Power).QuoTruncate(sdk.NewDec(totalPreviousPower))
- reward := feesCollected.MulDecTruncate(voteMultiplier).MulDecTruncate(powerFraction)
+ reward := feeMultiplier.MulDecTruncate(powerFraction)
+
k.AllocateTokensToValidator(ctx, validator, reward)
remaining = remaining.Sub(reward)
}
@@ -100,7 +106,8 @@ func (k Keeper) AllocateTokens(
k.SetFeePool(ctx, feePool)
}
-// AllocateTokensToValidator allocate tokens to a particular validator, splitting according to commission
+// AllocateTokensToValidator allocates tokens to a particular validator,
+// splitting according to commission.
func (k Keeper) AllocateTokensToValidator(ctx sdk.Context, val stakingtypes.ValidatorI, tokens sdk.DecCoins) {
// split tokens between validator and delegators according to commission
commission := tokens.MulDec(val.GetCommission())
@@ -131,6 +138,7 @@ func (k Keeper) AllocateTokensToValidator(ctx sdk.Context, val stakingtypes.Vali
sdk.NewAttribute(types.AttributeKeyValidator, val.GetOperator().String()),
),
)
+
outstanding := k.GetValidatorOutstandingRewards(ctx, val.GetOperator())
outstanding.Rewards = outstanding.Rewards.Add(tokens...)
k.SetValidatorOutstandingRewards(ctx, val.GetOperator(), outstanding) diff --git a/x/distribution/keeper/delegation.go b/x/distribution/keeper/delegation.go index 3b158c1558..c70062c11d 100644 --- a/x/distribution/keeper/delegation.go +++ b/x/distribution/keeper/delegation.go @@ -4,7 +4,6 @@ import ( "fmt" sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/cosmos/cosmos-sdk/x/distribution/types" stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" ) @@ -162,13 +161,13 @@ func (k Keeper) withdrawDelegationRewards(ctx sdk.Context, val stakingtypes.Vali ) } - // truncate coins, return remainder to community pool - coins, remainder := rewards.TruncateDecimal() + // truncate reward dec coins, return remainder to community pool + finalRewards, remainder := rewards.TruncateDecimal() // add coins to user account - if !coins.IsZero() { + if !finalRewards.IsZero() { withdrawAddr := k.GetDelegatorWithdrawAddr(ctx, del.GetDelegatorAddr()) - err := k.bankKeeper.SendCoinsFromModuleToAccount(ctx, types.ModuleName, withdrawAddr, coins) + err := k.bankKeeper.SendCoinsFromModuleToAccount(ctx, types.ModuleName, withdrawAddr, finalRewards) if err != nil { return nil, err } @@ -189,5 +188,24 @@ func (k Keeper) withdrawDelegationRewards(ctx sdk.Context, val stakingtypes.Vali // remove delegator starting info k.DeleteDelegatorStartingInfo(ctx, del.GetValidatorAddr(), del.GetDelegatorAddr()) - return coins, nil + if finalRewards.IsZero() { + baseDenom, _ := sdk.GetBaseDenom() + if baseDenom == "" { + baseDenom = sdk.DefaultBondDenom + } + + // Note, we do not call the NewCoins constructor as we do not want the zero + // coin removed. + finalRewards = sdk.Coins{sdk.NewCoin(baseDenom, sdk.ZeroInt())} + } + + ctx.EventManager().EmitEvent( + sdk.NewEvent( + types.EventTypeWithdrawRewards, + sdk.NewAttribute(sdk.AttributeKeyAmount, finalRewards.String()), + sdk.NewAttribute(types.AttributeKeyValidator, val.GetOperator().String()), + ), + ) + + return finalRewards, nil } diff --git a/x/distribution/keeper/delegation_test.go b/x/distribution/keeper/delegation_test.go index 8b4219e280..020bb88c08 100644 --- a/x/distribution/keeper/delegation_test.go +++ b/x/distribution/keeper/delegation_test.go @@ -330,13 +330,6 @@ func TestWithdrawDelegationRewardsBasic(t *testing.T) { // withdraw commission _, err = app.DistrKeeper.WithdrawValidatorCommission(ctx, valAddrs[0]) require.Nil(t, err) - - // assert correct balance - exp = balanceTokens.Sub(valTokens).Add(initial) - require.Equal(t, - sdk.Coins{sdk.NewCoin(sdk.DefaultBondDenom, exp)}, - app.BankKeeper.GetAllBalances(ctx, sdk.AccAddress(valAddrs[0])), - ) } func TestCalculateRewardsAfterManySlashesInSameBlock(t *testing.T) { diff --git a/x/distribution/keeper/keeper.go b/x/distribution/keeper/keeper.go index 894cd57ce2..22e1a28944 100644 --- a/x/distribution/keeper/keeper.go +++ b/x/distribution/keeper/keeper.go @@ -98,14 +98,6 @@ func (k Keeper) WithdrawDelegationRewards(ctx sdk.Context, delAddr sdk.AccAddres return nil, err } - ctx.EventManager().EmitEvent( - sdk.NewEvent( - types.EventTypeWithdrawRewards, - sdk.NewAttribute(sdk.AttributeKeyAmount, rewards.String()), - sdk.NewAttribute(types.AttributeKeyValidator, valAddr.String()), - ), - ) - // reinitialize the delegation k.initializeDelegation(ctx, valAddr, delAddr) return rewards, nil diff --git a/x/distribution/simulation/operations.go b/x/distribution/simulation/operations.go index dfcefd75aa..89a667777f 100644 --- a/x/distribution/simulation/operations.go +++ b/x/distribution/simulation/operations.go @@ 
-17,10 +17,10 @@ import (
// Simulation operation weights constants
const (
- OpWeightMsgSetWithdrawAddress = "op_weight_msg_set_withdraw_address"
- OpWeightMsgWithdrawDelegationReward = "op_weight_msg_withdraw_delegation_reward"
- OpWeightMsgWithdrawValidatorCommission = "op_weight_msg_withdraw_validator_commission"
- OpWeightMsgFundCommunityPool = "op_weight_msg_fund_community_pool"
+ OpWeightMsgSetWithdrawAddress = "op_weight_msg_set_withdraw_address" //nolint:gosec
+ OpWeightMsgWithdrawDelegationReward = "op_weight_msg_withdraw_delegation_reward" //nolint:gosec
+ OpWeightMsgWithdrawValidatorCommission = "op_weight_msg_withdraw_validator_commission" //nolint:gosec
+ OpWeightMsgFundCommunityPool = "op_weight_msg_fund_community_pool" //nolint:gosec
)
// WeightedOperations returns all the operations from the module with their respective weights
@@ -229,7 +229,6 @@ func SimulateMsgFundCommunityPool(ak types.AccountKeeper, bk types.BankKeeper, k
msg := types.NewMsgFundCommunityPool(fundAmount, funder.Address)
txCtx := simulation.OperationInput{
- R: r,
App: app,
TxGen: simappparams.MakeTestEncodingConfig().TxConfig,
Cdc: nil,
diff --git a/x/distribution/types/proposal.go b/x/distribution/types/proposal.go
index 1a0d0a886e..2eed72a2e4 100644
--- a/x/distribution/types/proposal.go
+++ b/x/distribution/types/proposal.go
@@ -22,6 +22,7 @@ func init() {
}
// NewCommunityPoolSpendProposal creates a new community pool spend proposal.
+//
//nolint:interfacer
func NewCommunityPoolSpendProposal(title, description string, recipient sdk.AccAddress, amount sdk.Coins) *CommunityPoolSpendProposal {
return &CommunityPoolSpendProposal{title, description, recipient.String(), amount}
diff --git a/x/distribution/types/query.go b/x/distribution/types/query.go
index 13423038b2..40c1678277 100644
--- a/x/distribution/types/query.go
+++ b/x/distribution/types/query.go
@@ -34,6 +34,7 @@ func (res QueryDelegatorTotalRewardsResponse) String() string {
}
// NewDelegationDelegatorReward constructs a DelegationDelegatorReward.
+//
//nolint:interfacer
func NewDelegationDelegatorReward(valAddr sdk.ValAddress, reward sdk.DecCoins,
diff --git a/x/evidence/types/msgs.go b/x/evidence/types/msgs.go
index cd2e2ba032..6d3a86ab7c 100644
--- a/x/evidence/types/msgs.go
+++ b/x/evidence/types/msgs.go
@@ -23,6 +23,7 @@ var (
)
// NewMsgSubmitEvidence returns a new MsgSubmitEvidence with a signer/submitter.
+//
//nolint:interfacer
func NewMsgSubmitEvidence(s sdk.AccAddress, evi exported.Evidence) (*MsgSubmitEvidence, error) {
msg, ok := evi.(proto.Message)
diff --git a/x/feegrant/grant.go b/x/feegrant/grant.go
index f8d7823ff0..f169720cc6 100644
--- a/x/feegrant/grant.go
+++ b/x/feegrant/grant.go
@@ -11,6 +11,7 @@ import (
var _ types.UnpackInterfacesMessage = &Grant{}
// NewGrant creates a new FeeAllowanceGrant.
+//
//nolint:interfacer
func NewGrant(granter, grantee sdk.AccAddress, feeAllowance FeeAllowanceI) (Grant, error) {
msg, ok := feeAllowance.(proto.Message)
diff --git a/x/feegrant/msgs.go b/x/feegrant/msgs.go
index 63c29eb4ea..340007f8da 100644
--- a/x/feegrant/msgs.go
+++ b/x/feegrant/msgs.go
@@ -18,6 +18,7 @@ var (
)
// NewMsgGrantAllowance creates a new MsgGrantAllowance.
+// //nolint:interfacer func NewMsgGrantAllowance(feeAllowance FeeAllowanceI, granter, grantee sdk.AccAddress) (*MsgGrantAllowance, error) { msg, ok := feeAllowance.(proto.Message) @@ -98,6 +99,7 @@ func (msg MsgGrantAllowance) UnpackInterfaces(unpacker types.AnyUnpacker) error // NewMsgRevokeAllowance returns a message to revoke a fee allowance for a given // granter and grantee +// //nolint:interfacer func NewMsgRevokeAllowance(granter sdk.AccAddress, grantee sdk.AccAddress) MsgRevokeAllowance { return MsgRevokeAllowance{Granter: granter.String(), Grantee: grantee.String()} diff --git a/x/feegrant/simulation/operations.go b/x/feegrant/simulation/operations.go index 652b3b2f1a..b2e8ee4874 100644 --- a/x/feegrant/simulation/operations.go +++ b/x/feegrant/simulation/operations.go @@ -14,6 +14,8 @@ import ( ) // Simulation operation weights constants +// +//nolint:gosec const ( OpWeightMsgGrantAllowance = "op_weight_msg_grant_fee_allowance" OpWeightMsgRevokeAllowance = "op_weight_msg_grant_revoke_allowance" diff --git a/x/genutil/client/cli/gentx.go b/x/genutil/client/cli/gentx.go index d780be6665..6dce3c3153 100644 --- a/x/genutil/client/cli/gentx.go +++ b/x/genutil/client/cli/gentx.go @@ -6,7 +6,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "os" "path/filepath" @@ -223,7 +222,7 @@ func makeOutputFilepath(rootDir, nodeID string) (string, error) { } func readUnsignedGenTxFile(clientCtx client.Context, r io.Reader) (sdk.Tx, error) { - bz, err := ioutil.ReadAll(r) + bz, err := io.ReadAll(r) if err != nil { return nil, err } diff --git a/x/genutil/collect.go b/x/genutil/collect.go index be8219a1b1..0018f0c0d9 100644 --- a/x/genutil/collect.go +++ b/x/genutil/collect.go @@ -6,7 +6,6 @@ import ( "encoding/json" "errors" "fmt" - "io/ioutil" "os" "path/filepath" "runtime" @@ -78,8 +77,7 @@ func CollectTxs(cdc codec.JSONCodec, txJSONDecoder sdk.TxDecoder, moniker, genTx return appGenTxs, persistentPeers, err } - var fos []os.FileInfo - fos, err = ioutil.ReadDir(genTxsDir) + fos, err := os.ReadDir(genTxsDir) if err != nil { return appGenTxs, persistentPeers, err } @@ -106,7 +104,7 @@ func CollectTxs(cdc codec.JSONCodec, txJSONDecoder sdk.TxDecoder, moniker, genTx } // get the genTx - jsonRawTx, err := ioutil.ReadFile(filepath.Join(genTxsDir, fo.Name())) + jsonRawTx, err := os.ReadFile(filepath.Join(genTxsDir, fo.Name())) if err != nil { return appGenTxs, persistentPeers, err } diff --git a/x/genutil/collect_test.go b/x/genutil/collect_test.go index 82ec95a9b8..c9befdd547 100644 --- a/x/genutil/collect_test.go +++ b/x/genutil/collect_test.go @@ -2,7 +2,6 @@ package genutil_test import ( "encoding/json" - "io/ioutil" "os" "path/filepath" "testing" @@ -38,7 +37,7 @@ func (dni *doNothingIterator) IterateGenesisBalances(_ codec.JSONCodec, _ map[st // Ensures that CollectTx correctly traverses directories and won't error out on encountering // a directory during traversal of the first level. See issue https://github.com/cosmos/cosmos-sdk/issues/6788. func TestCollectTxsHandlesDirectories(t *testing.T) { - testDir, err := ioutil.TempDir(os.TempDir(), "testCollectTxs") + testDir, err := os.MkdirTemp(os.TempDir(), "testCollectTxs") if err != nil { t.Fatal(err) } diff --git a/x/genutil/doc.go b/x/genutil/doc.go index bccc82a1af..6aa85b9775 100644 --- a/x/genutil/doc.go +++ b/x/genutil/doc.go @@ -1,10 +1,10 @@ /* Package genutil contains a variety of genesis utility functionality for usage within a blockchain application. 
Namely: - - Genesis transactions related (gentx) - - commands for collection and creation of gentxs - - initchain processing of gentxs - - Genesis file validation - - Tendermint related initialization + - Genesis transactions related (gentx) + - commands for collection and creation of gentxs + - initchain processing of gentxs + - Genesis file validation + - Tendermint related initialization */ package genutil diff --git a/x/genutil/gentx_test.go b/x/genutil/gentx_test.go index bf0a6ff7c0..a6e636f84f 100644 --- a/x/genutil/gentx_test.go +++ b/x/genutil/gentx_test.go @@ -3,9 +3,7 @@ package genutil_test import ( "encoding/json" "fmt" - "math/rand" "testing" - "time" "github.com/stretchr/testify/suite" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" @@ -235,7 +233,6 @@ func (suite *GenTxTestSuite) TestDeliverGenTxs() { msg := banktypes.NewMsgSend(addr1, addr2, sdk.Coins{sdk.NewInt64Coin(sdk.DefaultBondDenom, 1)}) tx, err := helpers.GenTx( - rand.New(rand.NewSource(time.Now().UnixNano())), suite.encodingConfig.TxConfig, []sdk.Msg{msg}, sdk.Coins{sdk.NewInt64Coin(sdk.DefaultBondDenom, 10)}, diff --git a/x/gov/client/cli/parse.go b/x/gov/client/cli/parse.go index 90066b645a..eeb814eb98 100644 --- a/x/gov/client/cli/parse.go +++ b/x/gov/client/cli/parse.go @@ -3,7 +3,7 @@ package cli import ( "encoding/json" "fmt" - "io/ioutil" + "os" "github.com/spf13/pflag" @@ -30,7 +30,7 @@ func parseSubmitProposalFlags(fs *pflag.FlagSet) (*proposal, error) { } } - contents, err := ioutil.ReadFile(proposalFile) + contents, err := os.ReadFile(proposalFile) if err != nil { return nil, err } diff --git a/x/gov/legacy/v043/store.go b/x/gov/legacy/v043/store.go index 2de980dbab..d7f8e9727a 100644 --- a/x/gov/legacy/v043/store.go +++ b/x/gov/legacy/v043/store.go @@ -40,7 +40,7 @@ func migrateVote(oldVote types.Vote) types.Vote { return types.Vote{ ProposalId: oldVote.ProposalId, Voter: oldVote.Voter, - Options: types.NewNonSplitVoteOption(oldVote.Option), + Options: types.NewNonSplitVoteOption(oldVote.Option), //nolint:staticcheck } } diff --git a/x/gov/simulation/operations.go b/x/gov/simulation/operations.go index ac29d90ce8..25fc6d9b81 100644 --- a/x/gov/simulation/operations.go +++ b/x/gov/simulation/operations.go @@ -20,9 +20,9 @@ var initialProposalID = uint64(100000000000000) // Simulation operation weights constants const ( - OpWeightMsgDeposit = "op_weight_msg_deposit" - OpWeightMsgVote = "op_weight_msg_vote" - OpWeightMsgVoteWeighted = "op_weight_msg_weighted_vote" + OpWeightMsgDeposit = "op_weight_msg_deposit" //nolint:gosec + OpWeightMsgVote = "op_weight_msg_vote" //nolint:gosec + OpWeightMsgVoteWeighted = "op_weight_msg_weighted_vote" //nolint:gosec ) // WeightedOperations returns all the operations from the module with their respective weights @@ -155,7 +155,6 @@ func SimulateMsgSubmitProposal( txGen := simappparams.MakeTestEncodingConfig().TxConfig tx, err := helpers.GenTx( - r, txGen, []sdk.Msg{msg}, fees, @@ -242,7 +241,6 @@ func SimulateMsgDeposit(ak types.AccountKeeper, bk types.BankKeeper, k keeper.Ke } txCtx := simulation.OperationInput{ - R: r, App: app, TxGen: simappparams.MakeTestEncodingConfig().TxConfig, Cdc: nil, diff --git a/x/gov/types/deposit.go b/x/gov/types/deposit.go index e386088737..08374341a1 100644 --- a/x/gov/types/deposit.go +++ b/x/gov/types/deposit.go @@ -9,6 +9,7 @@ import ( ) // NewDeposit creates a new Deposit instance +// //nolint:interfacer func NewDeposit(proposalID uint64, depositor sdk.AccAddress, amount sdk.Coins) Deposit { return 
Deposit{proposalID, depositor.String(), amount}
diff --git a/x/gov/types/msgs.go b/x/gov/types/msgs.go
index f1c351e6dd..9661e5b212 100644
--- a/x/gov/types/msgs.go
+++ b/x/gov/types/msgs.go
@@ -26,6 +26,7 @@ var (
)
// NewMsgSubmitProposal creates a new MsgSubmitProposal.
+//
//nolint:interfacer
func NewMsgSubmitProposal(content Content, initialDeposit sdk.Coins, proposer sdk.AccAddress) (*MsgSubmitProposal, error) {
m := &MsgSubmitProposal{
@@ -132,6 +133,7 @@ func (m MsgSubmitProposal) UnpackInterfaces(unpacker types.AnyUnpacker) error {
}
// NewMsgDeposit creates a new MsgDeposit instance
+//
//nolint:interfacer
func NewMsgDeposit(depositor sdk.AccAddress, proposalID uint64, amount sdk.Coins) *MsgDeposit {
return &MsgDeposit{proposalID, depositor.String(), amount}
@@ -177,6 +179,7 @@ func (msg MsgDeposit) GetSigners() []sdk.AccAddress {
}
// NewMsgVote creates a message to cast a vote on an active proposal
+//
//nolint:interfacer
func NewMsgVote(voter sdk.AccAddress, proposalID uint64, option VoteOption) *MsgVote {
return &MsgVote{proposalID, voter.String(), option}
@@ -220,6 +223,7 @@ func (msg MsgVote) GetSigners() []sdk.AccAddress {
}
// NewMsgVoteWeighted creates a message to cast a vote on an active proposal
+//
//nolint:interfacer
func NewMsgVoteWeighted(voter sdk.AccAddress, proposalID uint64, options WeightedVoteOptions) *MsgVoteWeighted {
return &MsgVoteWeighted{proposalID, voter.String(), options}
diff --git a/x/gov/types/vote.go b/x/gov/types/vote.go
index 03d1e5a44f..a2797d1909 100644
--- a/x/gov/types/vote.go
+++ b/x/gov/types/vote.go
@@ -11,6 +11,7 @@ import (
)
// NewVote creates a new Vote instance
+//
//nolint:interfacer
func NewVote(proposalID uint64, voter sdk.AccAddress, options WeightedVoteOptions) Vote {
return Vote{ProposalId: proposalID, Voter: voter.String(), Options: options}
diff --git a/x/params/client/utils/utils.go b/x/params/client/utils/utils.go
index b4c680fdde..5db09902bb 100644
--- a/x/params/client/utils/utils.go
+++ b/x/params/client/utils/utils.go
@@ -2,7 +2,7 @@ package utils
import (
"encoding/json"
- "io/ioutil"
+ "os"
"github.com/cosmos/cosmos-sdk/codec"
sdk "github.com/cosmos/cosmos-sdk/types"
@@ -68,7 +68,7 @@ func (pcj ParamChangesJSON) ToParamChanges() []proposal.ParamChange {
func ParseParamChangeProposalJSON(cdc *codec.LegacyAmino, proposalFile string) (ParamChangeProposalJSON, error) {
proposal := ParamChangeProposalJSON{}
- contents, err := ioutil.ReadFile(proposalFile)
+ contents, err := os.ReadFile(proposalFile)
if err != nil {
return proposal, err
}
diff --git a/x/simulation/doc.go b/x/simulation/doc.go
index 6543bc5c15..f92a6cac8b 100644
--- a/x/simulation/doc.go
+++ b/x/simulation/doc.go
@@ -2,7 +2,7 @@
Package simulation implements a full fledged Cosmos SDK application used for
executing simulation test suites.
-Simulation App
+# Simulation App
The SimApp type defines an application used for running extensive simulation
testing suites. It contains all core modules, including governance, staking,
@@ -30,7 +30,7 @@ still be pseudo-randomly simulated.
The simulation test suite also supports testing determinism and import/export
functionality.
-Randomness
+# Randomness
Currently, simulation uses a single seed (integer) as a source for a PRNG by
which all random operations are executed from. Any call to the PRNG changes all
@@ -44,75 +44,74 @@ the simulation suite is expected to support a series of PRNGs that can be used
uniquely per module and simulation component so that they will not affect each
other's state execution outcome.
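The same determinism concern explains the recurring `helpers.GenTx` hunks in this diff, which drop the leading `*rand.Rand` argument so that building a test transaction no longer consumes PRNG state. A sketch of an updated call site; variable names mirror the surrounding simulation code, and it is assumed that the remaining parameters keep their existing order and that `helpers.DefaultGenTxGas` is still the gas limit:

```go
tx, err := helpers.GenTx(
	txGen,                   // client.TxConfig, e.g. simappparams.MakeTestEncodingConfig().TxConfig
	[]sdk.Msg{msg},          // the message(s) under test
	fees,                    // fee coins chosen by the simulation
	helpers.DefaultGenTxGas, // gas limit
	chainID,
	[]uint64{account.GetAccountNumber()},
	[]uint64{account.GetSequence()},
	simAccount.PrivKey,
)
```

Only the PRNG is gone from the signature; error handling at the call sites is unchanged.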
-Usage
+# Usage
To execute a completely pseudo-random simulation:
- $ go test -mod=readonly github.com/cosmos/cosmos-sdk/simapp \
- -run=TestFullAppSimulation \
- -Enabled=true \
- -NumBlocks=100 \
- -BlockSize=200 \
- -Commit=true \
- -Seed=99 \
- -Period=5 \
- -v -timeout 24h
+ $ go test -mod=readonly github.com/cosmos/cosmos-sdk/simapp \
+ -run=TestFullAppSimulation \
+ -Enabled=true \
+ -NumBlocks=100 \
+ -BlockSize=200 \
+ -Commit=true \
+ -Seed=99 \
+ -Period=5 \
+ -v -timeout 24h
To execute simulation from a genesis file:
- $ go test -mod=readonly github.com/cosmos/cosmos-sdk/simapp \
- -run=TestFullAppSimulation \
- -Enabled=true \
- -NumBlocks=100 \
- -BlockSize=200 \
- -Commit=true \
- -Seed=99 \
- -Period=5 \
- -Genesis=/path/to/genesis.json \
- -v -timeout 24h
+ $ go test -mod=readonly github.com/cosmos/cosmos-sdk/simapp \
+ -run=TestFullAppSimulation \
+ -Enabled=true \
+ -NumBlocks=100 \
+ -BlockSize=200 \
+ -Commit=true \
+ -Seed=99 \
+ -Period=5 \
+ -Genesis=/path/to/genesis.json \
+ -v -timeout 24h
To execute simulation from a simulation params file:
- $ go test -mod=readonly github.com/cosmos/cosmos-sdk/simapp \
- -run=TestFullAppSimulation \
- -Enabled=true \
- -NumBlocks=100 \
- -BlockSize=200 \
- -Commit=true \
- -Seed=99 \
- -Period=5 \
- -Params=/path/to/params.json \
- -v -timeout 24h
+ $ go test -mod=readonly github.com/cosmos/cosmos-sdk/simapp \
+ -run=TestFullAppSimulation \
+ -Enabled=true \
+ -NumBlocks=100 \
+ -BlockSize=200 \
+ -Commit=true \
+ -Seed=99 \
+ -Period=5 \
+ -Params=/path/to/params.json \
+ -v -timeout 24h
To export the simulation params to a file at a given block height:
- $ go test -mod=readonly github.com/cosmos/cosmos-sdk/simapp \
- -run=TestFullAppSimulation \
- -Enabled=true \
- -NumBlocks=100 \
- -BlockSize=200 \
- -Commit=true \
- -Seed=99 \
- -Period=5 \
- -ExportParamsPath=/path/to/params.json \
- -ExportParamsHeight=50 \
- -v -timeout 24h
-
+ $ go test -mod=readonly github.com/cosmos/cosmos-sdk/simapp \
+ -run=TestFullAppSimulation \
+ -Enabled=true \
+ -NumBlocks=100 \
+ -BlockSize=200 \
+ -Commit=true \
+ -Seed=99 \
+ -Period=5 \
+ -ExportParamsPath=/path/to/params.json \
+ -ExportParamsHeight=50 \
+ -v -timeout 24h
To export the simulation app state (i.e genesis) to a file:
- $ go test -mod=readonly github.com/cosmos/cosmos-sdk/simapp \
- -run=TestFullAppSimulation \
- -Enabled=true \
- -NumBlocks=100 \
- -BlockSize=200 \
- -Commit=true \
- -Seed=99 \
- -Period=5 \
- -ExportStatePath=/path/to/genesis.json \
- v -timeout 24h
-
-Params
+ $ go test -mod=readonly github.com/cosmos/cosmos-sdk/simapp \
+ -run=TestFullAppSimulation \
+ -Enabled=true \
+ -NumBlocks=100 \
+ -BlockSize=200 \
+ -Commit=true \
+ -Seed=99 \
+ -Period=5 \
+ -ExportStatePath=/path/to/genesis.json \
+ v -timeout 24h
+
+# Params
Params that are provided to simulation from a JSON file are used to set both
module parameters and simulation parameters.
See sim_test.go for the full diff --git a/x/simulation/event_stats.go b/x/simulation/event_stats.go index 2b2438ce25..675380e821 100644 --- a/x/simulation/event_stats.go +++ b/x/simulation/event_stats.go @@ -4,7 +4,7 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" + "os" ) // EventStats defines an object that keeps a tally of each event that has occurred @@ -48,7 +48,7 @@ func (es EventStats) ExportJSON(path string) { panic(err) } - err = ioutil.WriteFile(path, bz, 0o600) + err = os.WriteFile(path, bz, 0o600) if err != nil { panic(err) } diff --git a/x/simulation/util.go b/x/simulation/util.go index 88b2b49991..beeb009d02 100644 --- a/x/simulation/util.go +++ b/x/simulation/util.go @@ -30,9 +30,9 @@ func getTestingMode(tb testing.TB) (testingMode bool, t *testing.T, b *testing.B // getBlockSize returns a block size as determined from the transition matrix. // It targets making average block size the provided parameter. The three // states it moves between are: -// - "over stuffed" blocks with average size of 2 * avgblocksize, -// - normal sized blocks, hitting avgBlocksize on average, -// - and empty blocks, with no txs / only txs scheduled from the past. +// - "over stuffed" blocks with average size of 2 * avgblocksize, +// - normal sized blocks, hitting avgBlocksize on average, +// - and empty blocks, with no txs / only txs scheduled from the past. func getBlockSize(r *rand.Rand, params Params, lastBlockSizeState, avgBlockSize int) (state, blockSize int) { // TODO: Make default blocksize transition matrix actually make the average // blocksize equal to avgBlockSize. @@ -101,7 +101,6 @@ func GenAndDeliverTxWithRandFees(txCtx OperationInput) (simtypes.OperationMsg, [ func GenAndDeliverTx(txCtx OperationInput, fees sdk.Coins) (simtypes.OperationMsg, []simtypes.FutureOperation, error) { account := txCtx.AccountKeeper.GetAccount(txCtx.Context, txCtx.SimAccount.Address) tx, err := helpers.GenTx( - txCtx.R, txCtx.TxGen, []sdk.Msg{txCtx.Msg}, fees, diff --git a/x/slashing/simulation/operations.go b/x/slashing/simulation/operations.go index fee0f859af..788ae4f90e 100644 --- a/x/slashing/simulation/operations.go +++ b/x/slashing/simulation/operations.go @@ -18,7 +18,7 @@ import ( // Simulation operation weights constants const ( - OpWeightMsgUnjail = "op_weight_msg_unjail" + OpWeightMsgUnjail = "op_weight_msg_unjail" //nolint:gosec ) // WeightedOperations returns all the operations from the module with their respective weights @@ -88,7 +88,6 @@ func SimulateMsgUnjail(ak types.AccountKeeper, bk types.BankKeeper, k keeper.Kee txGen := simappparams.MakeTestEncodingConfig().TxConfig tx, err := helpers.GenTx( - r, txGen, []sdk.Msg{msg}, fees, diff --git a/x/slashing/types/msg.go b/x/slashing/types/msg.go index d86ff5eb72..64948d8f8d 100644 --- a/x/slashing/types/msg.go +++ b/x/slashing/types/msg.go @@ -13,6 +13,7 @@ const ( var _ sdk.Msg = &MsgUnjail{} // NewMsgUnjail creates a new MsgUnjail instance +// //nolint:interfacer func NewMsgUnjail(validatorAddr sdk.ValAddress) *MsgUnjail { return &MsgUnjail{ diff --git a/x/slashing/types/signing_info.go b/x/slashing/types/signing_info.go index d9b00da199..3dacd12d63 100644 --- a/x/slashing/types/signing_info.go +++ b/x/slashing/types/signing_info.go @@ -9,6 +9,7 @@ import ( ) // NewValidatorSigningInfo creates a new ValidatorSigningInfo instance +// //nolint:interfacer func NewValidatorSigningInfo( condAddr sdk.ConsAddress, startHeight, indexOffset int64, diff --git a/x/staking/keeper/delegation_test.go b/x/staking/keeper/delegation_test.go index 
579cb1d055..069d4c943b 100644
--- a/x/staking/keeper/delegation_test.go
+++ b/x/staking/keeper/delegation_test.go
@@ -305,8 +305,8 @@ func TestUnbondingDelegationsMaxEntries(t *testing.T) {
require.True(sdk.IntEq(t, newNotBonded, oldNotBonded.AddRaw(1)))
}
-//// test undelegating self delegation from a validator pushing it below MinSelfDelegation
-//// shift it from the bonded to unbonding state and jailed
+// // test undelegating self delegation from a validator pushing it below MinSelfDelegation
+// // shift it from the bonded to unbonding state and jailed
func TestUndelegateSelfDelegationBelowMinSelfDelegation(t *testing.T) {
_, app, ctx := createTestInput()
diff --git a/x/staking/keeper/historical_info.go b/x/staking/keeper/historical_info.go
index df13dff357..243612d387 100644
--- a/x/staking/keeper/historical_info.go
+++ b/x/staking/keeper/historical_info.go
@@ -35,7 +35,9 @@ func (k Keeper) DeleteHistoricalInfo(ctx sdk.Context, height int64) {
}
// IterateHistoricalInfo provides an iterator over all stored HistoricalInfo
-// objects. For each HistoricalInfo object, cb will be called. If the cb returns
+//
+// objects. For each HistoricalInfo object, cb will be called. If the cb returns
+//
// true, the iterator will close and stop.
func (k Keeper) IterateHistoricalInfo(ctx sdk.Context, cb func(types.HistoricalInfo) bool) {
store := ctx.KVStore(k.storeKey)
diff --git a/x/staking/keeper/slash.go b/x/staking/keeper/slash.go
index 90cff7f15b..ae41eb72d6 100644
--- a/x/staking/keeper/slash.go
+++ b/x/staking/keeper/slash.go
@@ -12,15 +12,22 @@ import (
// of it, updating unbonding delegations & redelegations appropriately
//
// CONTRACT:
-// slashFactor is non-negative
+//
+// slashFactor is non-negative
+//
// CONTRACT:
-// Infraction was committed equal to or less than an unbonding period in the past,
-// so all unbonding delegations and redelegations from that height are stored
+//
+// Infraction was committed equal to or less than an unbonding period in the past,
+// so all unbonding delegations and redelegations from that height are stored
+//
// CONTRACT:
-// Slash will not slash unbonded validators (for the above reason)
+//
+// Slash will not slash unbonded validators (for the above reason)
+//
// CONTRACT:
-// Infraction was committed at the current height or at a past height,
-// not at a height in the future
+//
+// Infraction was committed at the current height or at a past height,
+// not at a height in the future
func (k Keeper) Slash(ctx sdk.Context, consAddr sdk.ConsAddress, infractionHeight int64, power int64, slashFactor sdk.Dec) {
logger := k.Logger(ctx)
diff --git a/x/staking/legacy/v034/types.go b/x/staking/legacy/v034/types.go
index 868a6901f8..789731819c 100644
--- a/x/staking/legacy/v034/types.go
+++ b/x/staking/legacy/v034/types.go
@@ -10,7 +10,7 @@ import (
"github.com/cosmos/cosmos-sdk/codec/legacy"
cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types"
sdk "github.com/cosmos/cosmos-sdk/types"
- "github.com/cosmos/cosmos-sdk/types/bech32/legacybech32"
+ "github.com/cosmos/cosmos-sdk/types/bech32/legacybech32" //nolint:staticcheck
)
const (
@@ -142,7 +142,7 @@ type (
)
func (v Validator) MarshalJSON() ([]byte, error) {
- bechConsPubKey, err := legacybech32.MarshalPubKey(legacybech32.ConsPK, v.ConsPubKey)
+ bechConsPubKey, err := legacybech32.MarshalPubKey(legacybech32.ConsPK, v.ConsPubKey) //nolint:staticcheck
if err != nil {
return nil, err
}
@@ -168,7 +168,7 @@ func (v *Validator) UnmarshalJSON(data []byte) error {
if err := legacy.Cdc.UnmarshalJSON(data,
bv); err != nil {
return err
}
- consPubKey, err := legacybech32.UnmarshalPubKey(legacybech32.ConsPK, bv.ConsPubKey)
+ consPubKey, err := legacybech32.UnmarshalPubKey(legacybech32.ConsPK, bv.ConsPubKey) //nolint:staticcheck
if err != nil {
return err
}
diff --git a/x/staking/legacy/v036/types.go b/x/staking/legacy/v036/types.go
index b1fef50faa..bb423c7520 100644
--- a/x/staking/legacy/v036/types.go
+++ b/x/staking/legacy/v036/types.go
@@ -10,7 +10,7 @@ import (
"github.com/cosmos/cosmos-sdk/codec/legacy"
cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types"
sdk "github.com/cosmos/cosmos-sdk/types"
- "github.com/cosmos/cosmos-sdk/types/bech32/legacybech32"
+ "github.com/cosmos/cosmos-sdk/types/bech32/legacybech32" //nolint:staticcheck
v034staking "github.com/cosmos/cosmos-sdk/x/staking/legacy/v034"
)
@@ -90,7 +90,7 @@ func NewGenesisState(
}
func (v Validator) MarshalJSON() ([]byte, error) {
- bechConsPubKey, err := legacybech32.MarshalPubKey(legacybech32.ConsPK, v.ConsPubKey)
+ bechConsPubKey, err := legacybech32.MarshalPubKey(legacybech32.ConsPK, v.ConsPubKey) //nolint:staticcheck
if err != nil {
return nil, err
}
@@ -115,7 +115,7 @@ func (v *Validator) UnmarshalJSON(data []byte) error {
if err := legacy.Cdc.UnmarshalJSON(data, bv); err != nil {
return err
}
- consPubKey, err := legacybech32.UnmarshalPubKey(legacybech32.ConsPK, bv.ConsPubKey)
+ consPubKey, err := legacybech32.UnmarshalPubKey(legacybech32.ConsPK, bv.ConsPubKey) //nolint:staticcheck
if err != nil {
return err
}
diff --git a/x/staking/legacy/v038/types.go b/x/staking/legacy/v038/types.go
index f5953bc543..f8559d330b 100644
--- a/x/staking/legacy/v038/types.go
+++ b/x/staking/legacy/v038/types.go
@@ -10,7 +10,7 @@ import (
"github.com/cosmos/cosmos-sdk/codec/legacy"
cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types"
sdk "github.com/cosmos/cosmos-sdk/types"
- "github.com/cosmos/cosmos-sdk/types/bech32/legacybech32"
+ "github.com/cosmos/cosmos-sdk/types/bech32/legacybech32" //nolint:staticcheck
v034staking "github.com/cosmos/cosmos-sdk/x/staking/legacy/v034"
v036staking "github.com/cosmos/cosmos-sdk/x/staking/legacy/v036"
)
@@ -115,7 +115,7 @@ func NewGenesisState(
// MarshalJSON marshals the validator to JSON using Bech32
func (v Validator) MarshalJSON() ([]byte, error) {
- bechConsPubKey, err := legacybech32.MarshalPubKey(legacybech32.ConsPK, v.ConsPubKey)
+ bechConsPubKey, err := legacybech32.MarshalPubKey(legacybech32.ConsPK, v.ConsPubKey) //nolint:staticcheck
if err != nil {
return nil, err
}
@@ -141,7 +141,7 @@ func (v *Validator) UnmarshalJSON(data []byte) error {
if err := legacy.Cdc.UnmarshalJSON(data, bv); err != nil {
return err
}
- consPubKey, err := legacybech32.UnmarshalPubKey(legacybech32.ConsPK, bv.ConsPubKey)
+ consPubKey, err := legacybech32.UnmarshalPubKey(legacybech32.ConsPK, bv.ConsPubKey) //nolint:staticcheck
if err != nil {
return err
}
diff --git a/x/staking/legacy/v040/genesis.pb.go b/x/staking/legacy/v040/genesis.pb.go
index 379c1ca1ef..c8747812d1 100644
--- a/x/staking/legacy/v040/genesis.pb.go
+++ b/x/staking/legacy/v040/genesis.pb.go
@@ -1,7 +1,7 @@
// Package v040 is taken from:
// https://github.com/cosmos/cosmos-sdk/blob/v0.40.1/x/staking/types/genesis.pb.go
// by copy-pasting only the relevant parts for Genesis.
-// nolint
+//nolint
package v040
import (
diff --git a/x/staking/legacy/v040/staking.pb.go b/x/staking/legacy/v040/staking.pb.go
index 138f9164c3..3357e82641 100644
--- a/x/staking/legacy/v040/staking.pb.go
+++ b/x/staking/legacy/v040/staking.pb.go
@@ -8,7 +8,6 @@ import (
compress_gzip "compress/gzip"
fmt "fmt"
io "io"
- io_ioutil "io/ioutil"
math "math"
math_bits "math/bits"
reflect "reflect"
@@ -1929,7 +1928,7 @@ func StakingDescription() (desc *github_com_gogo_protobuf_protoc_gen_gogo_descri
if err != nil {
panic(err)
}
- ungzipped, err := io_ioutil.ReadAll(gzipr)
+ ungzipped, err := io.ReadAll(gzipr)
if err != nil {
panic(err)
}
diff --git a/x/staking/simulation/operations.go b/x/staking/simulation/operations.go
index 8df4164fba..6bedbb5315 100644
--- a/x/staking/simulation/operations.go
+++ b/x/staking/simulation/operations.go
@@ -16,11 +16,11 @@ import (
// Simulation operation weights constants
const (
- OpWeightMsgCreateValidator = "op_weight_msg_create_validator"
- OpWeightMsgEditValidator = "op_weight_msg_edit_validator"
- OpWeightMsgDelegate = "op_weight_msg_delegate"
- OpWeightMsgUndelegate = "op_weight_msg_undelegate"
- OpWeightMsgBeginRedelegate = "op_weight_msg_begin_redelegate"
+ OpWeightMsgCreateValidator = "op_weight_msg_create_validator" //nolint:gosec
+ OpWeightMsgEditValidator = "op_weight_msg_edit_validator" //nolint:gosec
+ OpWeightMsgDelegate = "op_weight_msg_delegate" //nolint:gosec
+ OpWeightMsgUndelegate = "op_weight_msg_undelegate" //nolint:gosec
+ OpWeightMsgBeginRedelegate = "op_weight_msg_begin_redelegate" //nolint:gosec
)
// WeightedOperations returns all the operations from the module with their respective weights
@@ -152,7 +152,6 @@ func SimulateMsgCreateValidator(ak types.AccountKeeper, bk types.BankKeeper, k k
}
txCtx := simulation.OperationInput{
- R: r,
App: app,
TxGen: simappparams.MakeTestEncodingConfig().TxConfig,
Cdc: nil,
@@ -277,7 +276,6 @@ func SimulateMsgDelegate(ak types.AccountKeeper, bk types.BankKeeper, k keeper.K
msg := types.NewMsgDelegate(simAccount.Address, val.GetOperator(), bondAmt)
txCtx := simulation.OperationInput{
- R: r,
App: app,
TxGen: simappparams.MakeTestEncodingConfig().TxConfig,
Cdc: nil,
diff --git a/x/staking/types/delegation.go b/x/staking/types/delegation.go
index 7c978ba0e3..20a6c6763e 100644
--- a/x/staking/types/delegation.go
+++ b/x/staking/types/delegation.go
@@ -28,6 +28,7 @@ func (dvv DVVTriplet) String() string {
}
// NewDelegation creates a new delegation object
+//
//nolint:interfacer
func NewDelegation(delegatorAddr sdk.AccAddress, validatorAddr sdk.ValAddress, shares sdk.Dec) Delegation {
return Delegation{
@@ -112,6 +113,7 @@ func (e UnbondingDelegationEntry) IsMature(currentTime time.Time) bool {
}
// NewUnbondingDelegation - create a new unbonding delegation object
+//
//nolint:interfacer
func NewUnbondingDelegation(
delegatorAddr sdk.AccAddress, validatorAddr sdk.ValAddress,
@@ -333,6 +335,7 @@ func (d DelegationResponses) String() (out string) {
}
// NewRedelegationResponse creates a new RedelegationEntryResponse instance.
+// //nolint:interfacer func NewRedelegationResponse( delegatorAddr sdk.AccAddress, validatorSrc, validatorDst sdk.ValAddress, entries []RedelegationEntryResponse, diff --git a/x/staking/types/msg.go b/x/staking/types/msg.go index af60ac370d..7252761acf 100644 --- a/x/staking/types/msg.go +++ b/x/staking/types/msg.go @@ -150,6 +150,7 @@ func (msg MsgCreateValidator) UnpackInterfaces(unpacker codectypes.AnyUnpacker) } // NewMsgEditValidator creates a new MsgEditValidator instance +// //nolint:interfacer func NewMsgEditValidator(valAddr sdk.ValAddress, description Description, newRate *sdk.Dec, newMinSelfDelegation *sdk.Int) *MsgEditValidator { return &MsgEditValidator{ @@ -208,6 +209,7 @@ func (msg MsgEditValidator) ValidateBasic() error { } // NewMsgDelegate creates a new MsgDelegate instance. +// //nolint:interfacer func NewMsgDelegate(delAddr sdk.AccAddress, valAddr sdk.ValAddress, amount sdk.Coin) *MsgDelegate { return &MsgDelegate{ @@ -259,6 +261,7 @@ func (msg MsgDelegate) ValidateBasic() error { } // NewMsgBeginRedelegate creates a new MsgBeginRedelegate instance. +// //nolint:interfacer func NewMsgBeginRedelegate( delAddr sdk.AccAddress, valSrcAddr, valDstAddr sdk.ValAddress, amount sdk.Coin, @@ -317,6 +320,7 @@ func (msg MsgBeginRedelegate) ValidateBasic() error { } // NewMsgUndelegate creates a new MsgUndelegate instance. +// //nolint:interfacer func NewMsgUndelegate(delAddr sdk.AccAddress, valAddr sdk.ValAddress, amount sdk.Coin) *MsgUndelegate { return &MsgUndelegate{ diff --git a/x/staking/types/staking.pb.go b/x/staking/types/staking.pb.go index 618599edba..db19e5d83e 100644 --- a/x/staking/types/staking.pb.go +++ b/x/staking/types/staking.pb.go @@ -20,7 +20,6 @@ import ( _ "google.golang.org/protobuf/types/known/durationpb" _ "google.golang.org/protobuf/types/known/timestamppb" io "io" - io_ioutil "io/ioutil" math "math" math_bits "math/bits" reflect "reflect" @@ -1882,7 +1881,7 @@ func StakingDescription() (desc *github_com_gogo_protobuf_protoc_gen_gogo_descri if err != nil { panic(err) } - ungzipped, err := io_ioutil.ReadAll(gzipr) + ungzipped, err := io.ReadAll(gzipr) if err != nil { panic(err) } diff --git a/x/staking/types/tx.pb.go b/x/staking/types/tx.pb.go index c3a1bf7065..59e3b4ba66 100644 --- a/x/staking/types/tx.pb.go +++ b/x/staking/types/tx.pb.go @@ -240,6 +240,27 @@ func (m *MsgDelegate) XXX_DiscardUnknown() { var xxx_messageInfo_MsgDelegate proto.InternalMessageInfo +func (m *MsgDelegate) GetDelegatorAddress() string { + if m != nil { + return m.DelegatorAddress + } + return "" +} + +func (m *MsgDelegate) GetValidatorAddress() string { + if m != nil { + return m.ValidatorAddress + } + return "" +} + +func (m *MsgDelegate) GetAmount() types1.Coin { + if m != nil { + return m.Amount + } + return types1.Coin{} +} + // MsgDelegateResponse defines the Msg/Delegate response type. 
type MsgDelegateResponse struct { } @@ -319,6 +340,34 @@ func (m *MsgBeginRedelegate) XXX_DiscardUnknown() { var xxx_messageInfo_MsgBeginRedelegate proto.InternalMessageInfo +func (m *MsgBeginRedelegate) GetDelegatorAddress() string { + if m != nil { + return m.DelegatorAddress + } + return "" +} + +func (m *MsgBeginRedelegate) GetValidatorSrcAddress() string { + if m != nil { + return m.ValidatorSrcAddress + } + return "" +} + +func (m *MsgBeginRedelegate) GetValidatorDstAddress() string { + if m != nil { + return m.ValidatorDstAddress + } + return "" +} + +func (m *MsgBeginRedelegate) GetAmount() types1.Coin { + if m != nil { + return m.Amount + } + return types1.Coin{} +} + // MsgBeginRedelegateResponse defines the Msg/BeginRedelegate response type. type MsgBeginRedelegateResponse struct { CompletionTime time.Time `protobuf:"bytes,1,opt,name=completion_time,json=completionTime,proto3,stdtime" json:"completion_time"` @@ -405,6 +454,27 @@ func (m *MsgUndelegate) XXX_DiscardUnknown() { var xxx_messageInfo_MsgUndelegate proto.InternalMessageInfo +func (m *MsgUndelegate) GetDelegatorAddress() string { + if m != nil { + return m.DelegatorAddress + } + return "" +} + +func (m *MsgUndelegate) GetValidatorAddress() string { + if m != nil { + return m.ValidatorAddress + } + return "" +} + +func (m *MsgUndelegate) GetAmount() types1.Coin { + if m != nil { + return m.Amount + } + return types1.Coin{} +} + // MsgUndelegateResponse defines the Msg/Undelegate response type. type MsgUndelegateResponse struct { CompletionTime time.Time `protobuf:"bytes,1,opt,name=completion_time,json=completionTime,proto3,stdtime" json:"completion_time"` @@ -466,61 +536,61 @@ func init() { func init() { proto.RegisterFile("cosmos/staking/v1beta1/tx.proto", fileDescriptor_0926ef28816b35ab) } var fileDescriptor_0926ef28816b35ab = []byte{ - // 860 bytes of a gzipped FileDescriptorProto + // 861 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x56, 0x4d, 0x6b, 0xe3, 0x46, 0x18, 0xb6, 0x6c, 0xc7, 0x4d, 0x27, 0xe4, 0x4b, 0xf9, 0xc0, 0x11, 0xc1, 0x0a, 0x4a, 0x3f, 0x42, 0xdb, 0xc8, 0x4d, 0x4a, 0x29, 0xe4, 0x52, 0xe2, 0xb8, 0xa1, 0x21, 0x35, 0x14, 0x25, 0xed, 0xa1, 0x14, 0x8c, 0x3e, 0xc6, 0xaa, 0xb0, 0xa4, 0x51, 0x34, 0xe3, 0x10, 0x43, 0x7f, 0x40, 0x8f, 0x81, - 0xde, 0xf6, 0x94, 0x1f, 0xb1, 0x3f, 0x22, 0x2c, 0xec, 0x92, 0xe3, 0xb2, 0x07, 0xef, 0x92, 0xc0, - 0x92, 0xb3, 0x7f, 0xc1, 0xa2, 0xd1, 0x48, 0x96, 0xe5, 0x0f, 0x4c, 0x58, 0x5f, 0xf6, 0x64, 0x33, - 0xf3, 0xcc, 0xf3, 0xce, 0xfb, 0xbc, 0xcf, 0xbc, 0xaf, 0x80, 0xa8, 0x23, 0xec, 0x20, 0x5c, 0xc6, - 0x44, 0x6d, 0x5a, 0xae, 0x59, 0xbe, 0xdc, 0xd3, 0x20, 0x51, 0xf7, 0xca, 0xe4, 0x4a, 0xf6, 0x7c, - 0x44, 0x10, 0xbf, 0x1e, 0x02, 0x64, 0x06, 0x90, 0x19, 0x40, 0xd8, 0x30, 0x11, 0x32, 0x6d, 0x58, - 0xa6, 0x28, 0xad, 0xd5, 0x28, 0xab, 0x6e, 0x3b, 0x3c, 0x22, 0x88, 0xe9, 0x2d, 0x62, 0x39, 0x10, - 0x13, 0xd5, 0xf1, 0x18, 0x60, 0xd5, 0x44, 0x26, 0xa2, 0x7f, 0xcb, 0xc1, 0x3f, 0xb6, 0xba, 0x11, - 0x46, 0xaa, 0x87, 0x1b, 0x2c, 0x6c, 0xb8, 0x55, 0x62, 0xb7, 0xd4, 0x54, 0x0c, 0xe3, 0x2b, 0xea, - 0xc8, 0x72, 0xd9, 0xfe, 0x17, 0x23, 0xb2, 0x88, 0x2e, 0x4d, 0x51, 0xd2, 0xcb, 0x3c, 0xe0, 0x6b, - 0xd8, 0x3c, 0xf2, 0xa1, 0x4a, 0xe0, 0x9f, 0xaa, 0x6d, 0x19, 0x2a, 0x41, 0x3e, 0x7f, 0x0a, 0xe6, - 0x0c, 0x88, 0x75, 0xdf, 0xf2, 0x88, 0x85, 0xdc, 0x22, 0xb7, 0xc5, 0xed, 0xcc, 0xed, 0x6f, 0xcb, - 0xc3, 0xf3, 0x96, 0xab, 0x3d, 0x68, 0x25, 0x7f, 0xdb, 0x11, 0x33, 0x4a, 0xf2, 0x34, 0x5f, 0x03, - 0x40, 0x47, 0x8e, 0x63, 0x61, 0x1c, 0x70, 0x65, 0x29, 0xd7, 
0xd7, 0xa3, 0xb8, 0x8e, 0x62, 0xa4, - 0xa2, 0x12, 0x88, 0x19, 0x5f, 0x82, 0x80, 0xff, 0x17, 0xac, 0x38, 0x96, 0x5b, 0xc7, 0xd0, 0x6e, - 0xd4, 0x0d, 0x68, 0x43, 0x53, 0xa5, 0x77, 0xcc, 0x6d, 0x71, 0x3b, 0x9f, 0x57, 0x7e, 0x0b, 0xe0, - 0x6f, 0x3a, 0xe2, 0x57, 0xa6, 0x45, 0xfe, 0x69, 0x69, 0xb2, 0x8e, 0x1c, 0x26, 0x1b, 0xfb, 0xd9, - 0xc5, 0x46, 0xb3, 0x4c, 0xda, 0x1e, 0xc4, 0xf2, 0x89, 0x4b, 0xba, 0x1d, 0x51, 0x68, 0xab, 0x8e, - 0x7d, 0x20, 0x0d, 0xa1, 0x94, 0x94, 0x65, 0xc7, 0x72, 0xcf, 0xa0, 0xdd, 0xa8, 0xc6, 0x6b, 0xfc, - 0x09, 0x58, 0x66, 0x08, 0xe4, 0xd7, 0x55, 0xc3, 0xf0, 0x21, 0xc6, 0xc5, 0x3c, 0x8d, 0xbd, 0xd9, - 0xed, 0x88, 0xc5, 0x90, 0x6d, 0x00, 0x22, 0x29, 0x4b, 0xf1, 0xda, 0x61, 0xb8, 0x14, 0x50, 0x5d, - 0x46, 0x8a, 0xc7, 0x54, 0x33, 0x69, 0xaa, 0x01, 0x88, 0xa4, 0x2c, 0xc5, 0x6b, 0x11, 0xd5, 0x31, - 0x28, 0x78, 0x2d, 0xad, 0x09, 0xdb, 0xc5, 0x02, 0x95, 0x77, 0x55, 0x0e, 0xfd, 0x26, 0x47, 0x7e, - 0x93, 0x0f, 0xdd, 0x76, 0xa5, 0xf8, 0xe2, 0xf9, 0xee, 0x2a, 0xd3, 0x5d, 0xf7, 0xdb, 0x1e, 0x41, - 0xf2, 0xef, 0x2d, 0xed, 0x14, 0xb6, 0x15, 0x76, 0x9a, 0xff, 0x11, 0xcc, 0x5c, 0xaa, 0x76, 0x0b, - 0x16, 0x3f, 0xa3, 0x34, 0x1b, 0x51, 0x95, 0x02, 0x93, 0x25, 0x4a, 0x64, 0x45, 0x75, 0x0e, 0xd1, - 0x07, 0xb3, 0xff, 0xdd, 0x88, 0x99, 0xc7, 0x1b, 0x31, 0x23, 0x6d, 0x02, 0x61, 0xd0, 0x4e, 0x0a, - 0xc4, 0x1e, 0x72, 0x31, 0x94, 0xfe, 0xcf, 0x81, 0xa5, 0x1a, 0x36, 0x7f, 0x31, 0x2c, 0x32, 0x25, - 0xaf, 0xfd, 0x3c, 0x4c, 0xd3, 0x2c, 0xd5, 0x94, 0xef, 0x76, 0xc4, 0x85, 0x50, 0xd3, 0x31, 0x4a, - 0x3a, 0x60, 0xb1, 0xe7, 0xb5, 0xba, 0xaf, 0x12, 0xc8, 0x9c, 0x55, 0x9d, 0xd0, 0x55, 0x55, 0xa8, - 0x77, 0x3b, 0xe2, 0x7a, 0x18, 0x28, 0x45, 0x25, 0x29, 0x0b, 0x7a, 0x9f, 0xbf, 0xf9, 0xab, 0xe1, - 0x66, 0x0e, 0x0d, 0xf5, 0xeb, 0x14, 0x8d, 0x9c, 0xa8, 0x99, 0x00, 0x8a, 0xe9, 0xa2, 0xc4, 0x15, - 0x7b, 0xcf, 0x81, 0xb9, 0x1a, 0x36, 0xd9, 0x39, 0x38, 0xdc, 0xfe, 0xdc, 0xc7, 0xb3, 0x7f, 0xf6, - 0x49, 0xf6, 0xff, 0x09, 0x14, 0x54, 0x07, 0xb5, 0x5c, 0x42, 0x6b, 0x35, 0x81, 0x6f, 0x19, 0x3c, - 0x21, 0xc2, 0x1a, 0x58, 0x49, 0xe4, 0x19, 0xe7, 0xff, 0x2a, 0x4b, 0xfb, 0x63, 0x05, 0x9a, 0x96, - 0xab, 0x40, 0x63, 0x0a, 0x32, 0x9c, 0x83, 0xb5, 0x5e, 0x8e, 0xd8, 0xd7, 0x53, 0x52, 0x6c, 0x75, - 0x3b, 0xe2, 0x66, 0x5a, 0x8a, 0x04, 0x4c, 0x52, 0x56, 0xe2, 0xf5, 0x33, 0x5f, 0x1f, 0xca, 0x6a, - 0x60, 0x12, 0xb3, 0xe6, 0x46, 0xb3, 0x26, 0x60, 0x49, 0xd6, 0x2a, 0x26, 0x83, 0x3a, 0xe7, 0x9f, - 0xaa, 0x73, 0x93, 0x36, 0x88, 0x94, 0x9e, 0x91, 0xdc, 0x7c, 0x8d, 0xbe, 0x3e, 0xcf, 0x86, 0x81, - 0x45, 0xeb, 0xc1, 0x8c, 0x64, 0xfd, 0x40, 0x18, 0x68, 0x68, 0xe7, 0xd1, 0x00, 0xad, 0xcc, 0x06, - 0xa1, 0xae, 0xdf, 0x8a, 0x1c, 0x7d, 0x5d, 0xec, 0x70, 0xb0, 0x2d, 0x3d, 0x72, 0x60, 0xbe, 0x86, - 0xcd, 0x3f, 0x5c, 0xe3, 0x93, 0xf7, 0x6f, 0x03, 0xac, 0xf5, 0x65, 0x3a, 0x25, 0x49, 0xf7, 0x9f, - 0xe5, 0x41, 0xae, 0x86, 0x4d, 0xfe, 0x02, 0x2c, 0xa6, 0x3f, 0x1a, 0xbe, 0x19, 0xd5, 0xb3, 0x07, - 0x27, 0x82, 0xb0, 0x3f, 0x39, 0x36, 0xce, 0xa4, 0x09, 0xe6, 0xfb, 0x27, 0xc7, 0xce, 0x18, 0x92, - 0x3e, 0xa4, 0xf0, 0xfd, 0xa4, 0xc8, 0x38, 0xd8, 0xdf, 0x60, 0x36, 0x6e, 0x7a, 0xdb, 0x63, 0x4e, - 0x47, 0x20, 0xe1, 0xdb, 0x09, 0x40, 0x31, 0xfb, 0x05, 0x58, 0x4c, 0xb7, 0x94, 0x71, 0xea, 0xa5, - 0xb0, 0x63, 0xd5, 0x1b, 0xf5, 0xb4, 0x34, 0x00, 0x12, 0xef, 0xe0, 0xcb, 0x31, 0x0c, 0x3d, 0x98, - 0xb0, 0x3b, 0x11, 0x2c, 0x8a, 0x51, 0x39, 0xbe, 0xbd, 0x2f, 0x71, 0x77, 0xf7, 0x25, 0xee, 0xdd, - 0x7d, 0x89, 0xbb, 0x7e, 0x28, 0x65, 0xee, 0x1e, 0x4a, 0x99, 0xd7, 0x0f, 0xa5, 0xcc, 0x5f, 0xdf, - 0x8d, 0x1d, 0x63, 0x57, 0xf1, 0x57, 0x2a, 0x1d, 0x68, 0x5a, 0x81, 0x5a, 0xf2, 0x87, 
 	0x0f, 0x01,
-	0x00, 0x00, 0xff, 0xff, 0xa1, 0x2b, 0xfd, 0x07, 0x8a, 0x0b, 0x00, 0x00,
+	0xde, 0x7a, 0xca, 0x8f, 0xe8, 0x8f, 0x08, 0x81, 0x85, 0x1c, 0x97, 0x3d, 0x78, 0x17, 0x67, 0x0f,
+	0x7b, 0xf6, 0x2f, 0x58, 0x34, 0x1a, 0xc9, 0xb2, 0xfc, 0x81, 0x09, 0xeb, 0xcb, 0x9e, 0x6c, 0x66,
+	0x9e, 0x79, 0xde, 0x79, 0x9f, 0xf7, 0x99, 0xf7, 0x15, 0x10, 0x75, 0x84, 0x1d, 0x84, 0xcb, 0x98,
+	0xa8, 0x4d, 0xcb, 0x35, 0xcb, 0xd7, 0x07, 0x1a, 0x24, 0xea, 0x41, 0x99, 0xdc, 0xc8, 0x9e, 0x8f,
+	0x08, 0xe2, 0x37, 0x43, 0x80, 0xcc, 0x00, 0x32, 0x03, 0x08, 0x5b, 0x26, 0x42, 0xa6, 0x0d, 0xcb,
+	0x14, 0xa5, 0xb5, 0x1a, 0x65, 0xd5, 0x6d, 0x87, 0x47, 0x04, 0x31, 0xbd, 0x45, 0x2c, 0x07, 0x62,
+	0xa2, 0x3a, 0x1e, 0x03, 0xac, 0x9b, 0xc8, 0x44, 0xf4, 0x6f, 0x39, 0xf8, 0xc7, 0x56, 0xb7, 0xc2,
+	0x48, 0xf5, 0x70, 0x83, 0x85, 0x0d, 0xb7, 0x4a, 0xec, 0x96, 0x9a, 0x8a, 0x61, 0x7c, 0x45, 0x1d,
+	0x59, 0x2e, 0xdb, 0xff, 0x6c, 0x4c, 0x16, 0xd1, 0xa5, 0x29, 0x4a, 0x7a, 0x91, 0x07, 0x7c, 0x0d,
+	0x9b, 0x27, 0x3e, 0x54, 0x09, 0xfc, 0x5d, 0xb5, 0x2d, 0x43, 0x25, 0xc8, 0xe7, 0xcf, 0xc1, 0x82,
+	0x01, 0xb1, 0xee, 0x5b, 0x1e, 0xb1, 0x90, 0x5b, 0xe4, 0x76, 0xb8, 0xbd, 0x85, 0xc3, 0x5d, 0x79,
+	0x74, 0xde, 0x72, 0xb5, 0x0f, 0xad, 0xe4, 0xef, 0x3b, 0x62, 0x46, 0x49, 0x9e, 0xe6, 0x6b, 0x00,
+	0xe8, 0xc8, 0x71, 0x2c, 0x8c, 0x03, 0xae, 0x2c, 0xe5, 0xfa, 0x72, 0x1c, 0xd7, 0x49, 0x8c, 0x54,
+	0x54, 0x02, 0x31, 0xe3, 0x4b, 0x10, 0xf0, 0x7f, 0x83, 0x35, 0xc7, 0x72, 0xeb, 0x18, 0xda, 0x8d,
+	0xba, 0x01, 0x6d, 0x68, 0xaa, 0xf4, 0x8e, 0xb9, 0x1d, 0x6e, 0xef, 0xd3, 0xca, 0x2f, 0x01, 0xfc,
+	0x55, 0x47, 0xfc, 0xc2, 0xb4, 0xc8, 0x5f, 0x2d, 0x4d, 0xd6, 0x91, 0xc3, 0x64, 0x63, 0x3f, 0xfb,
+	0xd8, 0x68, 0x96, 0x49, 0xdb, 0x83, 0x58, 0x3e, 0x73, 0x49, 0xaf, 0x23, 0x0a, 0x6d, 0xd5, 0xb1,
+	0x8f, 0xa4, 0x11, 0x94, 0x92, 0xb2, 0xea, 0x58, 0xee, 0x05, 0xb4, 0x1b, 0xd5, 0x78, 0x8d, 0x3f,
+	0x03, 0xab, 0x0c, 0x81, 0xfc, 0xba, 0x6a, 0x18, 0x3e, 0xc4, 0xb8, 0x98, 0xa7, 0xb1, 0xb7, 0x7b,
+	0x1d, 0xb1, 0x18, 0xb2, 0x0d, 0x41, 0x24, 0x65, 0x25, 0x5e, 0x3b, 0x0e, 0x97, 0x02, 0xaa, 0xeb,
+	0x48, 0xf1, 0x98, 0x6a, 0x2e, 0x4d, 0x35, 0x04, 0x91, 0x94, 0x95, 0x78, 0x2d, 0xa2, 0x3a, 0x05,
+	0x05, 0xaf, 0xa5, 0x35, 0x61, 0xbb, 0x58, 0xa0, 0xf2, 0xae, 0xcb, 0xa1, 0xdf, 0xe4, 0xc8, 0x6f,
+	0xf2, 0xb1, 0xdb, 0xae, 0x14, 0x1f, 0xfe, 0xdf, 0x5f, 0x67, 0xba, 0xeb, 0x7e, 0xdb, 0x23, 0x48,
+	0xfe, 0xb5, 0xa5, 0x9d, 0xc3, 0xb6, 0xc2, 0x4e, 0xf3, 0xdf, 0x83, 0xb9, 0x6b, 0xd5, 0x6e, 0xc1,
+	0xe2, 0x27, 0x94, 0x66, 0x2b, 0xaa, 0x52, 0x60, 0xb2, 0x44, 0x89, 0xac, 0xa8, 0xce, 0x21, 0xfa,
+	0x68, 0xfe, 0x9f, 0x3b, 0x31, 0xf3, 0xee, 0x4e, 0xcc, 0x48, 0xdb, 0x40, 0x18, 0xb6, 0x93, 0x02,
+	0xb1, 0x87, 0x5c, 0x0c, 0xa5, 0x7f, 0x73, 0x60, 0xa5, 0x86, 0xcd, 0x9f, 0x0c, 0x8b, 0xcc, 0xc8,
+	0x6b, 0x3f, 0x8e, 0xd2, 0x34, 0x4b, 0x35, 0xe5, 0x7b, 0x1d, 0x71, 0x29, 0xd4, 0x74, 0x82, 0x92,
+	0x0e, 0x58, 0xee, 0x7b, 0xad, 0xee, 0xab, 0x04, 0x32, 0x67, 0x55, 0xa7, 0x74, 0x55, 0x15, 0xea,
+	0xbd, 0x8e, 0xb8, 0x19, 0x06, 0x4a, 0x51, 0x49, 0xca, 0x92, 0x3e, 0xe0, 0x6f, 0xfe, 0x66, 0xb4,
+	0x99, 0x43, 0x43, 0xfd, 0x3c, 0x43, 0x23, 0x27, 0x6a, 0x26, 0x80, 0x62, 0xba, 0x28, 0x71, 0xc5,
+	0xba, 0x1c, 0x58, 0xa8, 0x61, 0x93, 0x9d, 0x83, 0xa3, 0xed, 0xcf, 0x7d, 0x38, 0xfb, 0x67, 0x9f,
+	0x65, 0xff, 0x1f, 0x40, 0x41, 0x75, 0x50, 0xcb, 0x25, 0xb4, 0x56, 0x53, 0xf8, 0x96, 0xc1, 0x8f,
+	0xf2, 0x54, 0x80, 0x0d, 0xb0, 0x96, 0xc8, 0x31, 0xce, 0xfd, 0x21, 0x4b, 0x7b, 0x63, 0x05, 0x9a,
+	0x96, 0xab, 0x40, 0x63, 0x06, 0x12, 0x5c, 0x82, 0x8d, 0x7e, 0x7e, 0xd8, 0xd7, 0x53, 0x32, 0xec,
+	0xf4, 0x3a, 0xe2, 0x76, 0x5a, 0x86, 0x04, 0x4c, 0x52, 0xd6, 0xe2, 0xf5, 0x0b, 0x5f, 0x1f, 0xc9,
+	0x6a, 0x60, 0x12, 0xb3, 0xe6, 0xc6, 0xb3, 0x26, 0x60, 0x49, 0xd6, 0x2a, 0x26, 0xc3, 0x1a, 0xe7,
+	0x9f, 0xa3, 0x71, 0x93, 0x36, 0x86, 0x94, 0x96, 0x91, 0xd4, 0x7c, 0x8d, 0xbe, 0x3a, 0xcf, 0x86,
+	0x81, 0x35, 0xeb, 0xc1, 0x6c, 0x64, 0x7d, 0x40, 0x18, 0x6a, 0x64, 0x97, 0xd1, 0xe0, 0xac, 0xcc,
+	0x07, 0x61, 0x6e, 0x5f, 0x8b, 0x1c, 0x7d, 0x55, 0xec, 0x70, 0xb0, 0x2d, 0xbd, 0xe5, 0xc0, 0x62,
+	0x0d, 0x9b, 0xbf, 0xb9, 0xc6, 0x47, 0xed, 0xdb, 0x06, 0xd8, 0x18, 0xc8, 0x72, 0x46, 0x72, 0x1e,
+	0xfe, 0x97, 0x07, 0xb9, 0x1a, 0x36, 0xf9, 0x2b, 0xb0, 0x9c, 0xfe, 0x50, 0xf8, 0x6a, 0x5c, 0x9f,
+	0x1e, 0x9e, 0x02, 0xc2, 0xe1, 0xf4, 0xd8, 0x38, 0x93, 0x26, 0x58, 0x1c, 0x9c, 0x16, 0x7b, 0x13,
+	0x48, 0x06, 0x90, 0xc2, 0xb7, 0xd3, 0x22, 0xe3, 0x60, 0x7f, 0x82, 0xf9, 0xb8, 0xd1, 0xed, 0x4e,
+	0x38, 0x1d, 0x81, 0x84, 0xaf, 0xa7, 0x00, 0xc5, 0xec, 0x57, 0x60, 0x39, 0xdd, 0x4a, 0x26, 0xa9,
+	0x97, 0xc2, 0x4e, 0x54, 0x6f, 0xdc, 0xb3, 0xd2, 0x00, 0x48, 0xbc, 0x81, 0xcf, 0x27, 0x30, 0xf4,
+	0x61, 0xc2, 0xfe, 0x54, 0xb0, 0x28, 0x46, 0xe5, 0xf4, 0xbe, 0x5b, 0xe2, 0x1e, 0xbb, 0x25, 0xee,
+	0x4d, 0xb7, 0xc4, 0xdd, 0x3e, 0x95, 0x32, 0x8f, 0x4f, 0xa5, 0xcc, 0xcb, 0xa7, 0x52, 0xe6, 0x8f,
+	0x6f, 0x26, 0x8e, 0xae, 0x9b, 0xf8, 0xcb, 0x94, 0x0e, 0x31, 0xad, 0x40, 0x2d, 0xf9, 0xdd, 0xfb,
+	0x00, 0x00, 0x00, 0xff, 0xff, 0x99, 0xdc, 0x0e, 0x15, 0x7e, 0x0b, 0x00, 0x00,
 }

 // Reference imports to suppress errors if they are not otherwise used.
diff --git a/x/staking/types/validator.go b/x/staking/types/validator.go
index 0687e0071a..de41bd8a01 100644
--- a/x/staking/types/validator.go
+++ b/x/staking/types/validator.go
@@ -38,6 +38,7 @@ var (
 var _ ValidatorI = Validator{}

 // NewValidator constructs a new Validator
+//
 //nolint:interfacer
 func NewValidator(operator sdk.ValAddress, pubKey cryptotypes.PubKey, description Description) (Validator, error) {
 	pkAny, err := codectypes.NewAnyWithValue(pubKey)
@@ -406,7 +407,8 @@ func (v Validator) RemoveTokens(tokens sdk.Int) Validator {

 // RemoveDelShares removes delegator shares from a validator.
 // NOTE: because token fractions are left in the validator,
-// the exchange rate of future shares of this validator can increase.
+//
+//	the exchange rate of future shares of this validator can increase.
 func (v Validator) RemoveDelShares(delShares sdk.Dec) (Validator, sdk.Int) {
 	remainingShares := v.DelegatorShares.Sub(delShares)

diff --git a/x/upgrade/abci.go b/x/upgrade/abci.go
index a9f6c08262..2bfe02dfab 100644
--- a/x/upgrade/abci.go
+++ b/x/upgrade/abci.go
@@ -59,7 +59,7 @@ func BeginBlocker(k keeper.Keeper, ctx sdk.Context, _ abci.RequestBeginBlock) {
 		if !k.HasHandler(plan.Name) {
 			// Write the upgrade info to disk. The UpgradeStoreLoader uses this info to perform or skip
 			// store migrations.
-			err := k.DumpUpgradeInfoWithInfoToDisk(ctx.BlockHeight(), plan.Name, plan.Info)
+			err := k.DumpUpgradeInfoWithInfoToDisk(ctx.BlockHeight(), plan.Name, plan.Info) //nolint:staticcheck
 			if err != nil {
 				panic(fmt.Errorf("unable to write upgrade info to filesystem: %s", err.Error()))
 			}
diff --git a/x/upgrade/abci_test.go b/x/upgrade/abci_test.go
index cbda3e429b..8744e90e08 100644
--- a/x/upgrade/abci_test.go
+++ b/x/upgrade/abci_test.go
@@ -4,7 +4,6 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
-	"io/ioutil"
 	"os"
 	"testing"
 	"time"
@@ -395,7 +394,7 @@ func TestDumpUpgradeInfoToFile(t *testing.T) {
 	upgradeInfoFilePath, err := s.keeper.GetUpgradeInfoPath()
 	require.Nil(t, err)

-	data, err := ioutil.ReadFile(upgradeInfoFilePath)
+	data, err := os.ReadFile(upgradeInfoFilePath)
 	require.NoError(t, err)

 	var upgradeInfo storetypes.UpgradeInfo
diff --git a/x/upgrade/doc.go b/x/upgrade/doc.go
index b9d696914f..1ba1614c32 100644
--- a/x/upgrade/doc.go
+++ b/x/upgrade/doc.go
@@ -7,7 +7,7 @@ Without software support for upgrades, upgrading a live chain is risky because all of the validators need to pause
 their state machines at exactly the same point in the process. If this is not done correctly, there can be state
 inconsistencies which are hard to recover from.

-General Workflow
+# General Workflow

 Let's assume we are running v0.38.0 of our software in our testnet and want to upgrade to v0.40.0.
 How would this look in practice? First of all, we want to finalize the v0.40.0 release candidate
@@ -40,25 +40,27 @@ the rest of the block as normal. Once 2/3 of the voting power has upgraded, the blockchain will immediately
 resume the consensus mechanism. If the majority of operators add a custom `do-upgrade` script, this should
 be a matter of minutes and not even require them to be awake at that time.

-Integrating With An App
+# Integrating With An App

 Set up an upgrade Keeper for the app and then define a BeginBlocker that calls the upgrade
 keeper's BeginBlocker method:
-  func (app *myApp) BeginBlocker(ctx sdk.Context, req abci.RequestBeginBlock) abci.ResponseBeginBlock {
-    app.upgradeKeeper.BeginBlocker(ctx, req)
-    return abci.ResponseBeginBlock{}
-  }
+
+	func (app *myApp) BeginBlocker(ctx sdk.Context, req abci.RequestBeginBlock) abci.ResponseBeginBlock {
+		app.upgradeKeeper.BeginBlocker(ctx, req)
+		return abci.ResponseBeginBlock{}
+	}

 The app must then integrate the upgrade keeper with its governance module as appropriate. The governance module
 should call ScheduleUpgrade to schedule an upgrade and ClearUpgradePlan to cancel a pending upgrade.

-Performing Upgrades
+# Performing Upgrades

 Upgrades can be scheduled at a predefined block height. Once this block height is reached, the
 existing software will cease to process ABCI messages and a new version with code that handles the upgrade must be deployed.
 All upgrades are coordinated by a unique upgrade name that cannot be reused on the same blockchain. In order for the upgrade
 module to know that the upgrade has been safely applied, a handler with the name of the upgrade must be installed.
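Before the handler example that follows, it helps to see where a Plan comes from in the first place. The sketch below is not part of this diff: it assumes the v0.45 keeper API, borrows the `myApp` type from the doc's own snippet, and uses placeholder values for the name and height. In a real chain the x/gov proposal handler makes this call once a SoftwareUpgradeProposal passes, as the doc notes above.

```go
import (
	sdk "github.com/cosmos/cosmos-sdk/types"
	upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
)

// scheduleExample records a pending upgrade plan. BeginBlocker will halt
// the chain at plan.Height unless a handler named plan.Name has been
// registered by the time that height is reached.
func scheduleExample(ctx sdk.Context, app *myApp) {
	plan := upgradetypes.Plan{
		Name:   "my-fancy-upgrade", // must match the SetUpgradeHandler name below
		Height: 1_000_000,          // placeholder halt height
	}
	if err := app.upgradeKeeper.ScheduleUpgrade(ctx, plan); err != nil {
		panic(err)
	}
}
```

With a plan stored, everything that follows is about what the replacement binary must register before that height arrives.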
 Here is an example handler for an upgrade named "my-fancy-upgrade":
+
 	app.upgradeKeeper.SetUpgradeHandler("my-fancy-upgrade", func(ctx sdk.Context, plan upgrade.Plan) {
 		// Perform any migrations of the state store needed for this upgrade
 	})
@@ -93,18 +95,20 @@ Here is a sample code to set store migrations with an upgrade:
 		app.SetStoreLoader(upgrade.UpgradeStoreLoader(upgradeInfo.Height, &storeUpgrades))
 	}

-Halt Behavior
+# Halt Behavior

 Before halting the ABCI state machine in the BeginBlocker method, the upgrade module will log an error
 that looks like:
+
 	UPGRADE "<Name>" NEEDED at height <Height>: <Info>
+
 where Name and Info are the values of the respective fields on the upgrade Plan.

 To perform the actual halt of the blockchain, the upgrade keeper simply panics which prevents the ABCI state machine
 from proceeding but doesn't actually exit the process. Exiting the process can cause issues for other nodes that start
 to lose connectivity with the exiting nodes, thus this module prefers to just halt but not exit.

-Automation and Plan.Info
+# Automation and Plan.Info

 We have deprecated calling out to scripts, instead we propose https://github.com/cosmos/cosmos-sdk/tree/v0.40.0-rc5/cosmovisor
 as a model for a watcher daemon that can launch simd as a subprocess and then read the upgrade log message
@@ -113,7 +117,7 @@ specified here https://github.com/cosmos/cosmos-sdk/tree/v0.40.0-rc5/cosmovisor/
 This will allow a properly configured cosmovisor daemon to auto-download new binaries and auto-upgrade.
 As noted there, this is intended more for full nodes than validators.

-Cancelling Upgrades
+# Cancelling Upgrades

 There are two ways to cancel a planned upgrade - with on-chain governance or off-chain social consensus.
 For the first one, there is a CancelSoftwareUpgrade proposal type, which can be voted on and will
@@ -134,6 +138,7 @@ If over two-thirds run their nodes with this flag on the old binary, it will allow the blockchain to continue through
 the upgrade with a manual override. (This must be well-documented for anyone syncing from genesis later on).

 Example:
+
 	simd start --unsafe-skip-upgrades <height1> <height2> ...

 NOTE: Here simd is used as an example binary, replace it with the original binary
diff --git a/x/upgrade/keeper/keeper.go b/x/upgrade/keeper/keeper.go
index 79b761b726..5059bce2ce 100644
--- a/x/upgrade/keeper/keeper.go
+++ b/x/upgrade/keeper/keeper.go
@@ -4,7 +4,6 @@ import (
 	"encoding/binary"
 	"encoding/json"
 	"fmt"
-	"io/ioutil"
 	"os"
 	"path"
 	"path/filepath"
@@ -424,7 +423,7 @@ func (k Keeper) ReadUpgradeInfoFromDisk() (store.UpgradeInfo, error) {
 		return upgradeInfo, err
 	}

-	data, err := ioutil.ReadFile(upgradeInfoPath)
+	data, err := os.ReadFile(upgradeInfoPath)
 	if err != nil {
 		// if file does not exist, assume there are no upgrades
 		if os.IsNotExist(err) {
diff --git a/x/upgrade/types/storeloader_test.go b/x/upgrade/types/storeloader_test.go
index c138b8e252..1f3aae0036 100644
--- a/x/upgrade/types/storeloader_test.go
+++ b/x/upgrade/types/storeloader_test.go
@@ -2,7 +2,6 @@ package types

 import (
 	"encoding/json"
-	"io/ioutil"
 	"os"
 	"path/filepath"
 	"testing"
@@ -77,7 +76,7 @@ func TestSetLoader(t *testing.T) {
 	data, err := json.Marshal(upgradeInfo)
 	require.NoError(t, err)

-	err = ioutil.WriteFile(upgradeInfoFilePath, data, 0o644)
+	err = os.WriteFile(upgradeInfoFilePath, data, 0o644)
 	require.NoError(t, err)

 	// make sure it exists before running everything
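One closing note on the `io/ioutil` removals threaded through these files: `os.ReadFile` and `os.WriteFile` have been drop-in replacements since Go 1.16, so each swap above is purely mechanical. Below is a self-contained sketch of the `upgrade-info.json` round trip that `TestSetLoader` and `ReadUpgradeInfoFromDisk` exercise; the struct only mirrors `storetypes.UpgradeInfo`, and its JSON tags are an assumption rather than code copied from the SDK.

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
)

// upgradeInfo stands in for storetypes.UpgradeInfo; the json tags are
// assumed for illustration, not copied from the SDK.
type upgradeInfo struct {
	Name   string `json:"name"`
	Height int64  `json:"height"`
}

func main() {
	path := filepath.Join(os.TempDir(), "upgrade-info.json")

	// Write side, as in TestSetLoader: os.WriteFile replaces ioutil.WriteFile.
	data, err := json.Marshal(upgradeInfo{Name: "my-fancy-upgrade", Height: 100})
	if err != nil {
		panic(err)
	}
	if err := os.WriteFile(path, data, 0o644); err != nil {
		panic(err)
	}

	// Read side, as in ReadUpgradeInfoFromDisk: os.ReadFile replaces ioutil.ReadFile.
	raw, err := os.ReadFile(path)
	if err != nil {
		panic(err)
	}
	var info upgradeInfo
	if err := json.Unmarshal(raw, &info); err != nil {
		panic(err)
	}
	fmt.Printf("upgrade %q scheduled at height %d\n", info.Name, info.Height)
}
```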