diff --git a/CHANGELOG.md b/CHANGELOG.md index 99372d08e2d3..c7ecbc5bfc31 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -44,6 +44,7 @@ Every module contains its own CHANGELOG.md. Please refer to the module you are i * (baseapp) [#20291](https://github.com/cosmos/cosmos-sdk/pull/20291) Simulate nested messages. * (client/keys) [#21829](https://github.com/cosmos/cosmos-sdk/pull/21829) Add support for importing hex key using standard input. +* (x/auth/ante) [#23128](https://github.com/cosmos/cosmos-sdk/pull/23128) Allow custom verifyIsOnCurve when validate tx for public key like ethsecp256k1. ### Improvements diff --git a/collections/README.md b/collections/README.md index f2bca5101bc3..b8d8a62f1d93 100644 --- a/collections/README.md +++ b/collections/README.md @@ -90,7 +90,7 @@ Since a module can have multiple collections, the following is expected: We don't want a collection to write over the state of the other collection so we pass it a prefix, which defines a storage partition owned by the collection. -If you already built modules, the prefix translates to the items you were creating in your ``types/keys.go`` file, example: https://github.com/cosmos/cosmos-sdk/blob/main/x/feegrant/key.go#L27 +If you already built modules, the prefix translates to the items you were creating in your ``types/keys.go`` file, example: https://github.com/cosmos/cosmos-sdk/blob/v0.52.0-rc.1/x/feegrant/key.go#L16~L22 your old: diff --git a/docs/architecture/adr-069-gov-improvements.md b/docs/architecture/adr-069-gov-improvements.md index 1ef6971c713d..0e3b62ada24b 100644 --- a/docs/architecture/adr-069-gov-improvements.md +++ b/docs/architecture/adr-069-gov-improvements.md @@ -66,8 +66,8 @@ Voter can only vote NO on the proposal. 
If the NO threshold is reached, the opti Two governance parameters will be in added [`v1.Params`][5] to support optimistic proposals: ```protobuf -// optimistic_authorized_addreses is an optional governance parameter that limits the authorized accounts that can submit optimistic proposals -repeated string optimistic_authorized_addreses = 17 [(cosmos_proto.scalar) = "cosmos.AddressString"]; +// optimistic_authorized_addresses is an optional governance parameter that limits the authorized accounts that can submit optimistic proposals +repeated string optimistic_authorized_addresses = 17 [(cosmos_proto.scalar) = "cosmos.AddressString"]; // Optimistic rejected threshold defines at which percentage of NO votes, the optimistic proposal should fail and be converted to a standard proposal. string optimistic_rejected_threshold = 18 [(cosmos_proto.scalar) = "cosmos.Dec"]; diff --git a/server/v2/appmanager/README.md b/server/v2/appmanager/README.md new file mode 100644 index 000000000000..2ffaafa11a48 --- /dev/null +++ b/server/v2/appmanager/README.md @@ -0,0 +1,234 @@ +# AppManager Documentation + +The AppManager serves as a high-level coordinator, delegating most operations to the STF while managing state access through the Store interface. + +This document outlines the main external calls in the AppManager package, their execution flows, and dependencies. + +## Table of Contents +- [InitGenesis](#initgenesis) +- [ExportGenesis](#exportgenesis) +- [DeliverBlock](#deliverblock) +- [ValidateTx](#validatetx) +- [Simulate](#simulate) +- [SimulateWithState](#simulatewithstate) +- [Query](#query) +- [QueryWithState](#querywithstate) + +## InitGenesis + +InitGenesis initializes the genesis state of the application. 
+ +```mermaid +sequenceDiagram +participant Caller +participant AppManager +participant InitGenesisImpl +participant STF +participant State +Caller->>AppManager: InitGenesis(ctx, blockRequest, genesisJSON, decoder) +AppManager->>InitGenesisImpl: initGenesis(ctx, genesisJSON, txHandler) +loop For each genesis transaction +InitGenesisImpl->>InitGenesisImpl: Decode and collect transactions +end +InitGenesisImpl-->>AppManager: genesisState, validatorUpdates, error +AppManager->>STF: DeliverBlock(ctx, blockRequest, genesisState) +STF-->>AppManager: blockResponse, blockZeroState, error +AppManager->>State: Apply state changes +AppManager-->>Caller: blockResponse, genesisState, error +``` + +### Dependencies +- Required Input: + - Context + - BlockRequest + - Genesis JSON + - Transaction decoder +- Required Components: + - InitGenesis implementation + - STF + - Store interface + +## ExportGenesis + +ExportGenesis exports the current application state as genesis state. + +```mermaid +sequenceDiagram +participant Caller +participant AppManager +participant ExportGenesisImpl +Caller->>AppManager: ExportGenesis(ctx, version) +AppManager->>ExportGenesisImpl: exportGenesis(ctx, version) +ExportGenesisImpl-->>Caller: genesisJSON, error +``` + +### Dependencies +- Required Input: + - Context + - Version +- Required Components: + - ExportGenesis implementation + - Store interface + + +## DeliverBlock + +DeliverBlock processes a block of transactions. 
+ +```mermaid +sequenceDiagram +participant Caller +participant AppManager +participant Store +participant STF +Caller->>AppManager: DeliverBlock(ctx, block) +AppManager->>Store: StateLatest() +Store-->>AppManager: version, currentState, error +AppManager->>STF: DeliverBlock(ctx, block, currentState) +STF-->>Caller: blockResponse, newState, error +``` + + +### Dependencies +- Required Input: + - Context + - BlockRequest +- Required Components: + - Store interface + - STF + +## ValidateTx + +ValidateTx validates a transaction against the latest state. + +```mermaid +sequenceDiagram +participant Caller +participant AppManager +participant Store +participant STF +Caller->>AppManager: ValidateTx(ctx, tx) +AppManager->>Store: StateLatest() +Store-->>AppManager: version, latestState, error +AppManager->>STF: ValidateTx(ctx, latestState, gasLimit, tx) +STF-->>Caller: TxResult, error +``` + + +### Dependencies +- Required Input: + - Context + - Transaction +- Required Components: + - Store interface + - STF + - Configuration (for gas limits) + +## Simulate + +Simulate executes a transaction simulation using the latest state. + +```mermaid +sequenceDiagram +participant Caller +participant AppManager +participant Store +participant STF +Caller->>AppManager: Simulate(ctx, tx) +AppManager->>Store: StateLatest() +Store-->>AppManager: version, state, error +AppManager->>STF: Simulate(ctx, state, gasLimit, tx) +STF-->>Caller: TxResult, WriterMap, error +``` + +### Dependencies +- Required Input: + - Context + - Transaction +- Required Components: + - Store interface + - STF + - Configuration (for gas limits) + +## SimulateWithState + +SimulateWithState executes a transaction simulation using provided state. 
+ +```mermaid +sequenceDiagram +participant Caller +participant AppManager +participant STF +Caller->>AppManager: SimulateWithState(ctx, state, tx) +AppManager->>STF: Simulate(ctx, state, gasLimit, tx) +STF-->>Caller: TxResult, WriterMap, error +``` + +### Dependencies +- Required Input: + - Context + - Transaction + - State + + ## Query + +Query executes a query at a specific version. + +```mermaid +sequenceDiagram +participant Caller +participant AppManager +participant Store +participant STF +Caller->>AppManager: Query(ctx, version, request) +alt version == 0 +AppManager->>Store: StateLatest() +else version > 0 +AppManager->>Store: StateAt(version) +end +Store-->>AppManager: queryState, error +AppManager->>STF: Query(ctx, queryState, gasLimit, request) +STF-->>Caller: response, error +``` + +### Dependencies +- Required Input: + - Context + - Version (or 0 for latest) + - Query request +- Required Components: + - Store interface + - STF + - Configuration (for gas limits) + + ## QueryWithState + +QueryWithState executes a query using provided state. 
+ +```mermaid +sequenceDiagram +participant Caller +participant AppManager +participant STF +Caller->>AppManager: QueryWithState(ctx, state, request) +AppManager->>STF: Query(ctx, state, gasLimit, request) +STF-->>Caller: response, error +``` + +### Dependencies +- Required Input: + - Context + - ReaderMap state + - Query request +- Required Components: + - STF + - Configuration (for gas limits) + +## Common Dependencies + +All operations depend on: +- Context management +- Error handling +- Gas metering +- State management (Store interface) +- STF interface \ No newline at end of file diff --git a/server/v2/stf/README.md b/server/v2/stf/README.md index 2f2ff1ca749f..e3ae1b5376ff 100644 --- a/server/v2/stf/README.md +++ b/server/v2/stf/README.md @@ -1,34 +1,152 @@ -# State Transition Function (STF) +# STF (State Transition Function) Documentation -STF is a function that takes a state and an action as input and returns the next state. It does not assume the execution model of the application nor consensus. +This document outlines the main external calls in the STF package, their execution flows, and dependencies. -The state transition function receives a read only instance of state. It does not directly write to disk, instead it will return the state changes which has undergone within the application. The state transition function is deterministic, meaning that given the same input, it will always produce the same output. +## Table of Contents +- [DeliverBlock](#deliverblock) +- [Simulate](#simulate) +- [ValidateTx](#validatetx) +- [Query](#query) -## BranchDB +## DeliverBlock -BranchDB is a cache of all the reads done within a block, simulation or transaction validation. It takes a read-only instance of state and creates its own write instance using a btree. After all state transitions are done, the new change sets are returned to the caller. +DeliverBlock is the main state transition function that processes an entire block of transactions. 
-The BranchDB can be replaced and optimized for specific use cases. The implementation is as follows +```mermaid +sequenceDiagram +participant Caller +participant STF +participant State +participant PreBlock +participant BeginBlock +participant TxProcessor +participant EndBlock +Caller->>STF: DeliverBlock(ctx, block, state) +STF->>State: Branch(state) +STF->>State: SetHeaderInfo +STF->>PreBlock: doPreBlock(ctx, txs) +STF->>BeginBlock: doBeginBlock(ctx) +loop For each transaction +STF->>TxProcessor: deliverTx(ctx, state, tx) +TxProcessor->>TxProcessor: validateTx() +TxProcessor->>TxProcessor: execTx() +TxProcessor-->>STF: TxResult +end +STF->>EndBlock: doEndBlock(ctx) +STF->>EndBlock: validatorUpdates(ctx) +STF-->>Caller: BlockResponse, newState, error +``` + +### Dependencies +- Required Input: + - Context + - BlockRequest containing transactions + - ReadOnly state +- Required Components: + - PreBlock handler + - BeginBlock handler + - EndBlock handler + - Transaction validator + - Message router + - Gas meter + +## Simulate + +Simulate executes a transaction without committing changes to the actual state. -```go - type branchdb func(state store.ReaderMap) store.WriterMap +```mermaid +sequenceDiagram +participant Caller +participant STF +participant State +participant TxProcessor +Caller->>STF: Simulate(ctx, state, gasLimit, tx) +STF->>State: Branch(state) +STF->>State: GetHeaderInfo() +STF->>TxProcessor: deliverTx(ctx, state, tx, SimulateMode) +TxProcessor-->>Caller: TxResult, simulationState ``` -## GasMeter +### Dependencies +- Required Input: + - Context + - ReadOnly state + - Gas limit + - Transaction +- Required Components: + - Transaction processor + - Gas meter + - Message router -GasMeter is a utility that keeps track of the gas consumed by the state transition function. It is used to limit the amount of computation that can be done within a block. +## ValidateTx + +ValidateTx performs transaction validation without execution. 
+ +```mermaid +sequenceDiagram +participant Caller +participant STF +participant State +participant Validator +Caller->>STF: ValidateTx(ctx, state, gasLimit, tx) +STF->>State: Branch(state) +STF->>Validator: validateTx(ctx, state, gasLimit, tx) +Validator-->>Caller: TxResult +``` -The GasMeter can be replaced and optimized for specific use cases. The implementation is as follows: +### Dependencies +- Required Input: + - Context + - ReadOnly state + - Gas limit + - Transaction +- Required Components: + - Transaction validator + - Gas meter -```go -type ( - // gasMeter is a function type that takes a gas limit as input and returns a gas.Meter. - // It is used to measure and limit the amount of gas consumed during the execution of a function. - gasMeter func(gasLimit uint64) gas.Meter +## Query - // wrapGasMeter is a function type that wraps a gas meter and a store writer map. - wrapGasMeter func(meter gas.Meter, store store.WriterMap) store.WriterMap -) +Query executes a read-only query against the application state. + +```mermaid +sequenceDiagram +participant Caller +participant STF +participant State +participant QueryRouter +Caller->>STF: Query(ctx, state, gasLimit, req) +STF->>State: Branch(state) +STF->>State: GetHeaderInfo() +STF->>QueryRouter: Invoke(ctx, req) +QueryRouter-->>Caller: Response, error ``` -THe wrapGasMeter is used in order to consume gas. Application developers can seamlsessly replace the gas meter with their own implementation in order to customize consumption of gas. 
+### Dependencies +- Required Input: + - Context + - ReadOnly state + - Gas limit + - Query request message +- Required Components: + - Query router + - Gas meter + - Message handlers + +## Error Handling + +All operations include error handling for: +- Context cancellation +- Gas limit exceeded +- Invalid transactions +- State operation failures +- Panic recovery (in transaction execution) + +## Gas Management + +Gas is tracked and limited for: +- Transaction validation +- Message execution +- State operations +- Query execution + +Each operation that consumes gas uses a gas meter to track usage and ensure limits are not exceeded. \ No newline at end of file diff --git a/server/v2/store/snapshot.go b/server/v2/store/snapshot.go index 0fa2b67c708e..a86fb355e406 100644 --- a/server/v2/store/snapshot.go +++ b/server/v2/store/snapshot.go @@ -177,7 +177,7 @@ func (s *Server[T]) DumpArchiveCmd() *cobra.Command { Use: "dump ", Short: "Dump the snapshot as portable archive format", Args: cobra.ExactArgs(2), - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(cmd *cobra.Command, args []string) (err error) { v := serverv2.GetViperFromCmd(cmd) snapshotStore, err := snapshots.NewStore(filepath.Join(v.GetString(serverv2.FlagHome), "data", "snapshots")) if err != nil { @@ -220,7 +220,9 @@ func (s *Server[T]) DumpArchiveCmd() *cobra.Command { if err != nil { return err } - defer fp.Close() + defer func() { + err = errors.Join(err, fp.Close()) + }() // since the chunk files are already compressed, we just use fastest compression here gzipWriter, err := gzip.NewWriterLevel(fp, gzip.BestSpeed) @@ -255,7 +257,7 @@ func (s *Server[T]) DumpArchiveCmd() *cobra.Command { return fmt.Errorf("failed to close gzip writer: %w", err) } - return fp.Close() + return nil }, } diff --git a/store/v2/commitment/store_bench_test.go b/store/v2/commitment/store_bench_test.go index c8343d942b72..f04ec4d0717b 100644 --- a/store/v2/commitment/store_bench_test.go +++ 
b/store/v2/commitment/store_bench_test.go @@ -1,6 +1,3 @@ -//go:build rocksdb -// +build rocksdb - package commitment_test import ( @@ -20,9 +17,6 @@ import ( var ( storeKeys = []string{"store1", "store2", "store3"} dbBackends = map[string]func(dataDir string) (corestore.KVStoreWithBatch, error){ - "rocksdb_opts": func(dataDir string) (corestore.KVStoreWithBatch, error) { - return dbm.NewRocksDB("test", dataDir) - }, "pebbledb_opts": func(dataDir string) (corestore.KVStoreWithBatch, error) { return dbm.NewPebbleDB("test", dataDir) }, diff --git a/store/v2/db/db.go b/store/v2/db/db.go index 3be46d750f39..dbe67e59792c 100644 --- a/store/v2/db/db.go +++ b/store/v2/db/db.go @@ -11,7 +11,6 @@ type DBType string const ( DBTypeGoLevelDB DBType = "goleveldb" - DBTypeRocksDB DBType = "rocksdb" DBTypePebbleDB DBType = "pebbledb" DBTypePrefixDB DBType = "prefixdb" diff --git a/store/v2/db/pebbledb.go b/store/v2/db/pebbledb.go index c4eb2c0122bf..e6f4d67a97bd 100644 --- a/store/v2/db/pebbledb.go +++ b/store/v2/db/pebbledb.go @@ -72,10 +72,6 @@ func (db *PebbleDB) Get(key []byte) ([]byte, error) { return nil, fmt.Errorf("failed to perform PebbleDB read: %w", err) } - if len(bz) == 0 { - return nil, closer.Close() - } - return bz, closer.Close() } diff --git a/store/v2/db/rocksdb.go b/store/v2/db/rocksdb.go deleted file mode 100644 index 5ef7e64da4e7..000000000000 --- a/store/v2/db/rocksdb.go +++ /dev/null @@ -1,348 +0,0 @@ -//go:build rocksdb -// +build rocksdb - -package db - -import ( - "bytes" - "fmt" - "path/filepath" - "runtime" - "slices" - - "github.com/linxGnu/grocksdb" - - corestore "cosmossdk.io/core/store" - storeerrors "cosmossdk.io/store/v2/errors" -) - -var ( - _ corestore.KVStoreWithBatch = (*RocksDB)(nil) - - defaultReadOpts = grocksdb.NewDefaultReadOptions() -) - -// RocksDB implements `corestore.KVStoreWithBatch`, using RocksDB as the underlying storage engine. 
-// It is only used for store v2 migration, since some clients use RocksDB as -// the IAVL v0/v1 backend. -type RocksDB struct { - storage *grocksdb.DB -} - -// defaultRocksdbOptions is good enough for most cases, including heavy workloads. -// 1GB table cache, 512MB write buffer(may use 50% more on heavy workloads). -// compression: snappy as default, use `-lsnappy` flag to enable. -func defaultRocksdbOptions() *grocksdb.Options { - bbto := grocksdb.NewDefaultBlockBasedTableOptions() - bbto.SetBlockCache(grocksdb.NewLRUCache(1 << 30)) - bbto.SetFilterPolicy(grocksdb.NewBloomFilter(10)) - - rocksdbOpts := grocksdb.NewDefaultOptions() - rocksdbOpts.SetBlockBasedTableFactory(bbto) - // SetMaxOpenFiles to 4096 seems to provide a reliable performance boost - rocksdbOpts.SetMaxOpenFiles(4096) - rocksdbOpts.SetCreateIfMissing(true) - rocksdbOpts.IncreaseParallelism(runtime.NumCPU()) - // 1.5GB maximum memory use for writebuffer. - rocksdbOpts.OptimizeLevelStyleCompaction(512 * 1024 * 1024) - return rocksdbOpts -} - -func NewRocksDB(name, dataDir string) (*RocksDB, error) { - opts := defaultRocksdbOptions() - opts.SetCreateIfMissing(true) - - return NewRocksDBWithOpts(name, dataDir, opts) -} - -func NewRocksDBWithOpts(name, dataDir string, opts *grocksdb.Options) (*RocksDB, error) { - dbPath := filepath.Join(dataDir, name+DBFileSuffix) - storage, err := grocksdb.OpenDb(opts, dbPath) - if err != nil { - return nil, fmt.Errorf("failed to open RocksDB: %w", err) - } - - return &RocksDB{ - storage: storage, - }, nil -} - -func (db *RocksDB) Close() error { - db.storage.Close() - db.storage = nil - return nil -} - -func (db *RocksDB) Get(key []byte) ([]byte, error) { - bz, err := db.storage.GetBytes(defaultReadOpts, key) - if err != nil { - return nil, err - } - - return bz, nil -} - -func (db *RocksDB) Has(key []byte) (bool, error) { - bz, err := db.Get(key) - if err != nil { - return false, err - } - - return bz != nil, nil -} - -func (db *RocksDB) Set(key, value []byte) 
error { - if len(key) == 0 { - return storeerrors.ErrKeyEmpty - } - if value == nil { - return storeerrors.ErrValueNil - } - - return db.storage.Put(grocksdb.NewDefaultWriteOptions(), key, value) -} - -func (db *RocksDB) Delete(key []byte) error { - if len(key) == 0 { - return storeerrors.ErrKeyEmpty - } - - return db.storage.Delete(grocksdb.NewDefaultWriteOptions(), key) -} - -func (db *RocksDB) Iterator(start, end []byte) (corestore.Iterator, error) { - if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { - return nil, storeerrors.ErrKeyEmpty - } - - itr := db.storage.NewIterator(defaultReadOpts) - return newRocksDBIterator(itr, start, end, false), nil -} - -func (db *RocksDB) ReverseIterator(start, end []byte) (corestore.Iterator, error) { - if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { - return nil, storeerrors.ErrKeyEmpty - } - - itr := db.storage.NewIterator(defaultReadOpts) - return newRocksDBIterator(itr, start, end, true), nil -} - -func (db *RocksDB) NewBatch() corestore.Batch { - return &rocksDBBatch{ - db: db, - batch: grocksdb.NewWriteBatch(), - } -} - -func (db *RocksDB) NewBatchWithSize(_ int) corestore.Batch { - return db.NewBatch() -} - -var _ corestore.Iterator = (*rocksDBIterator)(nil) - -type rocksDBIterator struct { - source *grocksdb.Iterator - start []byte - end []byte - valid bool - reverse bool -} - -func newRocksDBIterator(src *grocksdb.Iterator, start, end []byte, reverse bool) *rocksDBIterator { - if reverse { - if end == nil { - src.SeekToLast() - } else { - src.Seek(end) - - if src.Valid() { - eoaKey := readOnlySlice(src.Key()) // end or after key - if bytes.Compare(end, eoaKey) <= 0 { - src.Prev() - } - } else { - src.SeekToLast() - } - } - } else { - if start == nil { - src.SeekToFirst() - } else { - src.Seek(start) - } - } - - return &rocksDBIterator{ - source: src, - start: start, - end: end, - reverse: reverse, - valid: src.Valid(), - } -} - -func (itr *rocksDBIterator) Domain() (start, 
end []byte) { - return itr.start, itr.end -} - -func (itr *rocksDBIterator) Valid() bool { - // once invalid, forever invalid - if !itr.valid { - return false - } - - // if source has error, consider it invalid - if err := itr.source.Err(); err != nil { - itr.valid = false - return false - } - - // if source is invalid, consider it invalid - if !itr.source.Valid() { - itr.valid = false - return false - } - - // if key is at the end or past it, consider it invalid - start := itr.start - end := itr.end - key := readOnlySlice(itr.source.Key()) - - if itr.reverse { - if start != nil && bytes.Compare(key, start) < 0 { - itr.valid = false - return false - } - } else { - if end != nil && bytes.Compare(end, key) <= 0 { - itr.valid = false - return false - } - } - - return true -} - -func (itr *rocksDBIterator) Key() []byte { - itr.assertIsValid() - return copyAndFreeSlice(itr.source.Key()) -} - -func (itr *rocksDBIterator) Value() []byte { - itr.assertIsValid() - return copyAndFreeSlice(itr.source.Value()) -} - -func (itr *rocksDBIterator) Next() { - if !itr.valid { - return - } - - if itr.reverse { - itr.source.Prev() - } else { - itr.source.Next() - } -} - -func (itr *rocksDBIterator) Error() error { - return itr.source.Err() -} - -func (itr *rocksDBIterator) Close() error { - itr.source.Close() - itr.source = nil - itr.valid = false - - return nil -} - -func (itr *rocksDBIterator) assertIsValid() { - if !itr.valid { - panic("rocksDB iterator is invalid") - } -} - -type rocksDBBatch struct { - db *RocksDB - batch *grocksdb.WriteBatch -} - -func (b *rocksDBBatch) Set(key, value []byte) error { - if len(key) == 0 { - return storeerrors.ErrKeyEmpty - } - if value == nil { - return storeerrors.ErrValueNil - } - if b.batch == nil { - return storeerrors.ErrBatchClosed - } - - b.batch.Put(key, value) - return nil -} - -func (b *rocksDBBatch) Delete(key []byte) error { - if len(key) == 0 { - return storeerrors.ErrKeyEmpty - } - if b.batch == nil { - return 
storeerrors.ErrBatchClosed - } - - b.batch.Delete(key) - return nil -} - -func (b *rocksDBBatch) Write() error { - writeOpts := grocksdb.NewDefaultWriteOptions() - writeOpts.SetSync(false) - - if err := b.db.storage.Write(writeOpts, b.batch); err != nil { - return fmt.Errorf("failed to write RocksDB batch: %w", err) - } - - return nil -} - -func (b *rocksDBBatch) WriteSync() error { - writeOpts := grocksdb.NewDefaultWriteOptions() - writeOpts.SetSync(true) - - if err := b.db.storage.Write(writeOpts, b.batch); err != nil { - return fmt.Errorf("failed to write RocksDB batch: %w", err) - } - - return nil -} - -func (b *rocksDBBatch) Close() error { - b.batch.Destroy() - return nil -} - -func (b *rocksDBBatch) GetByteSize() (int, error) { - return len(b.batch.Data()), nil -} - -func readOnlySlice(s *grocksdb.Slice) []byte { - if !s.Exists() { - return nil - } - - return s.Data() -} - -// copyAndFreeSlice will copy a given RocksDB slice and free it. If the slice -// does not exist, will be returned. -func copyAndFreeSlice(s *grocksdb.Slice) []byte { - defer s.Free() - - if !s.Exists() { - return nil - } - - return slices.Clone(s.Data()) -} diff --git a/store/v2/db/rocksdb_noflag.go b/store/v2/db/rocksdb_noflag.go deleted file mode 100644 index ab6ecba70753..000000000000 --- a/store/v2/db/rocksdb_noflag.go +++ /dev/null @@ -1,60 +0,0 @@ -//go:build !rocksdb -// +build !rocksdb - -package db - -import ( - coreserver "cosmossdk.io/core/server" - corestore "cosmossdk.io/core/store" -) - -var _ corestore.KVStoreWithBatch = (*RocksDB)(nil) - -// RocksDB implements `corestore.KVStoreWithBatch` using RocksDB as the underlying storage engine. -// It is used for only store v2 migration, since some clients use RocksDB as -// the IAVL v0/v1 backend. 
-type RocksDB struct{} - -func NewRocksDB(name, dataDir string) (*RocksDB, error) { - panic("rocksdb must be built with -tags rocksdb") -} - -func NewRocksDBWithOpts(dataDir string, opts coreserver.DynamicConfig) (*RocksDB, error) { - panic("rocksdb must be built with -tags rocksdb") -} - -func (db *RocksDB) Close() error { - panic("rocksdb must be built with -tags rocksdb") -} - -func (db *RocksDB) Get(key []byte) ([]byte, error) { - panic("rocksdb must be built with -tags rocksdb") -} - -func (db *RocksDB) Has(key []byte) (bool, error) { - panic("rocksdb must be built with -tags rocksdb") -} - -func (db *RocksDB) Set(key, value []byte) error { - panic("rocksdb must be built with -tags rocksdb") -} - -func (db *RocksDB) Delete(key []byte) error { - panic("rocksdb must be built with -tags rocksdb") -} - -func (db *RocksDB) Iterator(start, end []byte) (corestore.Iterator, error) { - panic("rocksdb must be built with -tags rocksdb") -} - -func (db *RocksDB) ReverseIterator(start, end []byte) (corestore.Iterator, error) { - panic("rocksdb must be built with -tags rocksdb") -} - -func (db *RocksDB) NewBatch() corestore.Batch { - panic("rocksdb must be built with -tags rocksdb") -} - -func (db *RocksDB) NewBatchWithSize(_ int) corestore.Batch { - panic("rocksdb must be built with -tags rocksdb") -} diff --git a/store/v2/db/rocksdb_test.go b/store/v2/db/rocksdb_test.go deleted file mode 100644 index 75147a56855d..000000000000 --- a/store/v2/db/rocksdb_test.go +++ /dev/null @@ -1,20 +0,0 @@ -//go:build rocksdb -// +build rocksdb - -package db - -import ( - "testing" - - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" -) - -func TestRocksDBSuite(t *testing.T) { - db, err := NewRocksDB("test", t.TempDir()) - require.NoError(t, err) - - suite.Run(t, &DBTestSuite{ - db: db, - }) -} diff --git a/store/v2/go.mod b/store/v2/go.mod index b2fe4ca936c2..6223bfc08e4e 100644 --- a/store/v2/go.mod +++ b/store/v2/go.mod @@ -15,7 +15,6 @@ require ( 
github.com/cosmos/ics23/go v0.11.0 github.com/google/btree v1.1.3 github.com/hashicorp/go-metrics v0.5.3 - github.com/linxGnu/grocksdb v1.9.3 github.com/spf13/cast v1.7.1 github.com/stretchr/testify v1.10.0 github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d diff --git a/store/v2/go.sum b/store/v2/go.sum index 3cebafb2b28f..45aa36096be2 100644 --- a/store/v2/go.sum +++ b/store/v2/go.sum @@ -161,8 +161,6 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/linxGnu/grocksdb v1.9.3 h1:s1cbPcOd0cU2SKXRG1nEqCOWYAELQjdqg3RVI2MH9ik= -github.com/linxGnu/grocksdb v1.9.3/go.mod h1:QYiYypR2d4v63Wj1adOOfzglnoII0gLj3PNh4fZkcFA= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= diff --git a/systemtests/getting_started.md b/systemtests/getting_started.md index 7adc98f1b3e9..e9b5ad01539e 100644 --- a/systemtests/getting_started.md +++ b/systemtests/getting_started.md @@ -92,7 +92,7 @@ At the end is a tail from the server log printed. This can sometimes be handy wh When we have a json response, the [gjson](https://github.com/tidwall/gjson) lib can shine. It comes with jquery like syntax that makes it easy to navigation within the document. -For example `gjson.Get(raw, "supply").Array()` gives us all the childs to `supply` as an array. +For example `gjson.Get(raw, "supply").Array()` gives us all the children to `supply` as an array. Or `gjson.Get("supply.#(denom==stake).amount").Int()` for the amount of the stake token as int64 type. 
In order to test our assumptions in the system test, we modify the code to use `gjson` to fetch the data: diff --git a/x/accounts/defaults/lockup/README.md b/x/accounts/defaults/lockup/README.md index 879d4a9a7e1d..a68784a1f0ba 100644 --- a/x/accounts/defaults/lockup/README.md +++ b/x/accounts/defaults/lockup/README.md @@ -118,15 +118,15 @@ The spendable amount is calculated as: `spendableAmount` = `balance` - `notBondedLockedAmount` where `notBondedLockedAmount` = `lockedAmount` - `Min(lockedAmount, delegatedLockedAmount)` -As seen in the formula `notBondedLockedAmout` can only be 0 or a positive value when `DelegatedLockedAmount` < `LockedAmount`. Let call `NewDelegatedLockedAmount` is the `delegatedLockedAmount` when applying N slash +As seen in the formula `notBondedLockedAmount` can only be 0 or a positive value when `DelegatedLockedAmount` < `LockedAmount`. Let call `NewDelegatedLockedAmount` is the `delegatedLockedAmount` when applying N slash 1. Case 1: Originally `DelegatedLockedAmount` > `lockedAmount` but when applying the slash amount the `NewDelegatedLockedAmount` < `lockedAmount` then - * When not applying slash `notBondedLockedAmout` will be 0 - * When apply slash `notBondedLockedAmout` will be `lockedAmount` - `NewDelegatedLockedAmount` = a positive amount + * When not applying slash `notBondedLockedAmount` will be 0 + * When apply slash `notBondedLockedAmount` will be `lockedAmount` - `NewDelegatedLockedAmount` = a positive amount 2. Case 2: where originally `DelegatedLockedAmount` < `lockedAmount` when applying the slash amount the `NewDelegatedLockedAmount` < `lockedAmount` then * When not applying slash `lockedAmount` - `DelegatedLockedAmount` - * When apply slash `notBondedLockedAmout` will be `lockedAmount` - `NewDelegatedLockedAmount` = `lockedAmount` - `(DelegatedLockedAmount - N)` = `lockedAmount` - `DelegatedLockedAmount` + N -3. 
Case 3: where originally `DelegatedLockedAmount` > `lockedAmount` when applying the slash amount still the `NewDelegatedLockedAmount` > `lockedAmount` then `notBondedLockedAmout` will be 0 applying slash or not + * When apply slash `notBondedLockedAmount` will be `lockedAmount` - `NewDelegatedLockedAmount` = `lockedAmount` - `(DelegatedLockedAmount - N)` = `lockedAmount` - `DelegatedLockedAmount` + N +3. Case 3: where originally `DelegatedLockedAmount` > `lockedAmount` when applying the slash amount still the `NewDelegatedLockedAmount` > `lockedAmount` then `notBondedLockedAmount` will be 0 applying slash or not In cases 1 and 2, `notBondedLockedAmount` decreases when not applying the slash, resulting in a higher `spendableAmount`. diff --git a/x/auth/ante/sigverify.go b/x/auth/ante/sigverify.go index 3b83b17e3379..fc93c3861a18 100644 --- a/x/auth/ante/sigverify.go +++ b/x/auth/ante/sigverify.go @@ -69,18 +69,24 @@ type AccountAbstractionKeeper interface { // // CONTRACT: Tx must implement SigVerifiableTx interface type SigVerificationDecorator struct { - ak AccountKeeper - aaKeeper AccountAbstractionKeeper - signModeHandler *txsigning.HandlerMap - sigGasConsumer SignatureVerificationGasConsumer + ak AccountKeeper + aaKeeper AccountAbstractionKeeper + signModeHandler *txsigning.HandlerMap + sigGasConsumer SignatureVerificationGasConsumer + extraVerifyIsOnCurve func(pubKey cryptotypes.PubKey) (bool, error) } func NewSigVerificationDecorator(ak AccountKeeper, signModeHandler *txsigning.HandlerMap, sigGasConsumer SignatureVerificationGasConsumer, aaKeeper AccountAbstractionKeeper) SigVerificationDecorator { + return NewSigVerificationDecoratorWithVerifyOnCurve(ak, signModeHandler, sigGasConsumer, aaKeeper, nil) +} + +func NewSigVerificationDecoratorWithVerifyOnCurve(ak AccountKeeper, signModeHandler *txsigning.HandlerMap, sigGasConsumer SignatureVerificationGasConsumer, aaKeeper AccountAbstractionKeeper, verifyFn func(pubKey cryptotypes.PubKey) (bool, error)) 
SigVerificationDecorator { return SigVerificationDecorator{ - aaKeeper: aaKeeper, - ak: ak, - signModeHandler: signModeHandler, - sigGasConsumer: sigGasConsumer, + aaKeeper: aaKeeper, + ak: ak, + signModeHandler: signModeHandler, + sigGasConsumer: sigGasConsumer, + extraVerifyIsOnCurve: verifyFn, } } @@ -105,7 +111,13 @@ func OnlyLegacyAminoSigners(sigData signing.SignatureData) bool { } } -func verifyIsOnCurve(pubKey cryptotypes.PubKey) (err error) { +func (svd SigVerificationDecorator) VerifyIsOnCurve(pubKey cryptotypes.PubKey) error { + if svd.extraVerifyIsOnCurve != nil { + handled, err := svd.extraVerifyIsOnCurve(pubKey) + if handled { + return err + } + } // when simulating pubKey.Key will always be nil if pubKey.Bytes() == nil { return nil @@ -134,7 +146,7 @@ func verifyIsOnCurve(pubKey cryptotypes.PubKey) (err error) { pubKeysObjects := typedPubKey.GetPubKeys() ok := true for _, pubKeyObject := range pubKeysObjects { - if err := verifyIsOnCurve(pubKeyObject); err != nil { + if err := svd.VerifyIsOnCurve(pubKeyObject); err != nil { ok = false break } @@ -417,7 +429,7 @@ func (svd SigVerificationDecorator) setPubKey(ctx context.Context, acc sdk.Accou return sdkerrors.ErrInvalidPubKey.Wrapf("the account %s cannot be claimed by public key with address %x", acc.GetAddress(), txPubKey.Address()) } - err := verifyIsOnCurve(txPubKey) + err := svd.VerifyIsOnCurve(txPubKey) if err != nil { return err } diff --git a/x/auth/vesting/README.md b/x/auth/vesting/README.md index 5fd33462a949..f72e8b6aff7d 100644 --- a/x/auth/vesting/README.md +++ b/x/auth/vesting/README.md @@ -46,7 +46,7 @@ For all vesting accounts, the owner of the vesting account is able to delegate a ## Note -Vesting accounts can be initialized with some vesting and non-vesting coins. The non-vesting coins would be immediately transferable. DelayedVesting ContinuousVesting, PeriodicVesting and PermenantVesting accounts can be created with normal messages after genesis. 
Other types of vesting accounts must be created at genesis, or as part of a manual network upgrade. The current specification only allows for _unconditional_ vesting (ie. there is no possibility of reaching `ET` and +Vesting accounts can be initialized with some vesting and non-vesting coins. The non-vesting coins would be immediately transferable. DelayedVesting, ContinuousVesting, PeriodicVesting and PermanentVesting accounts can be created with normal messages after genesis. Other types of vesting accounts must be created at genesis, or as part of a manual network upgrade. The current specification only allows for _unconditional_ vesting (ie. there is no possibility of reaching `ET` and having coins fail to vest). ## Vesting Account Types